hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7696d0db1433c0a0db1777743290bc228d1566a | 20,228 | py | Python | examples/bs2019/analysis.py | TaoYang-CFEI/mshoot | 26680422a7101b386c703eee8e1979fc87de7da7 | [
"BSD-3-Clause"
] | 14 | 2019-01-15T14:30:43.000Z | 2022-02-06T08:36:36.000Z | examples/bs2019/analysis.py | TaoYang-CFEI/mshoot | 26680422a7101b386c703eee8e1979fc87de7da7 | [
"BSD-3-Clause"
] | 4 | 2019-02-01T10:32:48.000Z | 2021-02-21T08:53:53.000Z | examples/bs2019/analysis.py | TaoYang-CFEI/mshoot | 26680422a7101b386c703eee8e1979fc87de7da7 | [
"BSD-3-Clause"
] | 5 | 2019-02-08T09:20:52.000Z | 2021-04-25T02:17:54.000Z | #%%
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%% Measurementes
ms_path = 'examples/bs2019/measurements.csv'
ms = pd.read_csv(ms_path, index_col=0)
ms.index = pd.to_datetime(ms.index)
t0 = pd.to_datetime('2018-04-05 00:00:00')
t1 = pd.to_datetime('2018-04-08 00:00:00')
ms = ms.loc[t0:t1]
solrad = ms['solrad'].values
Tout = ms['Tout'].values
occ = ms['occ'].values
t = (ms.index - ms.index[0]).total_seconds() / 3600.
fig, ax = plt.subplots(3, 1, figsize=(5, 3), sharex=True)
fig.set_dpi(120)
ax[0].plot(t, Tout, 'b-')
ax[0].set_ylabel("$T_{out}$ [$^\circ$C]")
ax[1].plot(t, solrad, 'b-')
ax[1].set_ylabel("$q_{sol}$ [W/m$^2$]")
ax[2].plot(t, occ, 'b-')
ax[2].set_ylabel("$n_{occ}$ [-]")
ax[2].set_xticks(np.arange(0, 73, 24))
ax[2].set_xlabel("$t$ [h]")
plt.subplots_adjust(0.13, 0.15, 0.98, 0.98)
fig.savefig('examples/bs2019/figs/inputs_mpc.pdf')
### Case 1 ####################################################################
#%% Compare estimates between FMUs
est_dir = 'examples/bs2019/case1/results/est/'
tols = [
'1e-4',
'1e-6',
'1e-7',
'1e-9',
'1e-11'
]
cols = pd.read_csv(est_dir + 'r1c1_dymola_' + tols[0] +
'/parameters_rel.csv').columns
parameters = pd.DataFrame(index=pd.Index(tols, name='tol'),
columns=cols)
for t in tols:
for p in cols:
parameters.loc[t, p] = pd.read_csv(est_dir + 'r1c1_dymola_'
+ t + '/parameters_rel.csv')[p].iloc[0]
parameters.T.plot(kind='bar')
#%% Parameter estimation: validation
est_dir = 'examples/bs2019/case1/results/est/'
tols = [
'1e-4',
'1e-6',
'1e-7',
'1e-9',
'1e-11'
]
idl = pd.read_csv(est_dir + 'ideal.csv', index_col=0)
idl
vld = pd.DataFrame()
for t in tols:
res = pd.read_csv(est_dir + 'r1c1_dymola_' + t +
'/vld_res.csv', index_col=0)
vld[t] = res['T']
idl = idl.loc[:vld.index[-1]]
idl.index /= 3600.
vld.index /= 3600.
# Plot
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
fig.set_dpi(130)
ax.plot(idl['T'], ls='-', label='Measurement')
ax.plot(vld['1e-11'], ls='-.', label='R1C1')
ax.legend(loc='lower right')
ax.set_xticks(np.arange(0, vld.index[-1] + 1, 24))
ax.set_ylabel('$T$ [$^\circ$C]')
ax.set_xlabel('$t$ [h]')
ax.vlines(5*24., ymin=19.5, ymax=26.9, linestyles='--', lw=0.75, color='k')
ax.set_ylim(19.5, 26.9)
ax.text(80, 26.4, "Training")
ax.text(128, 26.4, "Validation")
plt.subplots_adjust(0.1, 0.18, 0.99, 0.99)
fig.savefig('examples/bs2019/figs/validation_T.pdf')
#%% Result overview
fmu = 'r1c1_dymola_1e-11'
hrz = 4
outdir = 'examples/bs2019/case1/results/mpc/{}/h{}/'.format(fmu, hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
# Optimized inputs
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(130)
# ax[0]
ax[0].plot(u['vpos'], 'k-', lw=2)
ax[0].set_ylim(-100, 100)
ax[0].set_ylabel('$q$ [%]')
# ax[1]
# ax[1].plot(xctr['x0'], label='Control')
ax[1].plot(xemu['cair.T'], 'r-', label=fmu)
ax[1].legend(loc='upper right')
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T_i$ [$^\circ$C]')
# ax[0] - subinterval solutions
files = os.listdir(outdir)
ufiles = list()
for f in files:
fname = f.split('.')[0]
if fname[0] == 'u' and len(fname) > 1:
ufiles.append(f)
udfs = list()
for i in range(len(ufiles)):
df = pd.read_csv(outdir + 'u{}.csv'.format(i), index_col=0)
df.index /= 3600.
ax[0].plot(df['vpos'], ls='--', lw=1.)
# plt.show()
#%% Compare horizons
fmu = 'r1c1_dymola_1e-11'
horizons = [2, 4, 6, 8, 10]
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(120)
Qrc = dict()
i = 0
for hrz in horizons:
outdir = 'examples/bs2019/case1/results/mpc/{}/h{}/'.format(fmu, hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
u['vpos'] *= 20 # [%] -> [W]
Qrc[hrz] = u['vpos'].abs().sum() / 1000.
# Actual horizon string, e.g. "6h"
ahrz = "{}h".format(hrz)
# Color map
lspace = np.linspace(0, 1, len(horizons))
colors = [plt.cm.winter(x) for x in lspace]
# ax[0]
ax[0].plot(u['vpos'], c=colors[i], label=ahrz)
# ax[1]
ax[1].plot(xemu['cair.T'], c=colors[i], label=ahrz)
i += 1
ax[1].legend(loc='center', bbox_to_anchor=(0.5,-0.5), ncol=5)
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[0].set_ylim(-2200, 2200)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[0].set_ylabel('$q$ [W]')
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T$ [$^\circ$C]')
ax[0].set_title('(a)')
ax[1].set_title('(b)')
plt.subplots_adjust(left=0.16, right=0.99, top=0.93, bottom=0.24)
fig.tight_layout()
fig.savefig('examples/bs2019/figs/case1_horizon_tol_1e-11.pdf')
#%% Computational time
# FMU 1e-11
wd1 = 'examples/bs2019/case1/results/mpc/r1c1_dymola_1e-11/'
# FMU 1e-9
wd2 = 'examples/bs2019/case1/results/mpc/r1c1_dymola_1e-9/'
# SVM
wd3 = 'examples/bs2019/case2/results/mpc-lin/'
hdirs1 = [x[0].split('/')[-1] for x in os.walk(wd1)][1:]
hdirs2 = [x[0].split('/')[-1] for x in os.walk(wd2)][1:]
hdirs3 = [x[0].split('/')[-1] for x in os.walk(wd3)][1:]
hix = [int(x[1:]) for x in hdirs1]
hix = sorted(hix)
ct1 = list()
ct2 = list()
ct3 = list()
# Number of optimization variables
nv = [x * 2 for x in hix]
# Optimization horizon [h]
oh = [x for x in hix]
for h in hix:
with open(wd1 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct1.append(x / 60.)
with open(wd2 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct2.append(x / 60.)
with open(wd3 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct3.append(x / 60.)
fig, ax = plt.subplots(1, 1, figsize=(5,3))
fig.set_dpi(120)
plt.plot(oh, ct1, marker='s', c='k', ls=':', lw=1., label='R1C1 FMU (tol=1e-11)')
plt.plot(oh, ct2, marker='o', c='b', ls=':', lw=1., label='R1C1 FMU (tol=1e-9)')
plt.plot(oh, ct3, marker='v', c='r', ls=':', lw=1., label='SVR')
ax.set_xlabel('Optimization horizon [h]')
ax.set_ylabel('Total CPU time [min]')
ax2 = ax.twiny()
ax2.set_xticks(ax.get_xticks())
ax2.set_xlim(ax.get_xlim())
ax2.set_xticklabels([int(x * 2) for x in ax.get_xticks()])
ax2.set_xlabel('Number of optimization variables')
ax.legend()
ax.grid()
plt.subplots_adjust(0.1, 0.18, 0.99, 0.85)
fig.savefig('examples/bs2019/figs/cputime.pdf')
plt.show()
#%% Solution quality - omit CVode FMUs, they seem not working correctly
# Read all inputs and states
wd = 'examples/bs2019/case1/results/mpc/'
fmus = os.listdir(wd)
hz = '/h10/'
new_names = [y[5:].replace('_', ' ') for y in fmus]
for i in range(len(new_names)):
new_names[i] = new_names[i].replace('dymola ', 'tol=')
cdirs = [wd + x + hz for x in fmus]
cmap = {x:y for x, y in zip(cdirs, new_names)}
uall = pd.DataFrame()
xall = pd.DataFrame()
for c, f in zip(cdirs, fmus):
u = pd.read_csv(c + 'u.csv', index_col=0)
x = pd.read_csv(c + 'xemu.csv', index_col=0)
uall[c] = u['vpos']
xall[c] = x['cair.T']
uall = uall.rename(columns=cmap) # Inputs
xall = xall.rename(columns=cmap) # States
# Energy consumption
q = uall * 20.
Q = q.abs().sum() / 1000. # [kWh]
# Constraint violation
cstr = pd.read_csv(wd + 'r1c1_dymola_1e-9/h2/constr.csv')
cstr['time'] = cstr['time'].astype(int)
cstr = cstr.set_index('time')
vup = xall.copy()
vlo = xall.copy()
for c in xall:
vup[c] = xall[c] - cstr['Tmax']
vup[c].loc[vup[c] < 0] = 0
vlo[c] = cstr['Tmin'] - xall[c]
vlo[c].loc[vlo[c] < 0] = 0
vtot = vup + vlo
vtot = vtot.sum()
# Case order for plots
cord = ['tol=1e-4', 'tol=1e-6', 'tol=1e-7', 'tol=1e-9', 'tol=1e-11']
# Ordered results
Qord = [Q.loc[x] for x in cord]
vord = [vtot.loc[x] for x in cord]
# Show both on scatter plot
n_horizons = 5
lspace = np.linspace(0, 1, n_horizons)
colors = [plt.cm.jet(x) for x in lspace]
markers = ['o', 's', 'D', 'v', '^']
fig, ax = plt.subplots(figsize=(5, 3))
fig.set_dpi(120)
for q, v, l, c, m in zip(Qord, vord, cord, colors, markers):
plt.scatter(q, v, label=l, c=c, s=100, marker=m)
ax.set_xlabel('Total energy consumption $Q$ [kWh]')
ax.set_ylabel('Temperature violation $v_T$ [Kh]')
ax.legend(loc='center', ncol=3, bbox_to_anchor=(0.45,-0.4))
ax.grid()
plt.subplots_adjust(0.18, 0.35, 0.97, 0.95)
fig.savefig('examples/bs2019/figs/solution_quality.pdf')
# Case 2 ######################################################################
#%% Model validation
svr_x = pd.read_csv('examples/bs2019/case2/results/mpc-lin/vld_xctr.csv', index_col=0)
svr_x = svr_x.rename(columns={'cair.T':'T'})
svr_x.index /= 3600.
svr_x['T'] -= 273.15
rc_x = pd.read_csv('examples/bs2019/case2/results/mpc-lin/vld_xemu.csv', index_col=0)
rc_x = rc_x.rename(columns={'cair.T':'T'})
rc_x.index /= 3600.
rc_x['T'] -= 273.15
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
fig.set_dpi(130)
ax.plot(rc_x.index, rc_x['T'].values, label='R1C1')
ax.plot(svr_x.index, svr_x['T'].values, label='SVR', ls='--')
ax.legend()
ax.set_xlabel('$t$ [h]')
ax.set_ylabel('$T$ [$^\circ$C]')
ax.set_xticks(np.arange(0, 97, 24))
plt.subplots_adjust(0.13, 0.15, 0.98, 0.98)
fig.savefig('examples/bs2019/figs/svr_validation.pdf')
#%% Overview
hrz = 6
outdir = 'examples/bs2019/case2/results/mpc-lin/h{}/'.format(hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
# Optimized inputs
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(130)
# ax[0]
ax[0].plot(u['vpos'], 'k-', lw=2)
ax[0].set_ylim(-100, 100)
ax[0].set_ylabel('$q$ [%]')
# ax[1]
ax[1].plot(xctr['x0'], label='Control')
ax[1].plot(xemu['cair.T'], 'r--', label='Emulation')
ax[1].legend(loc='upper left')
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T_i$ [$^\circ$C]')
# ax[0] - subinterval solutions
files = os.listdir(outdir)
ufiles = list()
for f in files:
fname = f.split('.')[0]
if fname[0] == 'u' and len(fname) > 1:
ufiles.append(f)
udfs = list()
for i in range(len(ufiles)):
df = pd.read_csv(outdir + 'u{}.csv'.format(i), index_col=0)
df.index /= 3600.
ax[0].plot(df['vpos'], ls='--', lw=1.)
#%%
horizons = [2, 4, 6, 8, 10]
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(120)
Qsvr = dict()
i = 0
for hrz in horizons:
outdir = 'examples/bs2019/case2/results/mpc-lin/h{}/'.format(hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
u['vpos'] *= 20. # [%] -> [W]
Qsvr[hrz] = u['vpos'].abs().sum() / 1000.
# Actual horizon string, e.g. "6h"
ahrz = "{}h".format(hrz)
# Color map
lspace = np.linspace(0, 1, len(horizons))
colors = [plt.cm.winter(x) for x in lspace]
# ax[0]
ax[0].plot(u['vpos'], c=colors[i], label=ahrz)
# ax[1]
ax[1].plot(xemu['cair.T'], c=colors[i], label=ahrz)
i += 1
ax[1].legend(loc='center', bbox_to_anchor=(0.5,-0.5), ncol=5)
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[0].set_ylim(-2200, 2200)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[0].set_ylabel('$q$ [W]')
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T$ [$^\circ$C]')
ax[0].set_title('(a)')
ax[1].set_title('(b)')
plt.subplots_adjust(left=0.16, right=0.99, top=0.93, bottom=0.24)
fig.tight_layout()
fig.savefig('examples/bs2019/figs/case2_horizon.pdf')
### Case 3 ####################################################################
#%% Result vs. horizon
horizons = [9]#, 4, 6, 8, 10]
fig, ax = plt.subplots(2, 2, sharex=True, sharey=False,
figsize=(6, 4))
fig.set_dpi(120)
i = 0
for hrz in horizons:
outdir = 'examples/bs2019/case3/results/mpc/h{}/'.format(hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr['Tmin'] -= 273.15
constr['Tmax'] -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu['cair.T'] -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr['x0'] -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
# Actual horizon string, e.g. "6h"
ahrz = "{}h".format(hrz)
# Color map
lspace = np.linspace(0, 1, len(horizons))
colors = [plt.cm.winter(x) for x in lspace]
# ax[0]
ax[0][0].plot(u['vpos'], c=colors[i], label=ahrz)
ax[0][1].plot(u['dpos'], c=colors[i], label=ahrz)
# ax[1]
ax[1][0].plot(xemu['cair.T'], c=colors[i], label=ahrz)
ax[1][1].plot(xemu['co2.balance.CO2ppmv_i'], c=colors[i], label=ahrz)
i += 1
#ax[1][0].legend(loc='center', bbox_to_anchor=(0.5,-0.5), ncol=5)
ax[1][0].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1][0].plot(constr['Tmax'], 'k--', lw=0.5)
ax[1][1].plot(constr['CO2min'], 'k--', lw=0.5)
ax[1][1].plot(constr['CO2max'], 'k--', lw=0.5)
ax[0][0].set_ylim(0, 105)
ax[1][0].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1][0].set_yticks(np.arange(19, 25, 1))
ax[0][0].set_ylabel('$v_{p}$ [%]')
ax[1][0].set_xlabel('$t$ [h]')
ax[1][0].set_ylabel('$T_i$ [$^\circ$C]')
ax[0][1].set_ylabel('$d_{p}$ [%]')
ax[1][1].set_xlabel('$t$ [h]')
ax[1][1].set_ylabel('$C_i$ [ppm]')
fig.tight_layout()
#%% MPC vs PID - using emulation model results
hrz = 3
# Outputs
ydf_pid = pd.read_csv('examples/bs2019/case3/results/pid/ydf.csv',
index_col=0)
ydf_pid.index /= 3600.
y_pid = ydf_pid.drop(['vpos', 'dpos'], axis=1)
y_mpc = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/yemu.csv'\
.format(hrz), index_col=0)
y_mpc.index /= 3600.
# Control inputs
u_pid = ydf_pid[['vpos', 'dpos']]
y_pid.index /= 3600.
u_mpc = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/u.csv'.format(hrz),
index_col=0)
u_mpc.index /= 3600.
# States
x_pid = pd.read_csv('examples/bs2019/case3/results/pid/xdf.csv',
index_col=0)
x_pid.index /= 3600.
x_mpc = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/xemu.csv'\
.format(hrz), index_col=0)
x_mpc.index /= 3600.
# Constraints
constr = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/constr.csv'\
.format(hrz), index_col=0)
constr.index /= 3600.
# Plot
fig, axes = plt.subplots(2, 2, figsize=(5,3), sharex=True)
fig.set_dpi(120)
ax = axes[0][0]
ax.plot(x_pid['cair.T'] - 273.15, label='PID')
ax.plot(x_mpc['cair.T'] - 273.15, label='MPC')
ax.plot(constr['Tmin'] - 273.15, 'k--', lw=0.5)
ax.plot(constr['Tmax'] - 273.15, 'k--', lw=0.5)
ax.set_ylabel('$T_i$ [$^\circ$C]')
ax.set_yticks(np.arange(19, 26, 2))
ax = axes[0][1]
ax.plot(x_pid['co2.balance.CO2ppmv_i'], label='PID')
ax.plot(x_mpc['co2.balance.CO2ppmv_i'], label='MPC')
ax.plot(constr['CO2min'], 'k--', lw=0.5)
ax.plot(constr['CO2max'], 'k--', lw=0.5)
ax.set_ylabel('$C_i$ [ppm]')
ax.set_yticks(np.arange(400, 1001, 200))
ax = axes[1][0]
ax.plot(u_pid['vpos'], label='PID')
ax.plot(u_mpc['vpos'], label='MPC')
ax.set_ylim(0, 100)
ax.set_xlabel('$t$ [h]')
ax.set_ylabel('$v_{p}$ [%]')
ax = axes[1][1]
ax.plot(u_pid['dpos'], label='PID')
ax.plot(u_mpc['dpos'], label='MPC')
ax.set_ylim(0, 100)
ax.set_xlabel('$t$ [h]')
ax.set_ylabel('$d_{p}$ [%]')
ax.set_xticks(np.arange(0, u_pid.index.values[-1] + 1, 24))
fig.tight_layout()
axes[1][0].legend(loc='center', bbox_to_anchor=(1.15, -0.6), ncol=2)
plt.subplots_adjust(0.15, 0.25, 0.99, 0.98)
fig.savefig('examples/bs2019/figs/case3_mpc_pid.pdf')
#%% MPC vs PID - using control model results
hrz = 10
# Outputs
ydf_pid = pd.read_csv('examples/bs2019/case3/results/pid/ydf.csv',
index_col=0)
ydf_pid.index /= 3600.
y_pid = ydf_pid.drop(['vpos', 'dpos'], axis=1)
y_mpc = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/yemu.csv'\
.format(hrz), index_col=0)
y_mpc.index /= 3600.
# Control inputs
u_pid = ydf_pid[['vpos', 'dpos']]
y_pid.index /= 3600.
u_mpc = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/u.csv'.format(hrz),
index_col=0)
u_mpc.index /= 3600.
# States
x_pid = pd.read_csv('examples/bs2019/case3/results/pid/xdf.csv',
index_col=0)
x_pid.index /= 3600.
x_mpc = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/xctr.csv'\
.format(hrz), index_col=0)
x_mpc.index /= 3600.
# Constraints
constr = pd.read_csv('examples/bs2019/case3/results/mpc/h{}/constr.csv'\
.format(hrz), index_col=0)
constr.index /= 3600.
# Plot
fig, axes = plt.subplots(2, 2, figsize=(5,3), sharex=True)
fig.set_dpi(120)
ax = axes[0][0]
ax.plot(x_pid['cair.T'] - 273.15, label='PID')
ax.plot(x_mpc['x0'] - 273.15, label='MPC')
ax.plot(constr['Tmin'] - 273.15, 'k--', lw=0.5)
ax.plot(constr['Tmax'] - 273.15, 'k--', lw=0.5)
ax.set_ylabel('$T_i$ [$^\circ$C]')
ax.set_yticks(np.arange(19, 26, 2))
ax = axes[0][1]
ax.plot(x_pid['co2.balance.CO2ppmv_i'], label='PID')
ax.plot(x_mpc['x1'], label='MPC')
ax.plot(constr['CO2min'], 'k--', lw=0.5)
ax.plot(constr['CO2max'], 'k--', lw=0.5)
ax.set_ylabel('$C_i$ [ppm]')
ax.set_yticks(np.arange(400, 1001, 200))
ax = axes[1][0]
ax.plot(u_pid['vpos'], label='PID')
ax.plot(u_mpc['vpos'], label='MPC')
ax.set_ylim(0, 100)
ax.set_xlabel('$t$ [h]')
ax.set_ylabel('$v_{p}$ [%]')
ax = axes[1][1]
ax.plot(u_pid['dpos'], label='PID')
ax.plot(u_mpc['dpos'], label='MPC')
ax.set_ylim(0, 100)
ax.set_xlabel('$t$ [h]')
ax.set_ylabel('$d_{p}$ [%]')
ax.set_xticks(np.arange(0, u_pid.index.values[-1] + 1, 24))
fig.tight_layout()
axes[1][0].legend(loc='center', bbox_to_anchor=(1.15, -0.6), ncol=2)
plt.subplots_adjust(0.15, 0.25, 0.99, 0.98)
fig.savefig('examples/bs2019/figs/case3_mpc_pid.pdf') | 25.221945 | 86 | 0.595165 |
fd0a3f006005472ea7e2dbb973173685da622ddf | 462 | py | Python | mspray/apps/main/migrations/0040_auto_20171209_0252.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | null | null | null | mspray/apps/main/migrations/0040_auto_20171209_0252.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | 76 | 2018-03-15T09:37:56.000Z | 2019-05-15T12:45:51.000Z | mspray/apps/main/migrations/0040_auto_20171209_0252.py | onaio/mspray | b3e0f4b5855abbf0298de6b66f2e9f472f2bf838 | [
"Apache-2.0"
] | 1 | 2020-10-31T07:15:22.000Z | 2020-10-31T07:15:22.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-09 02:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0039_weeklyreport_structures'),
]
operations = [
migrations.AlterField(
model_name='weeklyreport',
name='structures',
field=models.IntegerField(default=0),
),
]
| 22 | 49 | 0.625541 |
ce0282942a7dfc5bf78c8acbfd8f22d00e6ab702 | 79 | py | Python | Python/main/test.py | suninsky/ReceiptOCR | 1be8f4e91a2188b57484e2a2b52f08e3cf9e5483 | [
"MIT"
] | 1 | 2018-10-11T04:17:27.000Z | 2018-10-11T04:17:27.000Z | Python/main/test.py | suninsky/ReceiptOCR | 1be8f4e91a2188b57484e2a2b52f08e3cf9e5483 | [
"MIT"
] | null | null | null | Python/main/test.py | suninsky/ReceiptOCR | 1be8f4e91a2188b57484e2a2b52f08e3cf9e5483 | [
"MIT"
] | null | null | null | f=open('result','r')
result=f.read()
print ','.join(result.split('\n')[0:3])[0] | 26.333333 | 42 | 0.594937 |
221865a2ee336166409384ddb8df87ef61ddef91 | 83,175 | py | Python | salt/netapi/rest_cherrypy/app.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | salt/netapi/rest_cherrypy/app.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | salt/netapi/rest_cherrypy/app.py | cbosdo/salt-1 | 9084d662781f9c0944804ba087e652c2ddb730bf | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://github.com/cherrypy/cherrypy/issues/1298>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionaddedd:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionaddedd:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import json
import os
import signal
import tarfile
from multiprocessing import Process, Pipe
# Import third-party libs
# pylint: disable=import-error
import cherrypy
import yaml
import salt.ext.six as six
# pylint: enable=import-error
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket support. These are optional: if the
# websocket tooling (and its ws4py dependency) is unavailable, stub out
# the module attribute so later references still resolve, and record
# availability in HAS_WEBSOCKETS for feature gating.
try:
    from .tools import websockets
    from . import event_processor

    HAS_WEBSOCKETS = True
except ImportError:
    # Placeholder object so ``websockets.SynchronizingWebsocket`` can be
    # referenced without raising NameError/AttributeError.
    websockets = type('websockets', (object,), {
        'SynchronizingWebsocket': None,
    })

    HAS_WEBSOCKETS = False
def html_override_tool():
    '''
    Bypass the normal handler and serve HTML for all URLs

    The ``app_path`` setting must be non-empty and the request must ask for
    ``text/html`` in the ``Accept`` header.
    '''
    apiopts = cherrypy.config['apiopts']
    req = cherrypy.request

    # Requests aimed at the app itself or its static assets are never
    # redirected, or the redirect would loop.
    skip_prefixes = (
        apiopts.get('app_path', '/app'),
        apiopts.get('static_path', '/static'),
    )

    if 'app' not in cherrypy.config['apiopts']:
        return

    if req.path_info.startswith(skip_prefixes):
        return

    # A wildcard Accept header is not an explicit request for HTML
    if req.headers.get('Accept') == '*/*':
        return

    try:
        preferred = cherrypy.lib.cptools.accept('text/html')
    except cherrypy.HTTPError:
        return
    if preferred != 'text/html':
        return

    raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
    '''
    If the custom authentication header is supplied, put it in the cookie dict
    so the rest of the session-based auth works as intended
    '''
    token_header = cherrypy.request.headers.get('X-Auth-Token', None)

    if token_header:
        # The X-Auth-Token header takes precedence over any session cookie
        cherrypy.request.cookie['session_id'] = token_header
def salt_api_acl_tool(username, request):
    '''
    .. versionadded:: 2016.3.0

    Verifies user requests against the API whitelist. (User/IP pair)
    in order to provide whitelisting for the API similar to the
    master, but over the API.

    .. code-block:: yaml

        rest_cherrypy:
            api_acl:
                users:
                    '*':
                        - 1.1.1.1
                        - 1.1.1.2
                    foo:
                        - 8.8.4.4
                    bar:
                        - '*'

    :param username: Username to check against the API.
    :type username: str
    :param request: Cherrypy request to check against the API.
    :type request: cherrypy.request
    :return: True when the request is allowed (or no ACL is configured),
        False when explicitly denied. When an ACL exists but has no
        ``users`` mapping, returns None (falsy), which callers treat as
        a denial -- preserved for backwards compatibility.
    '''
    failure_str = ("[api_acl] Authentication failed for "
                   "user {0} from IP {1}")
    success_str = ("[api_acl] Authentication successful for "
                   "user {0} from IP {1}")
    pass_str = ("[api_acl] Authentication not checked for "
                "user {0} from IP {1}")

    # Pull the ACL, if any, out of the rest_cherrypy section of the
    # master config.
    acl = None
    salt_config = cherrypy.config.get('saltopts', None)
    if salt_config:
        cherrypy_conf = salt_config.get('rest_cherrypy', None)
        if cherrypy_conf:
            acl = cherrypy_conf.get('api_acl', None)

    ip = request.remote.ip

    if not acl:
        # No ACL configured: allow the request through unchecked
        logger.info(pass_str.format(username, ip))
        return True

    users = acl.get('users', {})
    if not users:
        # ACL present but empty: implicit denial (returns None, falsy),
        # matching the historical behavior of this function.
        return None

    # Prefer a user-specific entry; fall back to the '*' wildcard user
    if username in users:
        allowed_ips = users[username]
    elif '*' in users:
        allowed_ips = users['*']
    else:
        logger.info(failure_str.format(username, ip))
        return False

    if ip in allowed_ips or '*' in allowed_ips:
        logger.info(success_str.format(username, ip))
        return True

    logger.info(failure_str.format(username, ip))
    return False
def salt_ip_verify_tool():
    '''
    If there is a list of restricted IPs, verify current
    client is coming from one of those IPs.
    '''
    # Walk the config hierarchy with guard clauses; any missing level
    # means no IP restrictions are in effect.
    salt_config = cherrypy.config.get('saltopts', None)
    if not salt_config:
        return

    cherrypy_conf = salt_config.get('rest_cherrypy', None)
    if not cherrypy_conf:
        return

    auth_ip_list = cherrypy_conf.get('authorized_ips', None)
    if not auth_ip_list:
        return

    logger.debug("Found IP list: {0}".format(auth_ip_list))
    rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
    logger.debug("Request from IP: {0}".format(rem_ip))

    if rem_ip not in auth_ip_list:
        logger.error("Blocked IP: {0}".format(rem_ip))
        raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
    '''
    Redirect all unauthenticated requests to the login page
    '''
    session = cherrypy.session
    # No token in the session means this request has not authenticated yet
    if 'token' not in session:  # pylint: disable=W8601
        raise cherrypy.HTTPError(401)

    # Session is authenticated; mark the response as per-user so shared
    # caches do not store it
    cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
    '''
    Check a CORS preflight request and return a valid response
    '''
    request_headers = cherrypy.request.headers
    response_headers = cherrypy.response.headers

    requested_method = request_headers.get('Access-Control-Request-Method', None)
    permitted_methods = ['GET', 'POST']
    permitted_headers = ['X-Auth-Token', 'Content-Type']

    if requested_method and requested_method in permitted_methods:
        # Valid preflight: advertise what the API will accept
        response_headers['Access-Control-Allow-Methods'] = ', '.join(permitted_methods)
        response_headers['Access-Control-Allow-Headers'] = ', '.join(permitted_headers)
        response_headers['Connection'] = 'keep-alive'
        response_headers['Access-Control-Max-Age'] = '1400'

    # Empty body; the headers above are the whole response
    return {}
def cors_tool():
    '''
    Handle both simple and complex CORS requests

    Add CORS headers to each response. If the request is a CORS preflight
    request swap out the default handler with a simple, single-purpose handler
    that verifies the request and provides a valid CORS response.
    '''
    req_head = cherrypy.request.headers
    resp_head = cherrypy.response.headers

    # Always set response headers necessary for 'simple' CORS.
    # Echo the request's Origin back (falling back to '*') so
    # credentialed cross-origin requests are permitted.
    resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
    # NOTE(review): 'GET, POST' looks like a list of HTTP *methods*, but
    # Access-Control-Expose-Headers is supposed to list response *header*
    # names visible to browser scripts -- confirm the intended value.
    resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
    resp_head['Access-Control-Allow-Credentials'] = 'true'

    # If this is a non-simple CORS preflight request swap out the handler.
    if cherrypy.request.method == 'OPTIONS':
        cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference. The first entry (JSON) is used when the
# Accept header is ambiguous (see hypermedia_handler).
ct_out_map = (
    ('application/json', json.dumps),
    ('application/x-yaml', functools.partial(
        yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
    '''
    Determine the best output format based on the Accept header, execute the
    regular handler, and transform the output to the request content type (even
    if it's an error).

    :param args: Pass args through to the main handler
    :param kwargs: Pass kwargs through to the main handler
    :raises HTTPError: 401/400/503/504 for the corresponding Salt errors,
        406 for an unsupported Accept type, 500 when serialization fails
    '''
    # Execute the real handler. Handle or pass-through any errors we know how
    # to handle (auth & HTTP errors). Reformat any errors we don't know how to
    # handle as a data structure.
    try:
        cherrypy.response.processors = dict(ct_out_map)
        ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
    except (salt.exceptions.EauthAuthenticationError,
            salt.exceptions.TokenAuthenticationError):
        raise cherrypy.HTTPError(401)
    except salt.exceptions.SaltInvocationError:
        raise cherrypy.HTTPError(400)
    except (salt.exceptions.SaltDaemonNotRunning,
            salt.exceptions.SaltReqTimeoutError) as exc:
        raise cherrypy.HTTPError(503, exc.strerror)
    except (cherrypy.TimeoutError, salt.exceptions.SaltClientTimeout):
        raise cherrypy.HTTPError(504)
    except cherrypy.CherryPyException:
        raise
    except Exception as exc:
        import traceback

        logger.debug("Error while processing request for: %s",
                cherrypy.request.path_info,
                exc_info=True)

        cherrypy.response.status = 500

        # traceback.format_exc() takes no exception argument; it formats
        # the exception currently being handled. (Passing ``exc`` here,
        # as this code previously did, is interpreted as the ``limit``
        # parameter and raises a TypeError on Python 3.)
        ret = {
            'status': cherrypy.response.status,
            'return': '{0}'.format(traceback.format_exc())
            if cherrypy.config['debug']
            else "An unexpected error occurred"}

    # Raises 406 if requested content-type is not supported
    best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])

    # Transform the output from the handler into the requested output format
    cherrypy.response.headers['Content-Type'] = best
    out = cherrypy.response.processors[best]
    try:
        return out(ret)
    except Exception:
        msg = 'Could not serialize the return data from Salt.'
        logger.debug(msg, exc_info=True)
        raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
    '''
    Determine the best handler for the requested content type

    Wrap the normal handler and transform the output from that handler into the
    requested content type
    '''
    serving_request = cherrypy.serving.request
    # Stash the original handler, then interpose hypermedia_handler so the
    # response can be serialized into the negotiated content type.
    serving_request._hypermedia_inner_handler = serving_request.handler
    serving_request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False

    :param fn: a CherryPy request-body processor callable
    :return: a wrapper that invokes ``fn`` only when CherryPy has been told
        to process the request body
    '''
    # ``functools.wraps`` must be applied to the *inner* wrapper with ``fn``
    # as its argument. The previous form -- a bare ``@functools.wraps`` on
    # this decorator itself -- returned the undecorated function, so the
    # process_request_body check below was never actually applied.
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
    '''
    Accept x-www-form-urlencoded data (run through CherryPy's formatter)
    and reformat it into a Low State data structure.

    Since we can't easily represent complicated data structures with
    key-value pairs, any more complicated requirements (e.g. compound
    commands) must instead be delivered via JSON or YAML.

    For example::

    .. code-block:: bash

        curl -si localhost:8000 -d client=local -d tgt='*' \\
            -d fun='test.kwarg' -d arg='one=1' -d arg='two=2'

    :param entity: raw POST data
    '''
    # Let CherryPy's built-in urlencoded processor parse the body first
    cherrypy._cpreqbody.process_urlencoded(entity)
    request = cherrypy.serving.request
    request.unserialized_data = entity.params
    request.raw_body = ''
@process_request_body
def json_processor(entity):
    '''
    Unserialize raw POST data in JSON format to a Python data structure.

    :param entity: raw POST data
    '''
    body = entity.fp.read()
    request = cherrypy.serving.request
    try:
        unserialized = json.loads(body)
    except ValueError:
        raise cherrypy.HTTPError(400, 'Invalid JSON document')
    request.unserialized_data = unserialized
    request.raw_body = body
@process_request_body
def yaml_processor(entity):
    '''
    Unserialize raw POST data in YAML format to a Python data structure.

    :param entity: raw POST data
    :raises HTTPError: 400 when the body is not valid YAML
    '''
    body = entity.fp.read()
    try:
        cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
    except (ValueError, yaml.YAMLError):
        # PyYAML signals malformed documents with yaml.YAMLError (not
        # ValueError), so catching ValueError alone let parse errors
        # escape as 500s instead of the intended 400 response.
        raise cherrypy.HTTPError(400, 'Invalid YAML document')
    cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
    '''
    Attempt to unserialize plain text as JSON

    Some large services still send JSON with a text/plain Content-Type. Those
    services are bad and should feel bad.

    :param entity: raw POST data
    '''
    body = entity.fp.read()
    request = cherrypy.serving.request
    try:
        request.unserialized_data = json.loads(body)
    except ValueError:
        # Not JSON after all; hand the raw text through untouched
        request.unserialized_data = body
    request.raw_body = body
def hypermedia_in():
    '''
    Unserialize POST/PUT data of a specified Content-Type.

    The following custom processors all are intended to format Low State data
    and will place that data structure into the request object.

    :raises HTTPError: if the request contains a Content-Type that we do not
        have a processor for
    '''
    # Be liberal in what you accept
    ct_in_map = {
        'application/x-www-form-urlencoded': urlencoded_processor,
        'application/json': json_processor,
        'application/x-yaml': yaml_processor,
        'text/yaml': yaml_processor,
        'text/plain': text_processor,
    }

    req = cherrypy.request

    # Do not process the body for POST requests that have specified no content
    # or have not specified Content-Length
    is_post = req.method.upper() == 'POST'
    declared_length = req.headers.get('Content-Length', '0')
    if is_post and declared_length == '0':
        req.process_request_body = False

    req.unserialized_data = None

    # Replace CherryPy's stock processors with ours and reject anything
    # we do not have a processor for
    req.body.processors.clear()
    req.body.default_proc = cherrypy.HTTPError(
        406, 'Content type not supported')
    req.body.processors = ct_in_map
def lowdata_fmt():
    '''
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already been run.
    '''
    if cherrypy.request.method.upper() != 'POST':
        return

    data = cherrypy.request.unserialized_data

    # Anything that is not a non-empty mapping (e.g. an already-listed JSON
    # body) is passed through untouched; a single mapping -- typically from
    # urlencoded input, whose headers vary between clients -- is normalized
    # into a one-element lowstate list.
    if not data or not isinstance(data, collections.Mapping):
        cherrypy.serving.request.lowstate = data
        return

    # Make the 'arg' param a list if not already
    arg = data.get('arg')
    if 'arg' in data and not isinstance(arg, list):
        data['arg'] = [arg]

    # Finally, make a Low State and put it in request
    cherrypy.request.lowstate = [data]
# Register the tools above with CherryPy. The first argument selects the
# hook point in the request lifecycle; ``priority`` orders tools within a
# hook point (lower values run first), e.g. salt_token (55) runs before
# salt_auth (60) so the X-Auth-Token header can populate the session
# cookie before the auth check.
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
        html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
        salt_token_tool, priority=55)
cherrypy.tools.cors_tool = cherrypy.Tool('before_request_body',
        cors_tool, priority=50)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
        salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
        hypermedia_in)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
        lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
        hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
        salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
    '''
    The primary entry point to Salt's REST API
    '''
    exposed = True

    _cp_config = {
        'tools.sessions.on': True,
        'tools.sessions.timeout': 60 * 10,  # 10 hours

        # 'tools.autovary.on': True,

        'tools.hypermedia_out.on': True,
        'tools.hypermedia_in.on': True,
        'tools.lowdata_fmt.on': True,
        'tools.salt_ip_verify.on': True,
    }

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.api = salt.netapi.NetapiClient(self.opts)

    def exec_lowstate(self, client=None, token=None):
        '''
        Pull a Low State data structure from request and execute the low-data
        chunks through Salt. The low-data chunks will be updated to include the
        authorization token for the current session.

        :param client: optional client interface name forced onto every chunk
        :param token: optional auth token added to every chunk
        :return: a generator yielding each chunk's result in order
        '''
        lowstate = cherrypy.request.lowstate

        # Release the session lock before executing any potentially
        # long-running Salt commands. This allows different threads to execute
        # Salt commands concurrently without blocking.
        if cherrypy.request.config.get('tools.sessions.on', False):
            cherrypy.session.release_lock()

        # if the lowstate loaded isn't a list, lets notify the client
        if not isinstance(lowstate, list):
            raise cherrypy.HTTPError(400, 'Lowstates must be a list')

        # Make any requested additions or modifications to each lowstate, then
        # execute each one and yield the result.
        for chunk in lowstate:
            if token:
                chunk['token'] = token

            if cherrypy.session.get('user'):
                chunk['__current_eauth_user'] = cherrypy.session.get('user')
            if cherrypy.session.get('groups'):
                chunk['__current_eauth_groups'] = cherrypy.session.get('groups')

            if client:
                chunk['client'] = client

            # Make any 'arg' params a list if not already.
            # This is largely to fix a deficiency in the urlencoded format.
            if 'arg' in chunk and not isinstance(chunk['arg'], list):
                chunk['arg'] = [chunk['arg']]

            ret = self.api.run(chunk)

            # Sometimes Salt gives us a return and sometimes an iterator
            if isinstance(ret, collections.Iterator):
                for i in ret:
                    yield i
            else:
                yield ret

    def GET(self):
        '''
        An explanation of the API with links of where to go next

        .. http:get:: /

            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000

        .. code-block:: http

            GET / HTTP/1.1
            Host: localhost:8000
            Accept: application/json

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
        '''
        # (The unused ``import inspect`` that previously lived here has
        # been removed.)
        return {
            'return': "Welcome",
            'clients': salt.netapi.CLIENTS,
        }

    @cherrypy.tools.salt_token()
    @cherrypy.tools.salt_auth()
    def POST(self, **kwargs):
        '''
        Send one or more Salt commands in the request body

        .. http:post:: /

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body.

        **Example request:**

        .. code-block:: bash

            curl -sSik https://localhost:8000 \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -H "Content-type: application/json" \\
                -d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            X-Auth-Token: d40d1e1e
            Content-Type: application/json

            [{"client": "local", "tgt": "*", "fun": "test.ping"}]

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 200
            Allow: GET, HEAD, POST
            Content-Type: application/x-yaml

            return:
            - ms-0: true
              ms-1: true
              ms-2: true
              ms-3: true
              ms-4: true
        '''
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token')))
        }
class Minions(LowDataAdapter):
    '''
    Convenience URLs for working with minions
    '''
    # Require a session token and authentication on top of the base config
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, mid=None):
        '''
        A convenience URL for getting lists of minions or getting minion
        details

        .. http:get:: /minions/(mid)

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/minions/ms-3

        .. code-block:: http

            GET /minions/ms-3 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 129005
            Content-Type: application/x-yaml

            return:
            - ms-3:
                grains.items:
                    ...
        '''
        # Without a minion id, target all minions ('*')
        cherrypy.request.lowstate = [{
            'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
        }]
        return {
            'return': list(self.exec_lowstate(
                token=cherrypy.session.get('token'))),
        }

    def POST(self, **kwargs):
        '''
        Start an execution command and immediately return the job id

        .. http:post:: /minions

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :resheader Content-Type: |res_ct|

            :status 200: |200|
            :status 400: |400|
            :status 401: |401|
            :status 406: |406|

            :term:`lowstate` data describing Salt commands must be sent in the
            request body. The ``client`` option will be set to
            :py:meth:`~salt.client.LocalClient.local_async`.

        **Example request:**

        .. code-block:: bash

            curl -sSi localhost:8000/minions \\
                -b ~/cookies.txt \\
                -H "Accept: application/x-yaml" \\
                -d '[{"tgt": "*", "fun": "status.diskusage"}]'

        .. code-block:: http

            POST /minions HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml
            Content-Type: application/json

            tgt=*&fun=status.diskusage

        **Example response:**

        .. code-block:: http

            HTTP/1.1 202 Accepted
            Content-Length: 86
            Content-Type: application/x-yaml

            return:
            - jid: '20130603122505459265'
              minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
            _links:
              jobs:
              - href: /jobs/20130603122505459265
        '''
        # Force the async local client so the call returns a jid immediately
        job_data = list(self.exec_lowstate(client='local_async',
            token=cherrypy.session.get('token')))

        # 202 Accepted: the work continues after this response
        cherrypy.response.status = 202
        return {
            'return': job_data,
            '_links': {
                # NOTE(review): assumes every truthy result dict carries a
                # 'jid' key; a result without one would raise KeyError here
                # -- confirm against local_async's return shape.
                'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
                    for i in job_data if i],
            },
        }
class Jobs(LowDataAdapter):
    '''
    Convenience URLs for working with the Salt job cache
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self, jid=None, timeout=''):
        '''
        A convenience URL for getting lists of previously run jobs or getting
        the return from a single job

        .. http:get:: /jobs/(jid)

            List jobs or show a single job from the job cache.

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs

        .. code-block:: http

            GET /jobs HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
            - '20121130104633606931':
                Arguments:
                - '3'
                Function: test.fib
                Start Time: 2012, Nov 30 10:46:33.606931
                Target: jerry
                Target-type: glob

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/jobs/20121130104633606931

        .. code-block:: http

            GET /jobs/20121130104633606931 HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            info:
            - Arguments:
                - '3'
              Function: test.fib
              Minions:
                - jerry
              Start Time: 2012, Nov 30 10:46:33.606931
              Target: '*'
              Target-type: glob
              User: saltdev
              jid: '20121130104633606931'
            return:
            - jerry:
                - - 0
                - 1
                - 1
                - 2
                - 6.9141387939453125e-06
        '''
        lowstate = [{
            'client': 'runner',
            'fun': 'jobs.list_job' if jid else 'jobs.list_jobs',
            'jid': jid,
        }]

        cherrypy.request.lowstate = lowstate
        job_ret_info = list(self.exec_lowstate(
            token=cherrypy.session.get('token')))

        ret = {}
        if jid:
            ret['info'] = [job_ret_info[0]]
            minion_ret = {}
            # Default to an empty dict so a missing 'Result' key (e.g. an
            # expired or unknown jid) yields an empty return instead of an
            # AttributeError on None.
            returns = job_ret_info[0].get('Result', {})
            for minion in returns:
                # The previous u'return'/'return' branches were equivalent
                # (u-prefixed and plain keys compare equal on both Python 2
                # and 3), so a single lookup suffices.
                minion_ret[minion] = returns[minion].get('return')
            ret['return'] = [minion_ret]
        else:
            ret['return'] = [job_ret_info[0]]

        return ret
class Keys(LowDataAdapter):
    '''
    Convenience URLs for working with minion keys

    .. versionadded:: 2014.7.0

    These URLs wrap the functionality provided by the :py:mod:`key wheel
    module <salt.wheel.key>` functions.
    '''

    @cherrypy.config(**{'tools.salt_token.on': True})
    def GET(self, mid=None):
        '''
        Show the list of minion keys or detail on a specific key

        .. versionadded:: 2014.7.0

        .. http:get:: /keys/(mid)

            List all keys or show a specific key

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys

        .. code-block:: http

            GET /keys HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 165
            Content-Type: application/x-yaml

            return:
              local:
              - master.pem
              - master.pub
              minions:
              - jerry
              minions_pre: []
              minions_rejected: []

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/keys/jerry

        .. code-block:: http

            GET /keys/jerry HTTP/1.1
            Host: localhost:8000
            Accept: application/x-yaml

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 73
            Content-Type: application/x-yaml

            return:
              minions:
                jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
        '''
        # With a minion id show that key's fingerprint; otherwise list all
        if mid:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.finger',
                'match': mid,
            }]
        else:
            lowstate = [{
                'client': 'wheel',
                'fun': 'key.list_all',
            }]

        cherrypy.request.lowstate = lowstate
        result = self.exec_lowstate(token=cherrypy.session.get('token'))

        return {'return': next(result, {}).get('data', {}).get('return', {})}

    @cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
    def POST(self, **kwargs):
        r'''
        Easily generate keys for a minion and auto-accept the new key

        Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>`.

        Example partial kickstart script to bootstrap a new minion:

        .. code-block:: text

            %post
            mkdir -p /etc/salt/pki/minion
            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                | tar -C /etc/salt/pki/minion -xf -

            mkdir -p /etc/salt/minion.d
            printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
            %end

        .. http:post:: /keys

            Generate a public and private key and return both as a tarball

            Authentication credentials must be passed in the request.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -sSk https://localhost:8000/keys \
                    -d mid=jerry \
                    -d username=kickstart \
                    -d password=kickstart \
                    -d eauth=pam \
                    -o jerry-salt-keys.tar

        .. code-block:: http

            POST /keys HTTP/1.1
            Host: localhost:8000

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Length: 10240
            Content-Disposition: attachment; filename="saltkeys-jerry.tar"
            Content-Type: application/x-tar

            jerry.pub0000644000000000000000000000070300000000000010730 0ustar  00000000000000
        '''
        lowstate = cherrypy.request.lowstate
        lowstate[0].update({
            'client': 'wheel',
            'fun': 'key.gen_accept',
        })

        # The wheel function expects 'id_' rather than 'mid'
        if 'mid' in lowstate[0]:
            lowstate[0]['id_'] = lowstate[0].pop('mid')

        result = self.exec_lowstate()
        ret = next(result, {}).get('data', {}).get('return', {})

        pub_key = ret.get('pub', '')
        pub_key_file = tarfile.TarInfo('minion.pub')
        pub_key_file.size = len(pub_key)

        priv_key = ret.get('priv', '')
        priv_key_file = tarfile.TarInfo('minion.pem')
        priv_key_file.size = len(priv_key)

        # NOTE(review): on Python 3 tarfile expects binary streams; this
        # StringIO-based path works on Python 2 -- confirm before porting.
        fileobj = six.moves.StringIO()
        tarball = tarfile.open(fileobj=fileobj, mode='w')
        tarball.addfile(pub_key_file, six.moves.StringIO(pub_key))
        tarball.addfile(priv_key_file, six.moves.StringIO(priv_key))
        tarball.close()

        headers = cherrypy.response.headers
        headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
        headers['Content-Type'] = 'application/x-tar'
        # ``fileobj.len`` exists only on Python 2's StringIO class;
        # ``len(fileobj.getvalue())`` yields the same value portably.
        headers['Content-Length'] = len(fileobj.getvalue())
        headers['Cache-Control'] = 'no-cache'

        fileobj.seek(0)
        return fileobj
class Login(LowDataAdapter):
    '''
    Log in to receive a session token

    :ref:`Authentication information <rest_cherrypy-auth>`.
    '''

    def __init__(self, *args, **kwargs):
        super(Login, self).__init__(*args, **kwargs)

        self.auth = salt.auth.Resolver(self.opts)

    def GET(self):
        '''
        Present the login interface

        .. http:get:: /login

            An explanation of how to log in.

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -i localhost:8000/login

        .. code-block:: http

            GET /login HTTP/1.1
            Host: localhost:8000
            Accept: text/html

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: text/html
        '''
        cherrypy.response.headers['WWW-Authenticate'] = 'Session'

        return {
            'status': cherrypy.response.status,
            'return': "Please log in",
        }

    def POST(self, **kwargs):
        '''
        :ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system

        .. http:post:: /login

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :reqheader Content-Type: |req_ct|

            :form eauth: the eauth backend configured for the user
            :form username: username
            :form password: password

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|

        **Example request:**

        .. code-block:: bash

            curl -si localhost:8000/login \\
                -c ~/cookies.txt \\
                -H "Accept: application/json" \\
                -H "Content-type: application/json" \\
                -d '{
                    "username": "saltuser",
                    "password": "saltuser",
                    "eauth": "auto"
                }'

        .. code-block:: http

            POST / HTTP/1.1
            Host: localhost:8000
            Content-Length: 42
            Content-Type: application/json
            Accept: application/json

            {"username": "saltuser", "password": "saltuser", "eauth": "auto"}

        **Example response:**

        .. code-block:: http

            HTTP/1.1 200 OK
            Content-Type: application/json
            Content-Length: 206
            X-Auth-Token: 6d1b722e
            Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/

            {"return": {
                "token": "6d1b722e",
                "start": 1363805943.776223,
                "expire": 1363849143.776224,
                "user": "saltuser",
                "eauth": "pam",
                "perms": [
                    "grains.*",
                    "status.*",
                    "sys.*",
                    "test.*"
                ]
            }}
        '''
        if not self.api._is_master_running():
            raise salt.exceptions.SaltDaemonNotRunning(
                'Salt Master is not available.')

        # the urlencoded_processor will wrap this in a list
        if isinstance(cherrypy.serving.request.lowstate, list):
            creds = cherrypy.serving.request.lowstate[0]
        else:
            creds = cherrypy.serving.request.lowstate

        username = creds.get('username', None)
        # Validate against the whitelist.
        if not salt_api_acl_tool(username, cherrypy.request):
            raise cherrypy.HTTPError(401)

        # Mint token.
        token = self.auth.mk_token(creds)
        if 'token' not in token:
            raise cherrypy.HTTPError(401,
                    'Could not authenticate using provided credentials')

        cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
        cherrypy.session['token'] = token['token']
        cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
        cherrypy.session['user'] = token['name']
        if 'groups' in token:
            cherrypy.session['groups'] = token['groups']

        # Grab eauth config for the current backend for the current user
        try:
            eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})

            if token['eauth'] == 'django' and '^model' in eauth:
                perms = token['auth_list']
            else:
                # Get sum of '*' perms, user-specific perms, and group-specific perms.
                # Copy the configured list before extending it -- extending
                # the original in place would leak '*' and group perms into
                # the shared external_auth config for subsequent logins.
                perms = list(eauth.get(token['name'], []))
                perms.extend(eauth.get('*', []))

                if 'groups' in token and token['groups']:
                    user_groups = set(token['groups'])
                    eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])

                    for group in user_groups & eauth_groups:
                        perms.extend(eauth['{0}%'.format(group)])

            if not perms:
                logger.debug("Eauth permission list not found.")
        except Exception:
            logger.debug("Configuration for external_auth malformed for "
                         "eauth '{0}', and user '{1}'."
                         .format(token.get('eauth'), token.get('name')), exc_info=True)
            perms = None

        return {'return': [{
            'token': cherrypy.session.id,
            'expire': token['expire'],
            'start': token['start'],
            'user': token['name'],
            'eauth': token['eauth'],
            'perms': perms or {},
        }]}
class Logout(LowDataAdapter):
    '''
    Invalidate the active session (log the client out).
    '''
    # A logout request must carry a valid token but has no command
    # payload, so lowstate parsing is switched off for this URL.
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
        'tools.lowdata_fmt.on': False,
    })

    def POST(self):
        '''
        Destroy the currently active session and expire the session cookie.
        '''
        # Tell the client its cookie is no longer valid...
        cherrypy.lib.sessions.expire()
        # ...and replace the server-side session with a fresh, anonymous one.
        cherrypy.session.regenerate()
        return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
    '''
    Run commands without the normal session handling.

    .. http:post:: /run

        "One-off" command endpoint: sessions are disabled, so every
        request must carry full Salt eauth credentials in its
        :term:`lowstate` body. Alternatively the ``ssh`` client may be
        used, in which case no eauth credentials are supplied and
        authentication is handled by the SSH layer (only a roster file
        is required; no running master is needed). Otherwise this URL
        behaves exactly like the :py:meth:`root URL (/)
        <LowDataAdapter.POST>`.

        :status 200: |200|
        :status 400: |400|
        :status 401: |401|
        :status 406: |406|
    '''
    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.sessions.on': False,
    })

    def POST(self, **kwargs):
        '''
        Execute the lowstate from the request body and return the results.
        '''
        results = list(self.exec_lowstate())
        return {'return': results}
class Events(object):
    '''
    Expose the Salt event bus as an HTTP stream.

    Events are delivered per the Server Sent Events (SSE) spec, one JSON
    payload per event. A non-standard ``tag`` field is included in each
    record so simple clients can filter on tags without deserializing
    the JSON body (SSE-compliant clients ignore unknown fields).

    .. seealso:: :ref:`events`
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.resolver = salt.auth.Resolver(self.opts)

    def _is_valid_token(self, auth_token):
        '''
        Return True when *auth_token* is a live salt-api session token
        or a raw Salt eauth token, False otherwise.
        '''
        if auth_token is None:
            return False

        # A salt-api token is a session id that maps to a real Salt
        # token in the session cache; anything not found there is
        # treated as a raw Salt eauth token.
        session_data, _ = cherrypy.session.cache.get(auth_token, ({}, None))
        candidate = session_data.get('token', auth_token)

        # Only existence is checked -- the eauth system has no
        # per-token perms for the event stream.
        return bool(candidate and self.resolver.get_token(candidate))

    def GET(self, token=None, salt_token=None):
        r'''
        An HTTP stream of the Salt master event bus (SSE format).

        .. http:get:: /events

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :query token: **optional** session token as a URL parameter,
                for EventSource clients that cannot set the X-Auth-Token
                header (e.g. browsers without CORS support). E.g.,
                ``curl -NsS localhost:8000/events?token=308650d``
            :query salt_token: **optional** raw Salt *eauth token* (not
                the token returned from the /login URL). E.g.,
                ``curl -NsS localhost:8000/events?salt_token=30742765``

        Records are separated by blank lines; each record carries a
        ``tag:`` line and a ``data:`` line holding the JSON event.
        curl's ``-N`` flag (no input buffering) is required to process
        the stream incrementally from the shell.
        '''
        # Token precedence: explicit URL params first, then the session
        # cookie set at login time.
        cookie_jar = cherrypy.request.cookie
        session_cookie = None
        if 'session_id' in cookie_jar:
            session_cookie = cookie_jar['session_id'].value
        auth_token = token or salt_token or session_cookie

        if not self._is_valid_token(auth_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        headers = cherrypy.response.headers
        headers['Content-Type'] = 'text/event-stream'
        headers['Cache-Control'] = 'no-cache'
        headers['Connection'] = 'keep-alive'

        def listen():
            '''
            Yield Salt events formatted as SSE records.
            '''
            event = salt.utils.event.get_event(
                    'master',
                    sock_dir=self.opts['sock_dir'],
                    transport=self.opts['transport'],
                    opts=self.opts,
                    listen=True)
            stream = event.iter_events(full=True, auto_reconnect=True)

            # Ask clients to retry after 400ms on disconnect.
            yield u'retry: {0}\n'.format(400)

            while True:
                data = next(stream)
                yield u'tag: {0}\n'.format(data.get('tag', ''))
                yield u'data: {0}\n\n'.format(json.dumps(data))

        return listen()
class WebsocketEndpoint(object):
    '''
    Open a WebSocket connection to Salt's event bus.

    Provides the same real-time window into a running Salt
    infrastructure as :py:class:`Events`, but over the websocket
    transport instead of SSE.

    .. seealso:: :ref:`events`
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'response.stream': True,
        'tools.encode.encoding': 'utf-8',

        # Auth handled manually below
        'tools.salt_token.on': True,
        'tools.salt_auth.on': False,

        'tools.hypermedia_in.on': False,
        'tools.hypermedia_out.on': False,
        'tools.websocket.on': True,
        'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.auth = salt.auth.LoadAuth(self.opts)

    def GET(self, token=None, **kwargs):
        '''
        Upgrade the connection to a websocket and stream Salt events.

        .. http:get:: /ws/(token)

            :query format_events: when present, events undergo
                server-side formatting before being pushed to the
                client (useful to avoid client-side formatting).
            :reqheader X-Auth-Token: an authentication token from
                :py:class:`~Login`. It may instead be passed as the
                URL segment for browsers that cannot send the header
                or cookie.
            :status 101: switching to the websockets protocol
            :status 401: |401|
            :status 406: |406|

        Clients activate real-time updates by sending the text
        ``websocket client ready`` once the socket is open.
        '''
        # Pulling the session token from a URL segment is a workaround
        # for browsers not supporting CORS in the EventSource API.
        if token:
            cached_session, _ = cherrypy.session.cache.get(token, ({}, None))
            salt_token = cached_session.get('token')
        else:
            salt_token = cherrypy.session.get('token')

        # Manually verify the token
        if not salt_token or not self.auth.get_tok(salt_token):
            raise cherrypy.HTTPError(401)

        # Release the session lock before starting the long-running response
        cherrypy.session.release_lock()

        # The handler is the server-side end of the websocket; a fresh
        # instance is spawned for each request.
        handler = cherrypy.request.ws_handler

        def event_stream(handler, pipe):
            '''
            Push Salt events (optionally server-side formatted) through
            the websocket handler. Runs in a child process.
            '''
            # Block until the parent end of the pipe signals readiness.
            pipe.recv()

            event = salt.utils.event.get_event(
                    'master',
                    sock_dir=self.opts['sock_dir'],
                    transport=self.opts['transport'],
                    opts=self.opts,
                    listen=True)
            stream = event.iter_events(full=True, auto_reconnect=True)
            SaltInfo = event_processor.SaltInfo(handler)

            def signal_handler(signal, frame):
                os._exit(0)

            signal.signal(signal.SIGTERM, signal_handler)

            while True:
                data = next(stream)
                if not data:
                    continue
                try:
                    # Guard against non-UTF-8 payloads on the bus.
                    if 'format_events' in kwargs:
                        SaltInfo.process(data, salt_token, self.opts)
                    else:
                        handler.send('data: {0}\n\n'.format(
                            json.dumps(data)), False)
                except UnicodeDecodeError:
                    logger.error(
                            "Error: Salt event has non UTF-8 data:\n{0}"
                            .format(data))

        parent_pipe, child_pipe = Pipe()
        handler.pipe = parent_pipe
        handler.opts = self.opts
        # One async push process is kicked off per GET request.
        proc = Process(target=event_stream, args=(handler, child_pipe))
        proc.start()
class Webhook(object):
    '''
    A generic web hook entry point that fires an event on Salt's event bus.

    External services (CI servers, Amazon SNS, GitHub web hooks, ...)
    can POST data to this URL to trigger an event in Salt. The event tag
    is ``salt/netapi/hook`` with the URL path appended -- a POST to
    ``/hook/mycompany/myapp/mydata`` produces the tag
    ``salt/netapi/hook/mycompany/myapp/mydata``. The event data is taken
    from the request body; the :mailheader:`Content-Type` header is
    respected for the payload.

    .. note:: Be mindful of security

        Salt's Reactor can run any code. A Reactor SLS that responds to
        a hook event is responsible for validating that the event came
        from a trusted source and contains valid data.

        **This is a generic interface and securing it is up to you!**

        Authentication can be selectively disabled for this URL (see
        ``webhook_disable_auth``) for services that cannot authenticate;
        if you do so, follow best practices -- always use SSL, pass a
        secret key, restrict the firewall to known sources, etc.

    .. seealso:: :ref:`events`, :ref:`reactor`
    '''
    exposed = True
    tag_base = ['salt', 'netapi', 'hook']

    _cp_config = dict(LowDataAdapter._cp_config, **{
        # Don't do any lowdata processing on the POST data
        'tools.lowdata_fmt.on': True,

        # Auth can be overridden in __init__().
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.event = salt.utils.event.get_event(
                'master',
                sock_dir=self.opts['sock_dir'],
                transport=self.opts['transport'],
                opts=self.opts,
                listen=False)

        if cherrypy.config['apiopts'].get('webhook_disable_auth'):
            self._cp_config['tools.salt_token.on'] = False
            self._cp_config['tools.salt_auth.on'] = False

    def POST(self, *args, **kwargs):
        '''
        Fire an event in Salt with a custom event tag and data.

        .. http:post:: /hook

            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
            :status 413: request body is too large

        The response is ``{"success": true}`` when the event was fired.
        '''
        # Tag = base prefix + remaining URL path segments.
        tag = '/'.join(itertools.chain(self.tag_base, args))

        data = cherrypy.serving.request.unserialized_data or {}
        raw_body = getattr(cherrypy.serving.request, 'raw_body', '')

        payload = {
            'body': raw_body,
            'post': data,
            'headers': dict(cherrypy.request.headers),
        }
        ret = self.event.fire_event(payload, tag)
        return {'success': ret}
class Stats(object):
    '''
    Expose statistics on the running CherryPy server.
    '''
    exposed = True

    _cp_config = dict(LowDataAdapter._cp_config, **{
        'tools.salt_token.on': True,
        'tools.salt_auth.on': True,
    })

    def GET(self):
        '''
        Return a dump of statistics collected from the CherryPy server.

        .. http:get:: /stats

            :reqheader X-Auth-Token: |req_token|
            :reqheader Accept: |req_accept|
            :resheader Content-Type: |res_ct|
            :status 200: |200|
            :status 401: |401|
            :status 406: |406|
        '''
        # Stats are only collected when the cpstats tool has attached
        # its registry to the logging module.
        if not hasattr(logging, 'statistics'):
            return {}

        # Late import
        try:
            from cherrypy.lib import cpstats
        except ImportError:
            logger.error('Import of cherrypy.cpstats failed. Possible '
                    'upstream bug here: https://github.com/cherrypy/cherrypy/issues/1444')
            return {}
        return cpstats.extrapolate_statistics(logging.statistics)
class App(object):
    '''
    Class to serve HTML5 apps.
    '''
    exposed = True

    def GET(self, *args):
        '''
        Serve the configured single static file, ignoring the remaining
        URL path. Useful for browser apps built on the HTML5 history API.

        .. http::get:: /app

            :reqheader X-Auth-Token: |req_token|
            :status 200: |200|
            :status 401: |401|
        '''
        app_file = cherrypy.config['apiopts']['app']
        return cherrypy.lib.static.serve_file(app_file)
class API(object):
    '''
    Collect configuration and URL map for building the CherryPy app.
    '''
    # Static URL -> handler-class map; dynamic entries are added by
    # _update_url_map() before instantiation.
    url_map = {
        'index': LowDataAdapter,
        'login': Login,
        'logout': Logout,
        'minions': Minions,
        'run': Run,
        'jobs': Jobs,
        'keys': Keys,
        'events': Events,
        'stats': Stats,
    }

    def _setattr_url_map(self):
        '''
        Instantiate each mapped class as an attribute on this instance;
        CherryPy uses class attributes to resolve URLs.
        '''
        for url, cls in six.iteritems(self.url_map):
            setattr(self, url, cls())

    def _update_url_map(self):
        '''
        Assemble any dynamic or configurable URLs.
        '''
        if HAS_WEBSOCKETS:
            self.url_map['ws'] = WebsocketEndpoint

        # Allow the Webhook URL to be overridden from the conf.
        hook_url = self.apiopts.get('webhook_url', 'hook').lstrip('/')
        self.url_map[hook_url] = Webhook

        # Enable the single-page JS app URL.
        if 'app' in self.apiopts:
            app_url = self.apiopts.get('app_path', 'app').lstrip('/')
            self.url_map[app_url] = App

    def __init__(self):
        self.opts = cherrypy.config['saltopts']
        self.apiopts = cherrypy.config['apiopts']
        self._update_url_map()
        self._setattr_url_map()

    def get_conf(self):
        '''
        Combine the CherryPy configuration with the rest_cherrypy config
        values pulled from the master config and return the CherryPy
        configuration.
        '''
        conf = {
            'global': {
                'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
                'server.socket_port': self.apiopts.get('port', 8000),
                'server.thread_pool': self.apiopts.get('thread_pool', 100),
                'server.socket_queue_size': self.apiopts.get('queue_size', 30),
                'engine.timeout_monitor.on': self.apiopts.get(
                    'expire_responses', True),
                'max_request_body_size': self.apiopts.get(
                    'max_request_body_size', 1048576),
                'debug': self.apiopts.get('debug', False),
                'log.access_file': self.apiopts.get('log_access_file', ''),
                'log.error_file': self.apiopts.get('log_error_file', ''),
            },
            '/': {
                'request.dispatch': cherrypy.dispatch.MethodDispatcher(),

                'tools.trailing_slash.on': True,
                'tools.gzip.on': True,

                'tools.cpstats.on': self.apiopts.get('collect_stats', False),

                'tools.html_override.on': True,
                'tools.cors_tool.on': True,
            },
        }

        if 'favicon' in self.apiopts:
            conf['/favicon.ico'] = {
                'tools.staticfile.on': True,
                'tools.staticfile.filename': self.apiopts['favicon'],
            }

        if self.apiopts.get('debug', False) is False:
            conf['global']['environment'] = 'production'

        # Serve static media if the directory has been set in the configuration
        if 'static' in self.apiopts:
            conf[self.apiopts.get('static_path', '/static')] = {
                'tools.staticdir.on': True,
                'tools.staticdir.dir': self.apiopts['static'],
            }

        # Add to global config
        cherrypy.config.update(conf['global'])

        return conf
def get_app(opts):
    '''
    Returns a WSGI app and a configuration dictionary

    :param opts: the Salt master configuration dict; the section named
        after this module's parent package (``rest_cherrypy``) supplies
        the salt-api options.
    :return: a ``(root, apiopts, cpyopts)`` tuple -- the CherryPy app
        root object, the salt-api options dict, and the CherryPy
        configuration produced by :py:meth:`API.get_conf`.
    '''
    apiopts = opts.get(__name__.rsplit('.', 2)[-2], {})  # rest_cherrypy opts

    # Add Salt and salt-api config options to the main CherryPy config dict
    cherrypy.config['saltopts'] = opts
    cherrypy.config['apiopts'] = apiopts

    root = API()  # cherrypy app
    cpyopts = root.get_conf()  # cherrypy app opts

    return root, apiopts, cpyopts
| 31.978085 | 250 | 0.575269 |
7adffba1ca679eea400ec7eeac7d960a50759561 | 1,019 | py | Python | exercises/Desafio042.py | zThiago15/Curso-em-Video | ef25e0497edb79bdfbe71fde485f4dafc0d2a0e6 | [
"MIT"
] | null | null | null | exercises/Desafio042.py | zThiago15/Curso-em-Video | ef25e0497edb79bdfbe71fde485f4dafc0d2a0e6 | [
"MIT"
] | null | null | null | exercises/Desafio042.py | zThiago15/Curso-em-Video | ef25e0497edb79bdfbe71fde485f4dafc0d2a0e6 | [
"MIT"
] | 1 | 2021-07-24T21:39:26.000Z | 2021-07-24T21:39:26.000Z | cores = {'limpar':'\033[m', 'vermelho':'\033[1;31m','ciano':'\033[1;36m','azul':'\033[1;34m','amarelo':'\033[1;33m','verde':'\033[1;32m'}
print('===== Tipo de Triângulo -> Aprimoramento do Desafio 35 =====')
print()
s1 = float(input('Primeiro segmento: '))
s2 = float(input('Segundo segmento: '))
s3 = float(input('Terceiro segmento: '))
if s1 < s2+s3 and s2 < s1+s3 and s3 < s1+s2:
print(f'{(cores["verde"])}PODE {(cores["limpar"])}se formar um triângulo! Do tipo:')
if s1 == s2 == s3: #Se forem os 3 segmentos iguais
print(f'{(cores["amarelo"])}EQUILÁTERO! {(cores["limpar"])}(Todos os lados são iguais).')
elif s1 != s2 != s3 != s1: #Se forem 3 segmentos diferentes
print(f'{(cores["azul"])}ESCALENO! {(cores["limpar"])}(Todos os lados diferentes).')
else: #Se forem 2 segmentos iguais e 1 diferente
print(f'{(cores["ciano"])}ISÓSCELES! {(cores["limpar"])}(Dois lados iguais).')
else:
print(f'{(cores["vermelho"])}NÃO PODE {(cores["limpar"])}se formar um triângulo!') | 48.52381 | 137 | 0.618253 |
7e6eb379909bc39935c35becf370cab55cd6693f | 2,743 | py | Python | web_app/routes.py | pcsubirachs/pybit_wallet | 0fb6c12841c66546d0466602cbd46d28376eb16f | [
"MIT"
] | null | null | null | web_app/routes.py | pcsubirachs/pybit_wallet | 0fb6c12841c66546d0466602cbd46d28376eb16f | [
"MIT"
] | null | null | null | web_app/routes.py | pcsubirachs/pybit_wallet | 0fb6c12841c66546d0466602cbd46d28376eb16f | [
"MIT"
] | null | null | null | # current app ppints to config in app.py
from flask import Blueprint, jsonify, request, render_template, current_app
from bitcoin import *
from web_app.make_address import make_address
from web_app.make_address import bp
from web_app.create_btc_wallet import create_wallet
import requests
from bs4 import BeautifulSoup
#
# ROUTING
#
this = Blueprint("this", __name__)
@this.route("/", methods=['POST', 'GET'])
def index():
    """Render the landing page with a freshly generated BTC wallet.

    A new (seed, private key, public key, address) tuple is created on
    every request and passed to the template for display.
    """
    # Tuple-unpack the wallet instead of indexing into the return value,
    # and drop the previously commented-out dead code.
    seed, private_key, public_key, address = create_wallet()
    return render_template('index.html', seed=seed, private_key=private_key,
                           public_key=public_key, address=address)
@this.route("/brain", methods=['POST', 'GET'])
def brain():
    """Derive a brain-wallet key pair and address from a user phrase."""
    # The passphrase typed into the form is the only source of entropy.
    phrase = request.form['user_phrase']

    # Private key = SHA-256 of the phrase; public key and address are
    # derived from it in the usual way.
    priv_key_brain = sha256(phrase)
    pub_key_brain = privtopub(priv_key_brain)
    add_brain = pubtoaddr(pub_key_brain)

    return render_template('index.html', phrase=phrase,
                           priv_key_brain=priv_key_brain,
                           pub_key_brain=pub_key_brain,
                           add_brain=add_brain)
@this.route("/multisig", methods=['POST', 'GET'])
def multi_sig():
    """Generate three independent wallets for a multisig demo page.

    The template receives ``seed_N``, ``private_key_N``, ``public_key_N``
    and ``address_N`` for N in 1..3 -- exactly the same keyword arguments
    as before, but built in a loop instead of three copy-pasted blocks.
    """
    context = {}
    for n in (1, 2, 3):
        seed, private_key, public_key, address = create_wallet()
        context['seed_%d' % n] = seed
        context['private_key_%d' % n] = private_key
        context['public_key_%d' % n] = public_key
        context['address_%d' % n] = address
    return render_template('index.html', **context)
@this.route("/test", methods=['POST', 'GET'])
def test():
    """Render the test page; on POST, echo the "todo" field to stdout."""
    if request.method == "POST":
        # Debug aid only: the submitted value is printed, not persisted.
        print(request.form.get("todo"))
    return render_template('test.html')
@this.route("/fknabt", methods=['POST', 'GET'])
def jinja():
    """Render the fknabt demo template."""
    template_name = 'PYBIT_fknabt.html'
    return render_template(template_name)
d28cd8c982dd0dd1aeffb307b4fe44ff810054c6 | 14,698 | py | Python | lib/datasets/pascal_voc.py | yihui-he/py-faster-rcnn | 48b9977eed0e6ab34f327bf548cbd5985bab6e81 | [
"BSD-2-Clause"
] | 15 | 2016-08-22T06:08:45.000Z | 2020-01-08T19:31:34.000Z | lib/datasets/pascal_voc.py | liyongsheng-tech/py-faster-rcnn | fe6ae69bdc0a41a4bcafbf277103d5642ed8a9dc | [
"BSD-2-Clause"
] | 1 | 2018-01-18T09:52:31.000Z | 2018-01-18T12:13:13.000Z | lib/datasets/pascal_voc.py | liyongsheng-tech/py-faster-rcnn | fe6ae69bdc0a41a4bcafbf277103d5642ed8a9dc | [
"BSD-2-Clause"
] | 9 | 2017-12-11T02:54:25.000Z | 2020-01-16T15:16:18.000Z | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import cPickle
import subprocess
import uuid
from voc_eval import voc_eval
from fast_rcnn.config import cfg
class pascal_voc(imdb):
    def __init__(self, image_set, year, devkit_path=None):
        """Construct the PASCAL VOC imdb for one split of one release.

        image_set -- split name, e.g. 'train', 'val', 'trainval', 'test'
        year -- VOC release year as a string, e.g. '2007'
        devkit_path -- optional VOCdevkit root; when None the default
            location under cfg.DATA_DIR is used (see _get_default_path)
        """
        imdb.__init__(self, 'voc_' + year + '_' + image_set)
        self._year = year
        self._image_set = image_set
        self._devkit_path = self._get_default_path() if devkit_path is None \
                            else devkit_path
        # Image data and annotations live under <devkit>/VOC<year>.
        self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
        # The 20 VOC object categories, with background at index 0.
        self._classes = ('__background__', # always index 0
                         'aeroplane', 'bicycle', 'bird', 'boat',
                         'bottle', 'bus', 'car', 'cat', 'chair',
                         'cow', 'diningtable', 'dog', 'horse',
                         'motorbike', 'person', 'pottedplant',
                         'sheep', 'sofa', 'train', 'tvmonitor')
        # Map class name -> contiguous label index.
        self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
        self._image_ext = '.jpg'
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        self._roidb_handler = self.selective_search_roidb
        # Random salt used to keep result filenames unique per run
        # (when 'use_salt' is enabled in self.config).
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'

        # PASCAL specific config options
        self.config = {'cleanup' : True,
                       'use_salt' : True,
                       'use_diff' : False,
                       'matlab_eval' : False,
                       'rpn_file' : None,
                       'min_size' : 2}

        assert os.path.exists(self._devkit_path), \
                'VOCdevkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
    def _get_default_path(self):
        """
        Return the default path where PASCAL VOC is expected to be installed.
        """
        # cfg.DATA_DIR comes from the fast_rcnn config module; the devkit
        # directory name encodes the year, e.g. 'VOCdevkit2007'.
        return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def selective_search_roidb(self):
"""
Return the database of selective search regions of interest.
Ground-truth ROIs are also included.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path,
self.name + '_selective_search_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
ss_roidb = self._load_selective_search_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self._load_selective_search_roidb(None)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print 'loading {}'.format(filename)
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = cPickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_selective_search_roidb(self, gt_roidb):
filename = os.path.abspath(os.path.join(cfg.DATA_DIR,
'selective_search_data',
self.name + '.mat'))
assert os.path.exists(filename), \
'Selective search data not found at: {}'.format(filename)
raw_data = sio.loadmat(filename)['boxes'].ravel()
box_list = []
for i in xrange(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = ds_utils.unique_boxes(boxes)
boxes = boxes[keep, :]
keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
    def _load_pascal_annotation(self, index):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC
        format.
        """
        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        if not self.config['use_diff']:
            # Exclude the samples labeled as difficult
            non_diff_objs = [
                obj for obj in objs if int(obj.find('difficult').text) == 0]
            # if len(non_diff_objs) != len(objs):
            #     print 'Removed {} difficult objects'.format(
            #         len(objs) - len(non_diff_objs))
            objs = non_diff_objs
        num_objs = len(objs)

        # Per-object storage. NOTE(review): uint16 assumes all pixel
        # coordinates fit in [0, 65535] — true for VOC image sizes.
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        # One-hot class overlaps: ground-truth boxes overlap their own class
        # with score 1.0.
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # "Seg" area for pascal is just the box area
        seg_areas = np.zeros((num_objs), dtype=np.float32)

        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            bbox = obj.find('bndbox')
            # Make pixel indexes 0-based
            x1 = float(bbox.find('xmin').text) - 1
            y1 = float(bbox.find('ymin').text) - 1
            x2 = float(bbox.find('xmax').text) - 1
            y2 = float(bbox.find('ymax').text) - 1
            cls = self._class_to_ind[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            # Inclusive pixel box area (VOC convention: +1 on each side).
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
        # Store overlaps sparsely; most entries are zero.
        overlaps = scipy.sparse.csr_matrix(overlaps)
        return {'boxes' : boxes,
                'gt_classes': gt_classes,
                'gt_overlaps' : overlaps,
                'flipped' : False,
                'seg_areas' : seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
path = os.path.join(
self._devkit_path,
'results',
'VOC' + self._year,
'Main',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
# the VOCdevkit expects 1-based indices
for k in xrange(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir = 'output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
ious = np.linspace(.5, 0.95, np.round((0.95-.5)/.05)+1, endpoint=True)
apss = []
for ov in ious:
aps = []
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=ov,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:
cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
apss.append(aps)
print('mmAP {:.4f}'.format(np.mean(apss)))
print('mAP@.5 {:.4f}'.format(np.mean(apss[0])))
print('mAP@.70 {:.4f}'.format(np.mean(apss[np.where(ious==.70)[0][0]])))
print('mAP@.75 {:.4f}'.format(np.mean(apss[np.where(ious==.75)[0][0]])))
#print('Mean AP = {:.4f}'.format(np.mean(aps)))
#print('~~~~~~~~')
#print('Results:')
#for ap in aps:
# print('{:.3f}'.format(ap))
#print('{:.3f}'.format(np.mean(aps)))
#print('~~~~~~~~')
#print('')
#print('--------------------------------------------------------------')
#print('Results computed with the **unofficial** Python eval code.')
#print('Results should be very close to the official MATLAB eval code.')
#print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
#print('-- Thanks, The Management')
#print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print '-----------------------------------------------------'
print 'Computing results with the official MATLAB eval code.'
print '-----------------------------------------------------'
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print('Running:\n{}'.format(cmd))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
    # Smoke test: build the 2007 trainval imdb and drop into an IPython shell
    # with `d` (the dataset) and `res` (its roidb) available for inspection.
    from datasets.pascal_voc import pascal_voc
    d = pascal_voc('trainval', '2007')
    res = d.roidb
    from IPython import embed; embed()
| 41.637394 | 81 | 0.539189 |
b8a8d8eb79bbf37aa565d3344479ec9a21044296 | 1,252 | py | Python | tests/server/performance_test.py | sethvargo/vaex | c610324316b2c0a14b8ceac2a30e202adc9da28b | [
"MIT"
] | 337 | 2016-02-11T07:36:35.000Z | 2018-12-10T07:17:35.000Z | tests/server/performance_test.py | sethvargo/vaex | c610324316b2c0a14b8ceac2a30e202adc9da28b | [
"MIT"
] | 127 | 2016-07-06T15:43:14.000Z | 2018-12-11T18:46:27.000Z | tests/server/performance_test.py | sethvargo/vaex | c610324316b2c0a14b8ceac2a30e202adc9da28b | [
"MIT"
] | 29 | 2016-10-05T14:15:28.000Z | 2018-11-29T10:17:00.000Z | import pytest
import contextlib
@contextlib.contextmanager
def small_buffer(ds, size=3):
    """Temporarily shrink the executor chunk size of a local dataset.

    Remote datasets are left untouched: the context simply yields.
    """
    if not ds.is_local():
        yield  # for remote datasets we don't support this ... or should we?
        return
    saved_size = ds.executor.chunk_size
    ds.executor.chunk_size = size
    ds._invalidate_selection_cache()
    try:
        yield
    finally:
        # Always restore the original chunk size, even on error.
        ds.executor.chunk_size = saved_size
def test_delayed(df_server, df_remote, webserver, client):
    """Check that delayed aggregations on a remote dataframe are batched
    into a single remote call, while eager calls cost one call each."""
    # Reference values computed on the server-side dataframe.
    xmin = df_server.x.min()
    xmax = df_server.x.max()
    remote_calls = df_remote.executor.remote_calls
    # import pdb; pdb.set_trace()
    # One eager aggregation -> exactly one remote call.
    assert df_remote.x.min() == xmin
    assert df_remote.executor.remote_calls == remote_calls + 1
    # off, top passes, bottom does not
    remote_calls = df_remote.executor.remote_calls
    df_remote.x.min()
    df_remote.x.max()
    # Two eager aggregations -> two remote calls.
    assert df_remote.executor.remote_calls == remote_calls + 2
    remote_calls = df_remote.executor.remote_calls
    # delay=True defers work: no remote call until execute().
    vmin = df_remote.x.min(delay=True)
    vmax = df_remote.x.max(delay=True)
    assert df_remote.executor.remote_calls == remote_calls
    df_remote.execute()
    assert vmin.get() == xmin
    assert vmax.get() == xmax
    # Both delayed tasks were batched into a single remote call.
    assert df_remote.executor.remote_calls == remote_calls + 1
| 30.536585 | 75 | 0.683706 |
843926d164dc7ae4adafa4af53dfc0c9dd8f404c | 1,473 | py | Python | src/advanced python/ad_vector.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | null | null | null | src/advanced python/ad_vector.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | 3 | 2019-12-26T05:13:55.000Z | 2020-03-07T06:59:56.000Z | src/advanced python/ad_vector.py | sudeep0901/python | 7a50af12e72d21ca4cad7f2afa4c6f929552043f | [
"MIT"
] | null | null | null | class Vector:
# def __init__(self, x, y):
# print("Object init method called")
# self.x = x
# self.y = y
# def __init__(self, **coords):
# print("Object init method called")
# self.__dict__.update(coords)
    # When an attribute name is prefixed with an underscore, the attribute is
    # treated as private and is not meant to be accessed from outside directly.
    # When the backing attribute starting with _ is missing, AttributeError is raised.
def __getattr__(self, name):
print("name=",name)
private_name = "_" + name
if private_name not in self.__dict__:
raise(AttributeError("object has notatribute {!r}".format(private_name)))
return getattr(self, private_name)
    def __setattr__(self, name, value):
        # Reject all attribute assignment: instances are effectively
        # immutable (writes must go through __dict__ directly).
        raise AttributeError("Can't set attr {!r}".format(name))
def __init__(self, **coords):
print("Object init method called")
private_coords = {'_' + k: v for k, v in coords.items()}
self.__dict__.update(private_coords)
def __repr__(self):
# return "{} {}, {}".format(
# self.__class__, self.x, self.y)
return "{}, {}".format(
self.__class__.__name__,
", ".join("{k}={v}".format(
k=k, v=self.__dict__[k])
for k in sorted(self.__dict__.keys()
)
)
)
# You can access an object's attribute dictionary using
# vars(obj)
# or obj.__dict__
5cde76fdb49b09332d922fd35c0b917b20741a49 | 1,881 | py | Python | qiskit/extensions/standard/x.py | luigizuccarelli/qiskit-sdk-py | c8f04404cacc1096264b9f6fa119e82d72030a59 | [
"Apache-2.0"
] | 1 | 2019-03-08T16:25:23.000Z | 2019-03-08T16:25:23.000Z | qiskit/extensions/standard/x.py | luigizuccarelli/qiskit-sdk-py | c8f04404cacc1096264b9f6fa119e82d72030a59 | [
"Apache-2.0"
] | null | null | null | qiskit/extensions/standard/x.py | luigizuccarelli/qiskit-sdk-py | c8f04404cacc1096264b9f6fa119e82d72030a59 | [
"Apache-2.0"
] | 1 | 2018-03-05T12:31:04.000Z | 2018-03-05T12:31:04.000Z | # -*- coding: utf-8 -*-
# pylint: disable=invalid-name
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Pauli X (bit-flip) gate.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import InstructionSet
from qiskit import QuantumCircuit
from qiskit import QuantumRegister
from qiskit.extensions.standard import header # pylint: disable=unused-import
class XGate(Gate):
"""Pauli X (bit-flip) gate."""
def __init__(self, qubit, circ=None):
"""Create new X gate."""
super().__init__("x", [], [qubit], circ)
def qasm(self):
"""Return OPENQASM string."""
qubit = self.arg[0]
return self._qasmif("x %s[%d];" % (qubit[0].name, qubit[1]))
def inverse(self):
"""Invert this gate."""
return self # self-inverse
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.x(self.arg[0]))
def x(self, q):
"""Apply X to q."""
if isinstance(q, QuantumRegister):
gs = InstructionSet()
for j in range(q.size):
gs.add(self.x((q, j)))
return gs
self._check_qubit(q)
return self._attach(XGate(q, self))
QuantumCircuit.x = x
CompositeGate.x = x
| 28.938462 | 79 | 0.636895 |
52586cf70f55c53802a3b6c2cc9d0affac5e7f2e | 762 | py | Python | exercises/ex5-ec.py | Zorlin/lp3thw | 225c8deea84df017ac1649265730d2363880906e | [
"Apache-1.1"
] | null | null | null | exercises/ex5-ec.py | Zorlin/lp3thw | 225c8deea84df017ac1649265730d2363880906e | [
"Apache-1.1"
] | null | null | null | exercises/ex5-ec.py | Zorlin/lp3thw | 225c8deea84df017ac1649265730d2363880906e | [
"Apache-1.1"
] | null | null | null | name = 'Zed A. Shaw'
age = 35 # not a lie
height = 74 # inches
weight = 180 # lbs
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'
print(f"Let's talk about {name}.")
print(f"He's {height} inches tall.")
print(f"He's {weight} pounds heavy.")
print("Actually that's not too heavy.")
print(f"He's got {eyes} eyes and {hair} hair.")
print(f"His teeth are usually {teeth} depending on the coffee.")
# this line is tricky, try to get it exactly right
total = age + height + weight
print(f"If I add {age}, {height}, and {weight} I get {total}.")
# Number of centimers in an inch
centimeters_per_inch = 2.54
kilograms_per_pound = 0.453592
print(f"He's {height * centimeters_per_inch } centimeters tall.")
print(f"He's {weight * kilograms_per_pound } kilograms heavy.")
| 28.222222 | 65 | 0.694226 |
1a489b9563122ec876320dfb0b2b8670498ba004 | 1,712 | py | Python | pysrc/utils.py | learnforpractice/pyeosk | 5fd0f0d32e0f24b78f7522b47f596e70a1b5f0e0 | [
"MIT"
] | 15 | 2018-10-08T08:58:36.000Z | 2022-02-25T05:35:09.000Z | pysrc/utils.py | learnforpractice/pyeosk | 5fd0f0d32e0f24b78f7522b47f596e70a1b5f0e0 | [
"MIT"
] | 7 | 2018-11-12T09:15:46.000Z | 2022-03-25T14:52:26.000Z | pysrc/utils.py | learnforpractice/pyeosk | 5fd0f0d32e0f24b78f7522b47f596e70a1b5f0e0 | [
"MIT"
] | 8 | 2018-11-13T08:52:23.000Z | 2021-04-24T18:22:17.000Z | import json
from . import eosapi
from . import config
def create_account_on_chain(from_account, new_account, balance, public_key):
assert len(new_account) == 12
assert balance <= 1.0
assert len(public_key) == 53 and public_key[:3] == 'EOS'
memo = '%s-%s'%(new_account, public_key)
return eosapi.transfer(from_account, 'signupeoseos', balance, memo)
def buyrambytes(payer, receiver, _bytes):
args = {"payer":payer,"receiver":receiver,"bytes":_bytes}
return eosapi.push_action(config.system_contract, 'buyrambytes', args, {payer:'active'})
def buyram(payer, receiver, quant):
args = {'payer':payer, 'receiver':receiver, 'quant':'%.4f %s'%(quant, config.main_token)}
return eosapi.push_action(config.system_contract, 'buyram', args, {payer:'active'})
def sellram(account, _bytes):
return eosapi.push_action(config.system_contract, 'sellram', {'account':account, 'bytes':_bytes}, {account:'active'})
def dbw(_from, _to, net, cpu, transfer=False):
args = {'from':_from,
'receiver':_to,
'stake_net_quantity':'%.4f %s'%(net, config.main_token),
'stake_cpu_quantity':'%.4f %s'%(cpu, config.main_token),
'transfer':transfer
}
return eosapi.push_action(config.system_contract, 'delegatebw', args, {_from:'active'})
def undbw(_from, _to, net, cpu, transfer=False):
args = {'from':_from,
'receiver':_to,
'unstake_net_quantity':'%.4f %s'%(net, config.main_token),
'unstake_cpu_quantity':'%.4f %s'%(cpu, config.main_token),
'transfer':transfer
}
return eosapi.push_action(config.system_contract, 'undelegatebw', args, {_from:'active'})
| 42.8 | 121 | 0.656542 |
0cddca92f84ac46000fb32a485d832c5bc672908 | 1,406 | py | Python | service/service/urls.py | NewsNerdsAtCoJMC/ProjectTicoTeam6 | 0cc88de0e69b07b736b5b6984e6f24008a2d7bff | [
"MIT"
] | null | null | null | service/service/urls.py | NewsNerdsAtCoJMC/ProjectTicoTeam6 | 0cc88de0e69b07b736b5b6984e6f24008a2d7bff | [
"MIT"
] | null | null | null | service/service/urls.py | NewsNerdsAtCoJMC/ProjectTicoTeam6 | 0cc88de0e69b07b736b5b6984e6f24008a2d7bff | [
"MIT"
] | null | null | null | """service URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from volunteers import views as volunteer_views
from organizations import views as organizations_views
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^$', volunteer_views.index, name='index'),
url(r'^volunteers/$', volunteer_views.detail, name='index'),
url(r'^organizations/$', organizations_views.detail, name='index'),
url(r'^about/$', volunteer_views.about, name='index'),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^admin/', admin.site.urls),
#url(r'^organizations/', organizations_views.home, name='home'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 41.352941 | 79 | 0.723329 |
6c8ebbe22ce848a8fef373b377d92bc08163a99c | 1,394 | py | Python | src/markdown_kernel/kernel.py | vatlab/markdown-kernel | 7b255e99555219165093fa5a5d2e5d4b6f9ef17e | [
"BSD-3-Clause"
] | 21 | 2018-03-29T09:49:48.000Z | 2020-05-31T22:34:58.000Z | src/markdown_kernel/kernel.py | vatlab/markdown-kernel | 7b255e99555219165093fa5a5d2e5d4b6f9ef17e | [
"BSD-3-Clause"
] | 5 | 2018-05-09T08:27:28.000Z | 2020-04-25T18:01:48.000Z | src/markdown_kernel/kernel.py | vatlab/markdown-kernel | 7b255e99555219165093fa5a5d2e5d4b6f9ef17e | [
"BSD-3-Clause"
] | 5 | 2018-05-24T02:47:23.000Z | 2022-02-15T12:26:52.000Z | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
from ipykernel.kernelbase import Kernel
from ._version import __version__
class markdown_kernel(Kernel):
implementation = 'markdown'
implementation_version = __version__
banner = "markdown kernel"
help_links = 'http://github.com/vatlab/markdown_kernel'
language = 'markdown'
language_info = {
'mimetype': 'text/markdown',
'name': 'Markdown',
'file_extension': '.md',
'pygments_lexer': 'markdown',
'codemirror_mode': 'markdown',
}
def do_execute(self,
code,
silent,
store_history=True,
user_expressions=None,
allow_stdin=False):
if not silent:
self.send_response(self.iopub_socket, 'display_data', {
'metadata': {},
'data': {
'text/markdown': code
}
})
return {
'status': 'ok',
'payload': [],
'execution_count': self.execution_count,
'user_expressions': {}
}
if __name__ == '__main__':
    # Entry point when launched as a script (kernel.json invokes this module):
    # hand control to IPKernelApp with our kernel class.
    from ipykernel.kernelapp import IPKernelApp
    IPKernelApp.launch_instance(kernel_class=markdown_kernel)
| 29.041667 | 77 | 0.573171 |
6c5be9986622a51a3012adf7a61a487928bf07cc | 10,775 | py | Python | bamsurgeon/replacereads.py | cdarby/bamsurgeon | b72562b6c92488cfb2d2e5ef1ccb76d7c181eb60 | [
"MIT"
] | 1 | 2019-12-23T01:38:45.000Z | 2019-12-23T01:38:45.000Z | bamsurgeon/replacereads.py | cdarby/bamsurgeon | b72562b6c92488cfb2d2e5ef1ccb76d7c181eb60 | [
"MIT"
] | null | null | null | bamsurgeon/replacereads.py | cdarby/bamsurgeon | b72562b6c92488cfb2d2e5ef1ccb76d7c181eb60 | [
"MIT"
] | 1 | 2019-12-23T01:38:51.000Z | 2019-12-23T01:38:51.000Z | #!/usr/bin/env python
import sys
import pysam
import argparse
import random
from collections import defaultdict
from string import maketrans
def rc(dna):
    """Return the reverse complement of *dna* (IUPAC codes, case-preserving)."""
    complement_table = maketrans('acgtrymkbdhvACGTRYMKBDHV', 'tgcayrkmvhdbTGCAYRKMVHDB')
    return dna.translate(complement_table)[::-1]
def cleanup(read,orig,RG):
    '''
    fixes unmapped reads that are marked as 'reverse'
    fill in read group at random from existing RGs if
    RG tags are present in .bam header
    Retain 10X-based tags from original BAM record, including Lariat linked-read alignment tags
    (see https://support.10xgenomics.com/genome-exome/software/pipelines/latest/output/bam)
    orig may be None (e.g. for donor-only reads added via --all); in that
    case no tags are copied from an original record.
    '''
    # Unmapped reads must not carry the 'reverse' flag; store the sequence
    # in forward orientation and reverse the quality string to match.
    if read.is_unmapped and read.is_reverse:
        read.is_reverse = False
        qual = read.qual
        read.seq = rc(read.seq)
        read.qual = qual[::-1]
    if read.mate_is_unmapped and read.mate_is_reverse:
        read.mate_is_reverse = False
        # mate seq/qual should be caught by the above logic
    if RG:
        hasRG = False
        if read.tags is not None:
            for tag in read.tags:
                if tag[0] == 'RG':
                    hasRG = True
        # use RG from original read if it exists
        if orig is not None:
            if not hasRG and orig.tags is not None:
                for tag in orig.tags:
                    if tag[0] == 'RG':
                        read.tags = read.tags + [tag]
                        hasRG = True
        if not hasRG:
            # give up and add random read group from list in header (e.g. for simulated reads)
            newRG = RG[random.randint(0,len(RG)-1)]
            read.tags = read.tags + [("RG",newRG)]
    # Copy 10X-specific tags from the original record. Guarding on orig
    # fixes an AttributeError: the previous code called orig.get_tag()
    # unconditionally, crashing when orig is None (the --all path).
    if orig is not None:
        tenxTags = [("BX","Z"),("BC","Z"),("QT","Z"),("RX","Z"),("QX","Z"),("TR","Z"),("TQ","Z"),("PC","i"),("PS","i"),("HP","i"),("MI","i"),("AS","i"),("XS","i"),("AM","A"),("XM","A"),("XT","i")]
        for (t,dtype) in tenxTags:
            try:
                tagval = orig.get_tag(t)
                read.set_tag(t,tagval,value_type=dtype)
            except KeyError:
                # Tag absent on the original read: nothing to copy.
                continue
    return read
def getRGs(bam):
    '''return list of RG IDs'''
    if 'RG' not in bam.header:
        return []
    return [entry['ID'] for entry in bam.header['RG']]
def getExcludedReads(file):
    '''read list of excluded reads into a dictionary

    Each stripped line of *file* becomes a key mapping to True.
    '''
    # Use a context manager so the handle is closed even if reading fails
    # (the previous code leaked the handle on exceptions).
    ex = {}
    with open(file, 'r') as f:
        for line in f:
            ex[line.strip()] = True
    return ex
def compare_ref(targetbam, donorbam):
    ''' if targetbam and donorbam are aligned to different references
        and the references are in a different order it's a problem
    '''
    for ref in targetbam.references:
        missing = ref not in donorbam.references
        if missing or donorbam.gettid(ref) != targetbam.gettid(ref):
            # Report the first mismatching contig and bail out.
            sys.stderr.write("contig mismatch: %s\n" % ref)
            return False
    return True
def replaceReads(targetbam, donorbam, outputbam, nameprefix=None, excludefile=None, allreads=False, keepqual=False, progress=False, keepsecondary=False, keepsupplementary=False, seed=None):
    ''' targetbam, donorbam, and outputbam are pysam.Samfile objects
        outputbam must be writeable and use targetbam as template
        read names in excludefile will not appear in final output

        Reads in the target BAM whose (name, pair-position) key matches a
        donor read are replaced by that donor read; all other target reads
        pass through unchanged. With allreads=True, unused donor reads are
        appended at the end.
    '''
    if seed is not None: random.seed(int(seed))

    # check whether references are compatible
    if not compare_ref(targetbam, donorbam):
        sys.exit("Target and donor are aligned to incompatable reference genomes!")

    RG = getRGs(targetbam) # read groups

    exclude = {}
    if excludefile:
        exclude = getExcludedReads(excludefile)

    # load reads from donorbam into dict
    sys.stdout.write("loading donor reads into dictionary...\n")

    #rdict = defaultdict(list)
    rdict = {}
    secondary = defaultdict(list) # track secondary alignments, if specified
    supplementary = defaultdict(list) # track supplementary alignments, if specified
    excount = 0 # number of excluded reads
    nullcount = 0 # number of null reads
    nr = 0
    # Pass 1: index every primary donor read by "<qname>,<F|S|U>" so a
    # target read can be matched on both name and pair position.
    for read in donorbam.fetch(until_eof=True):
        if read.seq is not None:
            if read.qname not in exclude:
                pairname = 'F' # read is first in pair
                if read.is_read2:
                    pairname = 'S' # read is second in pair
                if not read.is_paired:
                    pairname = 'U' # read is unpaired
                if nameprefix:
                    qual = read.qual # temp
                    read.qname = nameprefix + read.qname # must set name _before_ setting quality (see pysam docs)
                    read.qual = qual
                extqname = ','.join((read.qname,pairname))
                if not read.is_secondary and not read.is_supplementary:
                    rdict[extqname] = read
                    nr += 1
                elif read.is_secondary and keepsecondary:
                    secondary[extqname].append(read)
                elif read.is_supplementary and keepsupplementary:
                    supplementary[extqname].append(read)
            else: # no seq!
                excount += 1
        else:
            nullcount += 1

    print 'secondary reads count:'+ str(sum([len(v) for k,v in secondary.iteritems()]))
    print 'supplementary reads count:'+ str(sum([len(v) for k,v in supplementary.iteritems()]))

    sys.stdout.write("loaded " + str(nr) + " reads, (" + str(excount) + " excluded, " + str(nullcount) + " null or secondary or supplementary--> ignored)\n")

    excount = 0
    recount = 0 # number of replaced reads
    used = {}
    prog = 0

    ignored_target = 0 # number of supplemental / secondary reads in original
    # Pass 2: walk the target BAM, swapping in donor reads where keys match.
    for read in targetbam.fetch(until_eof=True):
        if read.is_secondary or read.is_supplementary:
            ignored_target += 1
            continue

        prog += 1
        if progress and prog % 10000000 == 0:
            sys.stdout.write("processed " + str(prog) + " reads.\n")

        if read.qname not in exclude:
            pairname = 'F' # read is first in pair
            if read.is_read2:
                pairname = 'S' # read is second in pair
            if not read.is_paired:
                pairname = 'U' # read is unpaired
            if nameprefix:
                qual = read.qual # temp
                read.qname = nameprefix + read.qname
                read.qual = qual
            extqname = ','.join((read.qname,pairname))
            #check if this read has been processed already. If so, skip to the next read
            if used.get(extqname): continue
            newReads = []
            if extqname in rdict:
                if keepqual:
                    try:
                        # Donor read keeps the target read's quality string.
                        rdict[extqname].qual = read.qual
                    except ValueError as e:
                        sys.stdout.write("error replacing quality score for read: " + str(rdict[extqname].qname) + " : " + str(e) + "\n")
                        sys.stdout.write("donor: " + str(rdict[extqname]) + "\n")
                        sys.stdout.write("target: " + str(read) + "\n")
                        sys.exit(1)
                newReads = [rdict[extqname]]
                used[extqname] = True
                recount += 1
            if extqname in secondary and keepsecondary:
                newReads.extend(secondary[extqname])
                used[extqname] = True
                recount += len(secondary[extqname])
            if extqname in supplementary and keepsupplementary:
                newReads.extend(supplementary[extqname])
                used[extqname] = True
                recount += len(supplementary[extqname])
            #non of the above, then write the original read back
            elif len(newReads) == 0:
                newReads = [read]
            assert(len(newReads) != 0)
            for newRead in newReads:
                newRead = cleanup(newRead,read,RG)
                outputbam.write(newRead)
        else:
            excount += 1

    sys.stdout.write("replaced " + str(recount) + " reads (" + str(excount) + " excluded )\n")
    sys.stdout.write("kept " + str(sum([len(v) for k,v in secondary.iteritems()])) + " secondary reads.\n")
    sys.stdout.write("kept " + str(sum([len(v) for k,v in supplementary.iteritems()])) + " supplementary reads.\n")
    sys.stdout.write("ignored %d non-primary reads in target BAM.\n" % ignored_target)

    nadded = 0
    # dump the unused reads from the donor if requested with --all
    if allreads:
        for extqname in rdict.keys():
            if extqname not in used and extqname not in exclude:
                rdict[extqname] = cleanup(rdict[extqname],None,RG)
                outputbam.write(rdict[extqname])
                nadded += 1
    sys.stdout.write("added " + str(nadded) + " reads due to --all\n")
def main(args):
    """Open target/donor/output BAMs, run the replacement, then close them."""
    target = pysam.Samfile(args.targetbam, 'rb')
    donor = pysam.Samfile(args.donorbam, 'rb')
    # The output inherits its header from the target BAM.
    output = pysam.Samfile(args.outputbam, 'wb', template=target)
    replaceReads(target, donor, output, args.namechange, args.exclfile,
                 args.all, args.keepqual, args.progress, args.keepsecondary,
                 args.keepsupplementary)
    for bam in (target, donor, output):
        bam.close()
if __name__=='__main__':
    # Command-line entry point: wire up argparse and delegate to main().
    parser = argparse.ArgumentParser(description='replaces aligned reads in bamfile1 with aligned reads from bamfile2')
    parser.add_argument('-b', '--bam', dest='targetbam', required=True, help='original .bam')
    parser.add_argument('-r', '--replacebam', dest='donorbam', required=True, help='.bam with reads to replace original bam')
    parser.add_argument('-o', '--outputbam', dest='outputbam', required=True, help="name for new .bam output")
    parser.add_argument('-n', '--namechange', dest='namechange', default=None, help="change all read names by prepending string (passed as -n [string])")
    parser.add_argument('-x', '--exclude', dest='exclfile', default=None, help="file containing a list of read names to ignore (exclude from output)")
    parser.add_argument('--all', action='store_true', default=False, help="append reads that don't match target .bam")
    parser.add_argument('--keepqual', action='store_true', default=False, help="keep original quality scores, replace read and mapping only for primary reads")
    parser.add_argument('--progress', action='store_true', default=False, help="output progress every 10M reads")
    parser.add_argument('--keepsecondary', action='store_true', default=False, help='keep secondary reads in final BAM')
    parser.add_argument('--keepsupplementary', action='store_true', default=False, help='keep supplementary reads in final BAM')
    args = parser.parse_args()
    main(args)
| 43.800813 | 192 | 0.598515 |
231f01577e4857beaeceadc7bab8cde98268a671 | 95 | py | Python | users/urls.py | qquokka/reboot-django | 2015091edf705ab70b82efb607c480bff6fba051 | [
"CNRI-Python"
] | null | null | null | users/urls.py | qquokka/reboot-django | 2015091edf705ab70b82efb607c480bff6fba051 | [
"CNRI-Python"
] | 9 | 2021-03-19T01:21:44.000Z | 2022-03-12T00:00:12.000Z | users/urls.py | qquokka/reboot-django | 2015091edf705ab70b82efb607c480bff6fba051 | [
"CNRI-Python"
] | null | null | null | from django.urls import path
from . import views
# No routes are registered yet; the commented entry shows the intended
# mapping of the site root to the app's index view.
urlpatterns = [
    # path('', views.index)
]
d683dd759f4d25b50621a081268258fb51e06d4d | 371 | py | Python | medcat/__init__.py | louni-g/MedCAT | 5ffa96b9cef2520ab3a4b406aa2ab880b38f1e13 | [
"MIT"
] | null | null | null | medcat/__init__.py | louni-g/MedCAT | 5ffa96b9cef2520ab3a4b406aa2ab880b38f1e13 | [
"MIT"
] | 2 | 2021-04-28T11:40:59.000Z | 2021-04-28T11:42:14.000Z | medcat/__init__.py | louni-g/MedCAT | 5ffa96b9cef2520ab3a4b406aa2ab880b38f1e13 | [
"MIT"
] | null | null | null | name = 'medcat'
# Hacky patch to the built-in copy module coz otherwise, thinc.config.Config.copy will fail on Python <= 3.6.
# (fixed in python 3.7 https://docs.python.org/3/whatsnew/3.7.html#re)
import sys
if sys.version_info.major == 3 and sys.version_info.minor <= 6:
import copy
import re
copy._deepcopy_dispatch[type(re.compile(''))] = lambda r, _: r
| 33.727273 | 109 | 0.703504 |
6c47ff27a86d37a356c39ed0dff3f597ffea2111 | 17 | py | Python | test.py | icrow78/KBpython_class_plus | af0409e33241444399068135d47cd12dcd849073 | [
"Apache-2.0"
] | null | null | null | test.py | icrow78/KBpython_class_plus | af0409e33241444399068135d47cd12dcd849073 | [
"Apache-2.0"
] | null | null | null | test.py | icrow78/KBpython_class_plus | af0409e33241444399068135d47cd12dcd849073 | [
"Apache-2.0"
] | null | null | null | print("Carlos")
| 5.666667 | 15 | 0.647059 |
b9e99c9f243755fc8527433c451ebbde9fc6e2fc | 1,815 | py | Python | Advanced-Course/04-Email-Confirmation-Advanced/resources/item.py | suzynakayama/python-flask-udemy | 95d2c5fa328e2f50d0893d73fd386fb713d1f12b | [
"MIT"
] | 1 | 2021-11-30T14:13:10.000Z | 2021-11-30T14:13:10.000Z | Advanced-Course/04-Email-Confirmation-Advanced/resources/item.py | suzynakayama/python-flask-udemy | 95d2c5fa328e2f50d0893d73fd386fb713d1f12b | [
"MIT"
] | null | null | null | Advanced-Course/04-Email-Confirmation-Advanced/resources/item.py | suzynakayama/python-flask-udemy | 95d2c5fa328e2f50d0893d73fd386fb713d1f12b | [
"MIT"
] | null | null | null | from flask_restful import Resource
from flask import request
from flask_jwt_extended import jwt_required, fresh_jwt_required
from models.item import ItemModel
from schemas.item import ItemSchema
from libs.strings.strings import gettext
item_schema = ItemSchema()
item_list_schema = ItemSchema(many=True)
class Item(Resource):
@classmethod
def get(cls, name: str):
item = ItemModel.find_by_name(name)
if item:
return item_schema.dump(item), 200
return {"message": gettext('item_not_found')}, 404
@classmethod
@fresh_jwt_required
def post(cls, name: str):
if ItemModel.find_by_name(name):
return {"message": gettext('item_name_already_exists').format(name)}, 400
item_json = request.get_json()
item_json["name"] = name
item = item_schema.load(item_json)
try:
item.save_to_db()
except:
return {"message": gettext('item_error_inserting')}, 500
return item_schema.dump(item), 201
@classmethod
@jwt_required
def delete(cls, name: str):
item = ItemModel.find_by_name(name)
if item:
item.delete_from_db()
return {"message": gettext('item_deleted')}, 200
return {"message": gettext('item_not_found')}, 404
@classmethod
def put(cls, name: str):
item_json = request.get_json()
item = ItemModel.find_by_name(name)
if item:
item.price = item_json["price"]
else:
item_json["name"] = name
item = item_schema.load(item_json)
item.save_to_db()
return item_schema.dump(item), 200
class ItemList(Resource):
@classmethod
def get(cls):
return {"items": item_list_schema.dump(ItemModel.find_all())}, 200
| 26.304348 | 85 | 0.637466 |
1913b583aa61ab656094c79bea7b502dcb24aca7 | 1,270 | py | Python | create_gpu_metrics.py | realnaveen/gcp-gpu-utilization-metrics | 6e62ea324bf097817474b51119786e8222dd9fdf | [
"MIT"
] | 13 | 2018-10-19T20:04:45.000Z | 2021-07-23T01:34:38.000Z | create_gpu_metrics.py | realnaveen/gcp-gpu-utilization-metrics | 6e62ea324bf097817474b51119786e8222dd9fdf | [
"MIT"
] | 3 | 2019-01-22T03:42:49.000Z | 2019-06-10T07:49:57.000Z | create_gpu_metrics.py | realnaveen/gcp-gpu-utilization-metrics | 6e62ea324bf097817474b51119786e8222dd9fdf | [
"MIT"
] | 3 | 2019-02-16T01:59:42.000Z | 2019-06-04T07:47:02.000Z | import os
from google.cloud import monitoring_v3
project_id = (os.environ['GOOGLE_CLOUD_PROJECT'] or
os.environ['GCLOUD_PROJECT'])
class MissingProjectIdError(Exception):
pass
if not project_id:
raise MissingProjectIdError(
'Set the environment variable ' +
'GCLOUD_PROJECT to your Google Cloud Project Id.')
GPU_UTILIZATION_METRIC_NAME = "gpu_utilization"
GPU_MEMORY_UTILIZATION_METRIC_NAME = "gpu_memory_utilization"
client = monitoring_v3.MetricServiceClient()
project_name = client.project_path(project_id)
def add_new_metrics(type, desc):
descriptor = monitoring_v3.types.MetricDescriptor()
descriptor.type = 'custom.googleapis.com/{type}'.format(
type=type)
descriptor.metric_kind = (
monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)
descriptor.value_type = (
monitoring_v3.enums.MetricDescriptor.ValueType.INT64)
descriptor.description = desc
descriptor = client.create_metric_descriptor(project_name, descriptor)
print('Created {}.'.format(descriptor.name))
add_new_metrics(GPU_UTILIZATION_METRIC_NAME, 'Metric for GPU utilization.')
add_new_metrics(GPU_MEMORY_UTILIZATION_METRIC_NAME,
'Metric for GPU memory utilization.')
| 30.97561 | 75 | 0.746457 |
47125592ea6ad498f7b7a17b936356bce7238586 | 3,200 | py | Python | image_processing/tracker.py | wiktorowski211/Fruit-Viking | d45c35a56425c4d42a2ee59586bd559037cf07db | [
"MIT"
] | null | null | null | image_processing/tracker.py | wiktorowski211/Fruit-Viking | d45c35a56425c4d42a2ee59586bd559037cf07db | [
"MIT"
] | 2 | 2019-05-22T09:06:16.000Z | 2019-05-28T21:38:04.000Z | image_processing/tracker.py | wiktorowski211/Fruit-Viking | d45c35a56425c4d42a2ee59586bd559037cf07db | [
"MIT"
] | 1 | 2019-03-21T13:02:29.000Z | 2019-03-21T13:02:29.000Z | import cv2
import numpy as np
import time
from threading import Thread
from image_processing.camera import Camera
from image_processing.prediction import Prediction
class Tracker:
def __init__(self, camera, prediction):
self.prediction = prediction
self.camera = camera
self.previous_position = 0, 0
# loop
self.frequency = 30
self.running = False
self.position = 0, 0
# image processing
self.kernel_open = np.ones((5, 5))
self.kernel_close = np.ones((20, 20))
# should be None but since we don't have
# color picking we set it to torch color
self.lower_color = (-10, -10, 245)
self.upper_color = (10, 10, 265)
def start(self):
self.running = True
thread = Thread(target=self.loop)
thread.start()
def stop(self):
self.running = False
def loop(self):
while self.running:
start = time.time()
# Heavy load
img = self.camera.image()
position = self.get_position(img)
timestamp = time.time() * 1000
predicted = self.prediction.process(position, 50, timestamp)
self.position = predicted
# End of heavy load
sleep_time = 1. / self.frequency - (time.time() - start)
if sleep_time > 0:
time.sleep(sleep_time)
def get_position(self, img):
if self.is_color_set():
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(img_hsv, self.lower_color, self.upper_color)
mask_open = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel_open)
mask_close = cv2.morphologyEx(mask_open, cv2.MORPH_CLOSE, self.kernel_close)
contours, h = cv2.findContours(mask_close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) > 0:
largest_contour = max(contours, key=cv2.contourArea)
self.previous_position = self.highpoint(largest_contour)
return self.previous_position
else:
return self.previous_position
else:
return self.previous_position
def highpoint(self, contour):
return tuple(contour[contour[:, :, 1].argmin()][0])
def set_color(self, color):
self.lower_color, self.upper_color = color
def is_color_set(self):
return self.lower_color is not None and self.upper_color is not None
if __name__ == '__main__':
camera = Camera(1280, 720)
prediction = Prediction(200)
tracker = Tracker(camera, prediction)
while True:
start = time.time()
# Heavy load
img = camera.image()
position = tracker.get_position(img)
timestamp = time.time() * 1000
predicted = prediction.process(position, 50, timestamp)
camera.draw_circle(img, position, -1, (0, 0, 255))
camera.draw_circle(img, predicted, 10, (255, 0, 0))
camera.show(img)
# End of heavy load
sleep_time = 1. / 30 - (time.time() - start)
if sleep_time > 0:
time.sleep(sleep_time)
| 26.229508 | 98 | 0.598438 |
e3bc5780854aa218bf65738b8c586d8def7a7194 | 83 | py | Python | misc/deployment/uwsgi-flask-fork/load_model.py | ebiggerr/malaya | be757c793895522f80b929fe82353d90762f7fff | [
"MIT"
] | 88 | 2021-01-06T10:01:31.000Z | 2022-03-30T17:34:09.000Z | misc/deployment/uwsgi-flask-fork/load_model.py | zulkiflizaki/malaya | 2358081bfa43aad57d9415a99f64c68f615d0cc4 | [
"MIT"
] | 43 | 2021-01-14T02:44:41.000Z | 2022-03-31T19:47:42.000Z | misc/deployment/uwsgi-flask-fork/load_model.py | zulkiflizaki/malaya | 2358081bfa43aad57d9415a99f64c68f615d0cc4 | [
"MIT"
] | 38 | 2021-01-06T07:15:03.000Z | 2022-03-19T05:07:50.000Z | import malaya
model = malaya.sentiment.transformer(model='albert', validate=True)
| 20.75 | 67 | 0.795181 |
63a99f608f691d04b3275be16c5b2c9d0717bf3b | 1,385 | py | Python | tests/__init__.py | ibrechin/django-zendesk-tickets | f4651836d6a0aaad46fcd90229e482bf4673e9ac | [
"MIT"
] | 4 | 2017-04-12T08:11:07.000Z | 2022-01-26T16:30:52.000Z | tests/__init__.py | ibrechin/django-zendesk-tickets | f4651836d6a0aaad46fcd90229e482bf4673e9ac | [
"MIT"
] | 6 | 2015-12-07T12:13:49.000Z | 2022-03-21T12:19:10.000Z | tests/__init__.py | ibrechin/django-zendesk-tickets | f4651836d6a0aaad46fcd90229e482bf4673e9ac | [
"MIT"
] | 2 | 2021-04-11T06:31:40.000Z | 2021-07-26T06:20:33.000Z | import os
import sys
import django
from django.conf import settings
from django.test.runner import DiscoverRunner
test_settings = dict(
DEBUG=True,
SECRET_KEY='a' * 50,
ROOT_URLCONF='tests.urls',
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'zendesk_tickets',
),
MIDDLEWARE=[
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
],
SESSION_ENGINE='django.contrib.sessions.backends.signed_cookies',
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates')],
'APP_DIRS': True,
}],
ZENDESK_BASE_URL='https://zendesk.local/',
ZENDESK_API_USERNAME='zendesk_user',
ZENDESK_API_TOKEN='api_token',
ZENDESK_REQUESTER_ID=111111,
ZENDESK_GROUP_ID=222222,
ZENDESK_CUSTOM_FIELDS={
'referer': 31,
'username': 32,
'user_agent': 33,
'contact_email': 34,
},
)
def run():
if not settings.configured:
settings.configure(**test_settings)
django.setup()
failures = DiscoverRunner(verbosity=2, failfast=False, interactive=False).run_tests(['tests'])
sys.exit(failures)
if __name__ == '__main__':
run()
| 26.634615 | 98 | 0.662094 |
2d5beca38a32fc4b20c9b4697c97dd5e274f043f | 172 | py | Python | src/model/snrna.py | AstrorEnales/GenCoNet | c596d31a889f14499883fcdf74fdc67f927a806e | [
"MIT"
] | 2 | 2019-12-05T11:46:48.000Z | 2022-03-09T00:11:06.000Z | src/model/snrna.py | AstrorEnales/GenCoNet | c596d31a889f14499883fcdf74fdc67f927a806e | [
"MIT"
] | null | null | null | src/model/snrna.py | AstrorEnales/GenCoNet | c596d31a889f14499883fcdf74fdc67f927a806e | [
"MIT"
] | null | null | null | from model.rna import RNA
class SnRNA(RNA):
def __init__(self, ids: [str], names: [str]):
super().__init__(ids, names)
self.primary_id_prefix = 'HGNC' | 24.571429 | 49 | 0.633721 |
51ee23d61adc99b969cc67b542221ea0043e30df | 2,857 | py | Python | unet3d/utils/utils.py | rickymwalsh/ot_da_v0 | 81263e3b4794ef1e51253af67d5e8849044c4f8e | [
"MIT"
] | 2 | 2021-11-03T17:29:51.000Z | 2022-03-30T18:58:30.000Z | unet3d/utils/utils.py | rickymwalsh/ot_da_v0 | 81263e3b4794ef1e51253af67d5e8849044c4f8e | [
"MIT"
] | null | null | null | unet3d/utils/utils.py | rickymwalsh/ot_da_v0 | 81263e3b4794ef1e51253af67d5e8849044c4f8e | [
"MIT"
] | 2 | 2021-11-08T09:12:40.000Z | 2022-01-26T09:37:33.000Z | import pickle
import os
import collections
import nibabel as nib
import numpy as np
from nilearn.image import reorder_img, new_img_like
from .nilearn_custom_utils.nilearn_utils import crop_img_to
from .sitk_utils import resample_to_spacing, calculate_origin_offset
def pickle_dump(item, out_file):
with open(out_file, "wb") as opened_file:
pickle.dump(item, opened_file)
def pickle_load(in_file):
with open(in_file, "rb") as opened_file:
return pickle.load(opened_file)
def get_affine(in_file):
return read_image(in_file).affine
def read_image_files(image_files, image_shape=None, crop=None, label_indices=None):
"""
:param image_files:
:param image_shape:
:param crop:
:param use_nearest_for_last_file: If True, will use nearest neighbor interpolation for the last file. This is used
because the last file may be the labels file. Using linear interpolation here would mess up the labels.
:return:
"""
if label_indices is None:
label_indices = []
elif not isinstance(label_indices, collections.Iterable) or isinstance(label_indices, str):
label_indices = [label_indices]
image_list = list()
for index, image_file in enumerate(image_files):
if (label_indices is None and (index + 1) == len(image_files)) \
or (label_indices is not None and index in label_indices):
interpolation = "nearest"
else:
interpolation = "linear"
image_list.append(read_image(image_file, image_shape=image_shape, crop=crop, interpolation=interpolation))
return image_list
def read_image(in_file, image_shape=None, interpolation='linear', crop=None):
# print("Reading: {0}".format(in_file))
image = nib.load(os.path.abspath(in_file))
image = fix_shape(image)
# print("Image shape: ", image_shape)
if crop:
image = crop_img_to(image, crop, copy=True)
if image_shape:
return resize(image, new_shape=image_shape, interpolation=interpolation)
else:
return image
def fix_shape(image):
if image.shape[-1] == 1:
return image.__class__(dataobj=np.squeeze(image.get_data()), affine=image.affine)
return image
def resize(image, new_shape, interpolation="linear"):
image = reorder_img(image, resample=interpolation)
zoom_level = np.divide(new_shape, image.shape)
new_spacing = np.divide(image.header.get_zooms(), zoom_level)
new_data = resample_to_spacing(image.get_data(), image.header.get_zooms(), new_spacing,
interpolation=interpolation)
new_affine = np.copy(image.affine)
np.fill_diagonal(new_affine, new_spacing.tolist() + [1])
new_affine[:3, 3] += calculate_origin_offset(new_spacing, image.header.get_zooms())
return new_img_like(image, new_data, affine=new_affine)
| 34.841463 | 118 | 0.707735 |
43e9f3f6dcb57615750bdee9f4e8f2b71f188d3b | 49,316 | py | Python | tests/test_modeling_bart.py | Santosh-Gupta/transformers | 8db01bdab4813105fba6e8de369b69b6fc37c9a7 | [
"Apache-2.0"
] | 1 | 2021-04-20T17:01:32.000Z | 2021-04-20T17:01:32.000Z | tests/test_modeling_bart.py | Tarpelite/title_generation | 8b9a9878e42ef1217b346daf492450c3d004dd21 | [
"MIT"
] | null | null | null | tests/test_modeling_bart.py | Tarpelite/title_generation | 8b9a9878e42ef1217b346daf492450c3d004dd21 | [
"MIT"
] | 1 | 2020-09-10T21:01:41.000Z | 2020-09-10T21:01:41.000Z | # coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import timeout_decorator # noqa
from transformers import is_torch_available
from transformers.file_utils import cached_property
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor
from .utils import require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import (
AutoModel,
AutoModelForSequenceClassification,
AutoTokenizer,
BartModel,
BartForConditionalGeneration,
BartForSequenceClassification,
BartConfig,
BartTokenizer,
MBartTokenizer,
BatchEncoding,
)
from transformers.modeling_bart import (
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
shift_tokens_right,
invert_mask,
_prepare_bart_decoder_inputs,
SinusoidalPositionalEmbedding,
)
@require_torch
class ModelTester:
def __init__(
self, parent,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_labels = False
self.vocab_size = 99
self.hidden_size = 16
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 4
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 20
self.eos_token_id = 2
self.pad_token_id = 1
self.bos_token_id = 0
torch.manual_seed(0)
def prepare_config_and_inputs_for_common(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3,)
input_ids[:, -1] = 2 # Eos Token
config = BartConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
inputs_dict = prepare_bart_inputs_dict(config, input_ids)
return config, inputs_dict
def prepare_bart_inputs_dict(
config, input_ids, attention_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
@require_torch
class BARTModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(BartModel, BartForConditionalGeneration, BartForSequenceClassification) if is_torch_available() else ()
)
all_generative_model_classes = (BartForConditionalGeneration,) if is_torch_available() else ()
is_encoder_decoder = True
# TODO(SS): fix the below in a separate PR
test_pruning = False
test_torchscript = False
test_head_masking = False
test_resize_embeddings = True # This requires inputs_dict['input_ids']
test_missing_keys = False # because BartForConditionalGeneration and BartModel now have identical state_dict
def setUp(self):
self.model_tester = ModelTester(self)
self.config_tester = ConfigTester(self, config_class=BartConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_initialization_more(self):
# (config, input_ids, token_type_ids, input_mask, *unused) = \
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
model = BartModel(config)
model.to(torch_device)
model.eval()
# test init
self.assertTrue((model.encoder.embed_tokens.weight == model.shared.weight).all().item())
def _check_var(module):
"""Check that we initialized various parameters from N(0, config.init_std)."""
self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2)
_check_var(model.encoder.embed_tokens)
_check_var(model.encoder.layers[0].self_attn.k_proj)
_check_var(model.encoder.layers[0].fc1)
_check_var(model.encoder.embed_positions)
def test_advanced_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict["input_ids"][:, -2:] = config.pad_token_id
decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_bart_decoder_inputs(
config, inputs_dict["input_ids"]
)
model = BartModel(config).to(torch_device).eval()
decoder_features_with_created_mask = model(**inputs_dict)[0]
decoder_features_with_passed_mask = model(
decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict
)[0]
_assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask)
useless_mask = torch.zeros_like(decoder_attn_mask)
decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0]
self.assertTrue(isinstance(decoder_features, torch.Tensor)) # no hidden states or attentions
self.assertEqual(
decoder_features.size(), (self.model_tester.batch_size, self.model_tester.seq_length, config.d_model)
)
if decoder_attn_mask.min().item() < -1e3: # some tokens were masked
self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item())
# Test different encoder attention masks
decoder_features_with_long_encoder_mask = model(
inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long()
)[0]
_assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask)
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
@unittest.skip("Passing inputs_embeds not implemented for Bart.")
def test_inputs_embeds(self):
pass
def test_tiny_model(self):
model_name = "sshleifer/bart-tiny-random"
tiny = AutoModel.from_pretrained(model_name) # same vocab size
tok = AutoTokenizer.from_pretrained(model_name) # same tokenizer
inputs_dict = tok.batch_encode_plus(["Hello my friends"], return_tensors="pt")
with torch.no_grad():
tiny(**inputs_dict)
EN_CODE = 250004
@require_torch
class MBartIntegrationTests(unittest.TestCase):
src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" I ate lunch twice yesterday",
]
tgt_text = ["Şeful ONU declară că nu există o soluţie militară în Siria", "to be padded"]
expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def setUpClass(cls):
checkpoint_name = "facebook/mbart-large-en-ro"
cls.tokenizer = MBartTokenizer.from_pretrained(checkpoint_name)
cls.pad_token_id = 1
return cls
@cached_property
def model(self):
"""Only load the model if needed."""
model = BartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro").to(torch_device)
if "cuda" in torch_device:
model = model.half()
return model
@slow
def test_enro_forward(self):
model = self.model
net_input = {
"input_ids": _long_tensor(
[
[3493, 3060, 621, 104064, 1810, 100, 142, 566, 13158, 6889, 5, 2, 250004],
[64511, 7, 765, 2837, 45188, 297, 4049, 237, 10, 122122, 5, 2, 250004],
]
),
"decoder_input_ids": _long_tensor(
[
[250020, 31952, 144, 9019, 242307, 21980, 55749, 11, 5, 2, 1, 1],
[250020, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2],
]
),
"generation_mode": False,
}
net_input["attention_mask"] = net_input["input_ids"].ne(self.pad_token_id)
with torch.no_grad():
logits, *other_stuff = model(**net_input)
expected_slice = torch.tensor([9.0078, 10.1113, 14.4787], device=torch_device, dtype=model.dtype)
result_slice = logits[0][0][:3]
self.assertTrue(torch.allclose(expected_slice, result_slice, atol=TOLERANCE))
@slow
def test_enro_generate(self):
inputs: dict = self.tokenizer.prepare_translation_batch([self.src_text[0]]).to(torch_device)
translated_tokens = self.model.generate(input_ids=inputs["input_ids"].to(torch_device))
decoded = self.tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
self.assertEqual(self.tgt_text[0], decoded[0])
def test_mbart_enro_config(self):
mbart_models = ["facebook/mbart-large-en-ro"]
expected = {"scale_embedding": True, "output_past": True}
for name in mbart_models:
config = BartConfig.from_pretrained(name)
self.assertTrue(config.is_valid_mbart())
for k, v in expected.items():
try:
self.assertEqual(v, getattr(config, k))
except AssertionError as e:
e.args += (name, k)
raise
def test_mbart_fast_forward(self):
config = BartConfig(
vocab_size=99,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
add_final_layer_norm=True,
)
lm_model = BartForConditionalGeneration(config).to(torch_device)
context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(logits.shape, expected_shape)
def test_enro_tokenizer_prepare_translation_batch(self):
batch = self.tokenizer.prepare_translation_batch(
self.src_text, tgt_texts=self.tgt_text, max_length=len(self.expected_src_tokens),
)
self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 14), batch.input_ids.shape)
self.assertEqual((2, 14), batch.attention_mask.shape)
result = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2, batch.decoder_input_ids[0, -2]) # EOS
def test_enro_tokenizer_batch_encode_plus(self):
ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens, ids)
def test_enro_tokenizer_truncation(self):
src_text = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0], str)
desired_max_length = 10
ids = self.tokenizer.prepare_translation_batch(
src_text, return_tensors=None, max_length=desired_max_length
).input_ids[0]
self.assertEqual(ids[-2], 2)
self.assertEqual(ids[-1], EN_CODE)
self.assertEqual(len(ids), desired_max_length)
@require_torch
class BartHeadTests(unittest.TestCase):
vocab_size = 99
def _get_config_and_data(self):
input_ids = torch.tensor(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],
dtype=torch.long,
device=torch_device,
)
batch_size = input_ids.shape[0]
config = BartConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
return config, input_ids, batch_size
def test_sequence_classification_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
labels = _long_tensor([2] * batch_size).to(torch_device)
model = BartForSequenceClassification(config)
model.to(torch_device)
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels)
logits = outputs[1]
expected_shape = torch.Size((batch_size, config.num_labels))
self.assertEqual(logits.shape, expected_shape)
loss = outputs[0]
self.assertIsInstance(loss.item(), float)
@timeout_decorator.timeout(1)
def test_lm_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
lm_model = BartForConditionalGeneration(config)
lm_model.to(torch_device)
loss, logits, enc_features = lm_model(input_ids=input_ids, labels=lm_labels)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(logits.shape, expected_shape)
self.assertIsInstance(loss.item(), float)
def test_lm_uneven_forward(self):
config = BartConfig(
vocab_size=self.vocab_size,
d_model=14,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=8,
decoder_ffn_dim=8,
max_position_embeddings=48,
)
lm_model = BartForConditionalGeneration(config).to(torch_device)
context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(logits.shape, expected_shape)
def test_generate_beam_search(self):
input_ids = torch.Tensor([[71, 82, 2], [68, 34, 2]]).long().to(torch_device)
config = BartConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
lm_model = BartForConditionalGeneration(config).to(torch_device)
lm_model.eval()
max_length = 5
new_input_ids = lm_model.generate(
input_ids.clone(),
do_sample=True,
num_return_sequences=1,
num_beams=2,
no_repeat_ngram_size=3,
max_length=max_length,
)
self.assertEqual(new_input_ids.shape, (input_ids.shape[0], max_length))
# TODO(SS): uneven length batches, empty inputs
def test_shift_tokens_right(self):
input_ids = torch.Tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]]).long()
shifted = shift_tokens_right(input_ids, 1)
n_pad_before = input_ids.eq(1).float().sum()
n_pad_after = shifted.eq(1).float().sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(torch.eq(shifted[:, 0], 2).all())
@slow
def test_tokenization(self):
tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
examples = [" Hello world", " DomDramg"] # need leading spaces for equality
fairseq_results = [
torch.Tensor([0, 20920, 232, 2]),
torch.Tensor([0, 11349, 495, 4040, 571, 2]),
]
for ex, desired_result in zip(examples, fairseq_results):
bart_toks = tokenizer.encode(ex, return_tensors="pt")
_assert_tensors_equal(desired_result.long(), bart_toks, prefix=ex)
@unittest.skipIf(torch_device == "cpu", "Cant do half precision")
def test_generate_fp16(self):
config, input_ids, batch_size = self._get_config_and_data()
attention_mask = input_ids.ne(1).to(torch_device)
model = BartForConditionalGeneration(config).eval().to(torch_device).half()
model.generate(input_ids, attention_mask=attention_mask, do_sample=False, early_stopping=True)
@unittest.skipIf(torch_device == "cpu", "Cant do half precision")
def test_base_model_fp16(self):
config, input_ids, batch_size = self._get_config_and_data()
attention_mask = input_ids.ne(1).to(torch_device)
lm_model = BartForConditionalGeneration(config).eval().to(torch_device).half()
lm_model(input_ids, attention_mask=attention_mask)
def test_default_generate_kwargs(self):
config, input_ids, _ = self._get_config_and_data()
model = BartForConditionalGeneration(config).eval().to(torch_device)
model.generate(input_ids)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_dummy_inputs(self):
config, *_ = self._get_config_and_data()
model = BartForConditionalGeneration(config).eval().to(torch_device)
model(**model.dummy_inputs)
def test_prepare_bart_decoder_inputs(self):
config, *_ = self._get_config_and_data()
input_ids = _long_tensor(([4, 4, 2]))
decoder_input_ids = _long_tensor([[26388, 2, config.pad_token_id]])
ignore = float("-inf")
decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_bart_decoder_inputs(
config, input_ids, decoder_input_ids
)
expected_causal_mask = torch.tensor(
[[0, ignore, ignore], [0, 0, ignore], [0, 0, 0]] # never attend to the final token, because its pad
).to(input_ids.device)
self.assertEqual(decoder_attn_mask.size(), decoder_input_ids.size())
self.assertTrue(torch.eq(expected_causal_mask, causal_mask).all())
def test_resize_tokens_embeddings_more(self):
config, input_ids, _ = self._get_config_and_data()
def _get_embs(m):
return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone())
model = BartForConditionalGeneration(config).eval().to(torch_device)
input, output = _get_embs(model)
self.assertTrue(torch.eq(input, output).all())
new_vocab_size = 45
model.resize_token_embeddings(new_vocab_size)
input_new, output_new = _get_embs(model)
self.assertEqual(input_new.shape, (new_vocab_size, config.d_model))
self.assertEqual(output_new.shape, (new_vocab_size, config.d_model))
self.assertTrue(torch.eq(input_new, output_new).all())
def _assert_tensors_equal(a, b, atol=1e-12, prefix=""):
"""If tensors not close, or a and b arent both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
msg = "{} != {}".format(a, b)
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device,)
TOLERANCE = 1e-4
@require_torch
class BartModelIntegrationTests(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = BartModel.from_pretrained("facebook/bart-large").to(torch_device)
input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
inputs_dict = prepare_bart_inputs_dict(model.config, input_ids)
with torch.no_grad():
output = model(**inputs_dict)[0]
expected_shape = torch.Size((1, 11, 1024))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device
)
self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
@slow
def test_mnli_inference(self):
example_b = [0, 31414, 232, 328, 740, 1140, 69, 46078, 1588, 2, 1]
input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], example_b])
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli").to(
torch_device
) # eval called in from_pre
inputs_dict = prepare_bart_inputs_dict(model.config, input_ids)
# Test that model hasn't changed
with torch.no_grad():
batched_logits, features = model(**inputs_dict)
expected_shape = torch.Size((2, 3))
self.assertEqual(batched_logits.shape, expected_shape)
expected_slice = torch.Tensor([[0.1907, 1.4342, -1.0289]]).to(torch_device)
logits_arr = batched_logits[0].detach()
# Test that padding does not change results
input_ids_no_pad = _long_tensor([example_b[:-1]])
inputs_dict = prepare_bart_inputs_dict(model.config, input_ids=input_ids_no_pad)
with torch.no_grad():
logits2 = model(**inputs_dict)[0]
_assert_tensors_equal(batched_logits[1], logits2, atol=TOLERANCE)
_assert_tensors_equal(expected_slice, logits_arr, atol=TOLERANCE)
@unittest.skip("This is just too slow")
def test_model_from_pretrained(self):
    """Smoke-check that every published BART checkpoint can be instantiated."""
    # Skipped by default: forces a 1.6GB download from S3 per checkpoint.
    for checkpoint in BART_PRETRAINED_MODEL_ARCHIVE_LIST:
        self.assertIsNotNone(BartModel.from_pretrained(checkpoint))
@slow
def test_xsum_summarization_same_as_fairseq(self):
    """Generate an XSum summary with beam search and compare it verbatim
    against the output recorded from the fairseq implementation."""
    model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-xsum").to(torch_device)
    # XSum checkpoints are plain BART, not mBART.
    self.assertFalse(model.config.is_valid_mbart())
    tok = BartTokenizer.from_pretrained("facebook/bart-large")
    PGE_ARTICLE = """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."""
    EXPECTED_SUMMARY = "California's largest power company has begun shutting off power to tens of thousands of homes and businesses in the state."
    dct = tok.batch_encode_plus([PGE_ARTICLE], max_length=1024, pad_to_max_length=True, return_tensors="pt",).to(
        torch_device
    )
    # Generation parameters mirror the fairseq run that produced
    # EXPECTED_SUMMARY; changing any of them invalidates the comparison.
    hypotheses_batch = model.generate(
        input_ids=dct["input_ids"],
        attention_mask=dct["attention_mask"],
        num_beams=2,
        max_length=62,
        min_length=11,
        length_penalty=1.0,
        no_repeat_ngram_size=3,
        early_stopping=True,
        decoder_start_token_id=model.config.eos_token_id,
    )
    decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True,)
    self.assertEqual(EXPECTED_SUMMARY, decoded[0])
def test_xsum_config_generation_params(self):
    """The XSum config must ship the generation defaults used for the paper runs."""
    config = BartConfig.from_pretrained("facebook/bart-large-xsum")
    expected = {"num_beams": 6, "do_sample": False, "early_stopping": True, "length_penalty": 1.0}
    # "MISSING" makes an absent attribute show up as a mismatch instead of raising.
    actual = {name: getattr(config, name, "MISSING") for name in expected}
    self.assertDictEqual(expected, actual)
@slow
def test_cnn_summarization_same_as_fairseq(self):
hf = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
tok = BartTokenizer.from_pretrained("facebook/bart-large")
FRANCE_ARTICLE = ' Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said, but that they "hadn\'t been exploited yet." 
Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. "That is something we did not know before. ... Overall we can say many things of the investigation weren\'t revealed by the investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. 
Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\' personal belongings will start Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\'s license. 
Kumpa emphasized there\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\'s more to the story, said Brian Russell, a forensic psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\'t going to keep doing their job and they\'re upset about that and so they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\'s problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.' # @noqa
EXPECTED_SUMMARY_FRANCE = 'French prosecutor says he\'s not aware of any video footage from on board the plane. German daily Bild and French Paris Match claim to have found a cell phone video of the crash. A French Gendarmerie spokesman calls the reports "completely wrong" and "unwarranted" German airline Lufthansa confirms co-pilot Andreas Lubitz had battled depression.'
SHORTER_ARTICLE = ' (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.'
EXPECTED_SUMMARY_SHORTER = "The Palestinian Authority becomes the 123rd member of the International Criminal Court. The move gives the court jurisdiction over alleged crimes in Palestinian territories. Israel and the United States opposed the Palestinians' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki said it was a move toward greater justice."
# The below article tests that we don't add any hypotheses outside of the top n_beams
IRAN_ARTICLE = " (CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger. Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a letter to the Iranian leadership warning them away from a deal. The debate that has already begun since the announcement of the new framework will likely result in more heat than light. It will not be helped by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: . The most misleading assertion, despite universal rejection by experts, is that the negotiations' objective at the outset was the total elimination of any nuclear program in Iran. That is the position of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it had been, there would have been no Iranian team at the negotiating table. Rather, the objective has always been to structure an agreement or series of agreements so that Iran could not covertly develop a nuclear arsenal before the United States and its allies could respond. The new framework has exceeded expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite sharp accusations by some in the United States and its allies, Iran denies having such a program, and U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. 
Iran's continued cooperation with International Atomic Energy Agency inspections is further evidence on this point, and we'll know even more about Iran's program in the coming months and years because of the deal. In fact, the inspections provisions that are part of this agreement are designed to protect against any covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter warning that a deal might be killed by Congress or a future president). This of course is not the case. The talks were between Iran and the five permanent members of the U.N. Security Council (United States, United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the agreement should be a formal treaty requiring the Senate to \"advise and consent.\" But the issue is not suited for a treaty. 
Treaties impose equivalent obligations on all signatories. For example, the New START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement with Iran will not be so balanced. The restrictions and obligations in the final framework agreement will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally some insist that any agreement must address Iranian missile programs, human rights violations or support for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in the negotiations would be a poison pill. This agreement should be judged on its merits and on how it affects the security of our negotiating partners and allies, including Israel. Those judgments should be fact-based, not based on questionable assertions or dubious assumptions."
EXPECTED_SUMMARY_IRAN = "The U.S. and its negotiating partners reached a very strong framework agreement with Iran. Peter Bergen: The debate that has already begun will likely result in more heat than light. He says the agreement limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Bergen says the most important aim of a nuclear deal is preventing a nuclear Iran."
ARTICLE_SUBWAY = ' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s Investigation Division. 
Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.'
EXPECTED_SUMMARY_SUBWAY = "Liana Barrientos has been married 10 times, sometimes within two weeks of each other. Prosecutors say the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx. She was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the subway."
dct = tok.batch_encode_plus(
[FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY],
max_length=1024,
pad_to_max_length=True,
return_tensors="pt",
)
max_length = 140
min_length = 55
self.assertEqual(1024, dct["input_ids"].shape[1])
hypotheses_batch = hf.generate(
input_ids=dct["input_ids"].to(torch_device),
attention_mask=dct["attention_mask"].to(torch_device),
num_beams=4,
length_penalty=2.0,
max_length=max_length + 2,
min_length=min_length + 1,
no_repeat_ngram_size=3,
do_sample=False,
early_stopping=True,
decoder_start_token_id=hf.config.eos_token_id,
)
decoded = [
tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch
]
self.assertListEqual(
[EXPECTED_SUMMARY_FRANCE, EXPECTED_SUMMARY_SHORTER, EXPECTED_SUMMARY_IRAN, EXPECTED_SUMMARY_SUBWAY],
decoded,
)
# TODO(SS): run fairseq again with num_beams=2, min_len=20.
# TODO(SS): add test case that hits max_length
@require_torch
class TestSinusoidalPositionalEmbeddings(unittest.TestCase):
    """Tests for SinusoidalPositionalEmbedding: cache behavior, odd
    dimensions, and parity with the Marian reference weight table."""

    # First 5 columns of the first 3 rows of the table produced by the
    # Marian implementation.
    desired_weights = [
        [0, 0, 0, 0, 0],
        [0.84147096, 0.82177866, 0.80180490, 0.78165019, 0.76140374],
        [0.90929741, 0.93651021, 0.95829457, 0.97505713, 0.98720258],
    ]

    def test_positional_emb_cache_logic(self):
        """use_cache=True must return only the last position's embedding."""
        padding_idx = 1
        tokens = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device)
        embedding = SinusoidalPositionalEmbedding(num_positions=32, embedding_dim=6, padding_idx=padding_idx).to(
            torch_device
        )
        full = embedding(tokens, use_cache=False)
        cached = embedding(tokens, use_cache=True)
        # Extra leading dims allow broadcasting over the batch.
        self.assertEqual((1, 1, 6), cached.shape)
        self.assertListEqual(full[-1].tolist(), cached[0][0].tolist())

    def test_odd_embed_dim(self):
        """Odd embedding dims are rejected; odd num_positions is allowed."""
        with self.assertRaises(NotImplementedError):
            SinusoidalPositionalEmbedding(num_positions=4, embedding_dim=5, padding_idx=0).to(torch_device)
        SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=0).to(torch_device)

    def test_positional_emb_weights_against_marian(self):
        """Weight table matches Marian; forward is a plain lookup (padding
        positions are not treated specially)."""
        padding_idx = 1
        embedding = SinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512, padding_idx=padding_idx).to(
            torch_device
        )
        actual_rows = embedding.weight.data[:3, :5].tolist()
        for expected_row, actual_row in zip(self.desired_weights, actual_rows):
            for expected, actual in zip(expected_row, actual_row):
                self.assertAlmostEqual(expected, actual, places=3)
        # Lookup on a padded sequence: padding ids go through the same table.
        tokens = torch.tensor([[4, 10, padding_idx, padding_idx, padding_idx]], dtype=torch.long, device=torch_device)
        looked_up = embedding(tokens)
        self.assertTrue(
            torch.allclose(
                torch.tensor(self.desired_weights, device=torch_device), looked_up[:3, :5], atol=1e-3
            )
        )
| 68.973427 | 7,208 | 0.716745 |
67744973014925ac78de5620bf55f51876ca6c95 | 3,062 | py | Python | setup.py | teemtee/tmt | 1dfc764c7aed0837a0db59af362946b26ba0485b | [
"MIT"
] | 2 | 2022-03-04T05:57:48.000Z | 2022-03-09T18:49:02.000Z | setup.py | teemtee/tmt | 1dfc764c7aed0837a0db59af362946b26ba0485b | [
"MIT"
] | 91 | 2022-02-15T14:09:03.000Z | 2022-03-31T16:05:28.000Z | setup.py | teemtee/tmt | 1dfc764c7aed0837a0db59af362946b26ba0485b | [
"MIT"
] | 5 | 2022-02-21T08:02:23.000Z | 2022-03-26T16:43:46.000Z | #!/usr/bin/env python
# coding: utf-8
"""Packaging script for tmt (Test Management Tool)."""

import re
import sys
from io import open

from setuptools import setup

# Parse version from the spec file (single source of truth for releases).
with open('tmt.spec', encoding='utf-8') as specfile:
    lines = "\n".join(line.rstrip() for line in specfile)
    # Raises AttributeError if the spec file has no Version: tag — fail loudly.
    version = re.search('Version: (.+)', lines).group(1).rstrip()

# acceptable version schema: major.minor[.patch][sub]
__version__ = version

__pkg__ = 'tmt'
__pkgdir__ = {}
__pkgs__ = [
    'tmt',
    'tmt/plugins',
    'tmt/steps',
    'tmt/steps/discover',
    'tmt/steps/provision',
    'tmt/steps/prepare',
    'tmt/steps/execute',
    'tmt/steps/report',
    'tmt/steps/finish',
]
__provides__ = ['tmt']
__desc__ = 'Test Management Tool'
__scripts__ = ['bin/tmt']

# Prepare install requires and extra requires
install_requires = [
    'fmf>=1.0.0',
    'click',
    'requests',
    'ruamel.yaml',
]
# typing_extensions is needed with Python 3.7 and older, types imported
# from that package (Literal, Protocol, TypedDict, ...) become available
# from typing since Python 3.8.
# Compare full (major, minor) tuples rather than the bare minor number so
# the check cannot misfire on a different major version.
if sys.version_info < (3, 8):
    install_requires.append('typing-extensions>=3.7.4.3')
# dataclasses is needed with Python 3.6
if sys.version_info < (3, 7):
    install_requires.append('dataclasses')

extras_require = {
    'docs': [
        'sphinx>=3',
        'sphinx_rtd_theme'],
    'tests': [
        'pytest',
        'python-coveralls',
        'requre',
        'pre-commit',
        'mypy'
    ],
    'provision': ['testcloud>=0.7.0'],
    'convert': [
        'nitrate',
        'markdown',
        'python-bugzilla',
        'html2text'],
    'report-html': ['jinja2'],
    'report-junit': ['junit_xml'],
}
# The 'all' extra aggregates every optional dependency group above.
extras_require['all'] = [
    dependency
    for extra in extras_require.values()
    for dependency in extra]

pip_src = 'https://pypi.python.org/packages/source'
__deplinks__ = []

# README is in the parent directory
readme = 'README.rst'
with open(readme, encoding='utf-8') as _file:
    readme = _file.read()

github = 'https://github.com/teemtee/tmt'
download_url = '{0}/archive/main.zip'.format(github)

default_setup = dict(
    url=github,
    license='MIT',
    author='Petr Splichal',
    author_email='psplicha@redhat.com',
    maintainer='Petr Splichal',
    maintainer_email='psplicha@redhat.com',
    download_url=download_url,
    long_description=readme,
    data_files=[],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Utilities',
    ],
    keywords=['metadata', 'testing'],
    dependency_links=__deplinks__,
    description=__desc__,
    install_requires=install_requires,
    extras_require=extras_require,
    name=__pkg__,
    package_dir=__pkgdir__,
    packages=__pkgs__,
    provides=__provides__,
    scripts=__scripts__,
    version=__version__,
)

setup(**default_setup)
| 25.305785 | 72 | 0.638472 |
f4c9927b909dfb773cd0b0bd92024436cca40bcc | 324 | py | Python | web/templatetags/tags.py | codeschule/koodikoulu-site | dde9932564f36dce6f4dbfd31e7923f1bae83293 | [
"MIT"
] | 5 | 2015-09-16T10:50:53.000Z | 2016-01-16T09:10:37.000Z | web/templatetags/tags.py | codeschule/koodikoulu-site | dde9932564f36dce6f4dbfd31e7923f1bae83293 | [
"MIT"
] | 10 | 2015-09-07T05:58:03.000Z | 2019-02-15T10:36:48.000Z | web/templatetags/tags.py | codeschule/koodikoulu-site | dde9932564f36dce6f4dbfd31e7923f1bae83293 | [
"MIT"
] | 6 | 2015-09-06T19:42:46.000Z | 2019-12-29T21:31:07.000Z | from django import template
# Django discovers template tags through a module-level `register` Library.
register = template.Library()


@register.inclusion_tag('templatetags/form_field.html')
def show_form_field(field, icon=False):
    """Render a single bound form field, optionally with an icon."""
    return dict(field=field, icon=icon)
@register.inclusion_tag('templatetags/learning_resource.html')
def show_resource(resource):
return {'resource': resource} | 29.454545 | 62 | 0.771605 |
1fec91296fed9265870912942c74cd456940777c | 599 | py | Python | intro_django/intro_django/encuestas/admin.py | txtbits/daw-python | 5dde1207e2791e90aa5e9ce2b6afc4116129efab | [
"MIT"
] | null | null | null | intro_django/intro_django/encuestas/admin.py | txtbits/daw-python | 5dde1207e2791e90aa5e9ce2b6afc4116129efab | [
"MIT"
] | null | null | null | intro_django/intro_django/encuestas/admin.py | txtbits/daw-python | 5dde1207e2791e90aa5e9ce2b6afc4116129efab | [
"MIT"
] | null | null | null | # -*-coding: cp1252 -*-
from models import Encuesta, Opcion
from django.contrib import admin
class OpcionesEnLinea(admin.TabularInline):
    """Edit Opcion rows inline (tabular layout) on the parent Encuesta page."""
    model = Opcion
    # Show one extra blank row for adding a new option.
    extra = 1
class EncuestaAdmin(admin.ModelAdmin):
    """Admin configuration for Encuesta (survey) objects."""
    # Changelist columns; es_reciente is presumably a model method/attribute —
    # confirm against models.py.
    list_display = ['pregunta', 'fecha_pub', 'es_reciente']
    fieldsets = [
        (None, {'fields': ['pregunta']}),
        # Publication date lives in a collapsible section of the edit form.
        ('Publicacion', {'fields': ['fecha_pub'],
                         'classes': ['collapse']}),
    ]
    # Edit the survey's options inline on the same page.
    inlines = [OpcionesEnLinea]
    list_filter = ['fecha_pub']
    search_fields = ['pregunta']
# Expose both models in the Django admin; Encuesta uses the customized admin.
admin.site.register(Encuesta, EncuestaAdmin)
admin.site.register(Opcion)
| 26.043478 | 59 | 0.651085 |
b195342b8f13e6c7a25dfd4916e91a0bc56fb21a | 762 | py | Python | extra/scripts/segment_extract.py | heinsm/qira | 3726403c4bf4a4f3cf74e1514001333b3fa0e6b4 | [
"MIT"
] | 2,056 | 2018-03-17T01:46:40.000Z | 2022-03-31T20:06:48.000Z | extra/scripts/segment_extract.py | heinsm/qira | 3726403c4bf4a4f3cf74e1514001333b3fa0e6b4 | [
"MIT"
] | 45 | 2018-05-15T08:50:51.000Z | 2022-01-01T10:51:40.000Z | extra/scripts/segment_extract.py | heinsm/qira | 3726403c4bf4a4f3cf74e1514001333b3fa0e6b4 | [
"MIT"
] | 365 | 2018-04-09T05:46:11.000Z | 2022-03-20T22:57:18.000Z | from qira_log import *
from pymongo import MongoClient
# Port 3001 with a "meteor" database — presumably the Mongo instance bundled
# with a Meteor dev server; confirm against the deployment setup.
db = MongoClient('localhost', 3001).meteor

# Pages touched by data accesses / instruction fetches, respectively.
mem_addrs = set()
ins_addrs = set()

# page level granularity
dat = read_log("/tmp/qira_log")
for (address, data, clnum, flags) in dat:
    if flags & IS_MEM:
        # Mask off the low 12 bits to bucket addresses by 4 KB page.
        mem_addrs.add(address & 0xFFFFF000)
    if flags & IS_START:
        ins_addrs.add(address & 0xFFFFF000)

pmaps = []
print "instructions"
for i in sorted(ins_addrs):
    pmaps.append({"address": i, "type": "instruction"})
print "memory"
for i in sorted(mem_addrs):
    # Pages that also hold instructions are reported only as "instruction".
    if i not in ins_addrs:
        pmaps.append({"address": i, "type": "memory"})

coll = db.pmaps
print "doing db insert"
# Rebuild the collection from scratch on every run.
coll.drop()
coll.insert(pmaps)
print "db insert done, building indexes"
coll.ensure_index("address")
print "indexes built"
| 20.594595 | 53 | 0.706037 |
a5ca47854d4928f143e9a2e43fba71eb69f19569 | 2,480 | py | Python | python/matmul1.py | odu-rcs/ODU-HPC-samples | 1812bfe3b5fddd7de4ee5a733b9fad3c13a5dc72 | [
"Apache-2.0"
] | null | null | null | python/matmul1.py | odu-rcs/ODU-HPC-samples | 1812bfe3b5fddd7de4ee5a733b9fad3c13a5dc72 | [
"Apache-2.0"
] | null | null | null | python/matmul1.py | odu-rcs/ODU-HPC-samples | 1812bfe3b5fddd7de4ee5a733b9fad3c13a5dc72 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Created: 20170227
# Note: In a python program or function, if the first statement
# encountered is a string literal like this, it will be taken as the
# documentation (__doc__) of the program/function.
"""
matmul1.py
Demo program to read two matrices, performs multiplication, and prints
the output to standard output.
This program has some pedagogical value in demonstrating the effect of
hand-crafting loops in python as opposed to using
matrix-multiplication kernel (canned routine).
"""
from __future__ import print_function
import sys
import numpy
from np_helper import loadmatrix1, printmatrix1
def matmul_manual(A, B):
    """Multiply two 2-D arrays using explicit python triple loops.

    Pedagogical only: avoid this in real code, the interpreted loops are
    orders of magnitude slower than numpy.dot.
    """
    from numpy import asarray, sum
    lhs = asarray(A)
    rhs = asarray(B)
    nrows, inner = lhs.shape
    ncols = rhs.shape[1]
    assert lhs.shape[1] == rhs.shape[0]
    # Caveat: works only if A & B dtypes are the same:
    out = numpy.zeros((nrows, ncols), dtype=lhs.dtype)
    for row in range(nrows):
        for col in range(ncols):
            acc = 0
            for k in range(inner):
                acc = acc + lhs[row, k] * rhs[k, col]
            out[row, col] = acc
    return out
def matmul_vecdot(A, B):
    """Semi-manual matrix multiply: python loops over the output cells,
    but each cell is a vectorized elementwise product reduced with numpy sum.
    """
    from numpy import asarray, sum
    lhs = asarray(A)
    rhs = asarray(B)
    nrows, inner = lhs.shape
    ncols = rhs.shape[1]
    assert lhs.shape[1] == rhs.shape[0]
    # Caveat: works only if A & B dtypes are the same:
    out = numpy.zeros((nrows, ncols), dtype=lhs.dtype)
    for row in range(nrows):
        for col in range(ncols):
            out[row, col] = sum(lhs[row, :] * rhs[:, col])
    return out
def matmul_matdot(A, B):
    """Performs matrix multiplication with python using numpy.dot.

    Note that numpy.dot is equivalent to matrix-matrix multiplication
    for 2-D arrays; we exploit this fact here!
    """
    from numpy import asarray
    A = asarray(A)
    B = asarray(B)
    # Unpack to validate that A is 2-D, then check inner-dimension compatibility.
    # (The original computed M, K, N but never used them -- removed as dead code.)
    M, K = A.shape
    assert A.shape[1] == B.shape[0]
    return numpy.dot(A, B)
def matmul1(argv):
    """Load two matrices from the files named in argv[1] and argv[2],
    multiply them, and print the product to standard output.
    """
    if len(argv) < 3:
        print("Needs an input file name on arg1", file=sys.stderr)
        sys.exit(1)
    A = loadmatrix1(argv[1])
    B = loadmatrix1(argv[2])
    product = matmul_matdot(A, B)
    printmatrix1(product, float_fmt=" %12.6f")
#printmatrix1(C)
if __name__ == "__main__":
    # Script entry point: expects two matrix file names as CLI arguments.
    matmul1(sys.argv)
| 25.306122 | 70 | 0.632661 |
b55ca415b7f5c8d9785400aec4d8b65c7b08158c | 335 | py | Python | apps/local_apps/profiles/urls.py | google-code-export/django-hotclub | d783a5bbcc06816289565f3eae6d99461188ca4a | [
"MIT"
] | 4 | 2016-05-08T06:07:50.000Z | 2021-11-21T19:41:40.000Z | apps/local_apps/profiles/urls.py | pombreda/django-hotclub | d783a5bbcc06816289565f3eae6d99461188ca4a | [
"MIT"
] | null | null | null | apps/local_apps/profiles/urls.py | pombreda/django-hotclub | d783a5bbcc06816289565f3eae6d99461188ca4a | [
"MIT"
] | 3 | 2017-07-09T02:14:54.000Z | 2021-07-13T19:16:59.000Z | from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^$', 'profiles.views.profiles', name='profile_list'),
url(r'^(?P<username>[\w]+)/$', 'profiles.views.profile', name='profile_detail'),
url(r'^username_autocomplete/$', 'profiles.views.username_autocomplete', name='profile_username_autocomplete'),
)
| 41.875 | 115 | 0.704478 |
438ac75e7a209d57339a2e9ce1efd380383b79a9 | 9,777 | py | Python | portfolio/admin.py | open-risk/equinox | 0503e716b566ff7c776f04a611879f88d86e1cc6 | [
"Apache-2.0"
] | 10 | 2021-03-21T22:05:33.000Z | 2022-03-15T18:26:58.000Z | portfolio/admin.py | open-risk/equinox | 0503e716b566ff7c776f04a611879f88d86e1cc6 | [
"Apache-2.0"
] | 2 | 2021-10-30T15:15:41.000Z | 2021-11-11T12:35:02.000Z | portfolio/admin.py | open-risk/equinox | 0503e716b566ff7c776f04a611879f88d86e1cc6 | [
"Apache-2.0"
] | 1 | 2022-03-16T18:59:36.000Z | 2022-03-16T18:59:36.000Z | # Copyright (c) 2021 Open Risk (https://www.openriskmanagement.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from django import forms
from django.contrib.gis import admin
from django.forms.widgets import NumberInput
from django.http import HttpResponseRedirect
from django.urls import reverse
from treebeard.admin import TreeAdmin
from treebeard.forms import movenodeform_factory
from portfolio.Asset import Asset
from portfolio.EmissionsSource import EmissionsSource
from portfolio.Contractor import Contractor
from portfolio.Loan import Loan
from portfolio.Operator import Operator
from portfolio.Portfolios import Portfolio, PortfolioData, LimitStructure
from portfolio.PrimaryEffect import PrimaryEffect
from portfolio.Project import Project
from portfolio.ProjectActivity import ProjectActivity
from portfolio.ProjectCategory import ProjectCategory
from portfolio.ProjectCompany import ProjectCompany
from portfolio.Revenue import Revenue
from portfolio.SecondaryEffect import SecondaryEffect
from portfolio.Sponsor import Sponsor
from portfolio.Stakeholders import Stakeholders
from portfolio.Swap import Swap
from portfolio.models import PointSource, AreaSource
#
# Tree Objects
#
class ProjectCategoryAdmin(TreeAdmin):
    """Tree-aware admin (django-treebeard) for the ProjectCategory hierarchy."""
    view_on_site = False
    list_display = ('name',)
    # movenodeform_factory supplies the node-move fields treebeard requires.
    form = movenodeform_factory(ProjectCategory)


admin.site.register(ProjectCategory, ProjectCategoryAdmin)
#
# Geospatial Objects (Source Geometries)
#
@admin.register(PointSource)
class PointSourceAdmin(admin.OSMGeoAdmin):
    """Point Source admin."""

    # OSMGeoAdmin renders the geometry field on an OpenStreetMap widget.
    list_display = ("name",)
    view_on_site = False
    save_as = True
    search_fields = ['name']
    # list_filter = ('location',)
    date_hierarchy = ('creation_date')  # parentheses are redundant; this is the string 'creation_date'
@admin.register(AreaSource)
class AreaSourceAdmin(admin.OSMGeoAdmin):
    """Project Region admin."""

    # OSMGeoAdmin renders the geometry field on an OpenStreetMap widget.
    list_display = ("name",)
    view_on_site = False
    save_as = True
    search_fields = ['name']
    date_hierarchy = ('creation_date')
#
# Regular Objects
#
@admin.register(Asset)
class AssetAdmin(admin.ModelAdmin):
    """Project Asset admin"""
    view_on_site = False
    save_as = True
    search_fields = ['name']
    list_filter = ('asset_class', 'project')
    date_hierarchy = ('creation_date')

    # Group the edit form into themed sections; 'Other' starts collapsed.
    fieldsets = (
        ('Identification', {
            'fields': ('asset_identifier', 'description', 'asset_class')
        }),
        ('Relations', {
            'fields': ('project', 'legal_owner'),
        }),
        ('GHG Emissions', {
            'fields': ('asset_ghg_emissions',),
        }),
        ('Financial', {
            'fields': ('latest_valuation_amount',),
        }),
        ('Other', {
            'classes': ('collapse',),
            'fields': ('activation_of_guarantee',),
        }),
    )
@admin.register(EmissionsSource)
class EmissionsSourceAdmin(admin.ModelAdmin):
    """EmissionsSource admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    """Project admin"""
    view_on_site = False
    save_as = True  # enables "Save as new" for quick duplication
    date_hierarchy = ('creation_date')
@admin.register(PrimaryEffect)
class PrimaryEffectAdmin(admin.ModelAdmin):
    """Primary Effect admin"""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(SecondaryEffect)
class SecondaryEffectAdmin(admin.ModelAdmin):
    """Secondary Effect admin"""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(ProjectActivity)
class ProjectActivityAdmin(admin.ModelAdmin):
    """Project Activity admin"""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(ProjectCompany)
class ProjectCompanyAdmin(admin.ModelAdmin):
    """ProjectCompany admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Revenue)
class RevenueAdmin(admin.ModelAdmin):
    """Revenue admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Loan)
class LoanAdmin(admin.ModelAdmin):
    """Loan admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Stakeholders)
class StakeholdersAdmin(admin.ModelAdmin):
    """Stakeholders admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Sponsor)
class SponsorAdmin(admin.ModelAdmin):
    """Sponsor admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Contractor)
class ContractorAdmin(admin.ModelAdmin):
    """Contractor admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Operator)
class OperatorAdmin(admin.ModelAdmin):
    """Operator admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
@admin.register(Swap)
class SwapAdmin(admin.ModelAdmin):
    """Swap admin (default flat change list)."""
    view_on_site = False
    save_as = True
    date_hierarchy = ('creation_date')
class PortfolioDataAdminForm(forms.ModelForm):
    """Admin form for PortfolioData with bounded credit-risk input fields."""
    # Exposure at Default: non-negative, entered with 0.01 step granularity.
    EAD = forms.fields.FloatField(min_value=0, widget=NumberInput(attrs={'step': 0.01}), label="EAD",
                                  help_text="Exposure at Default")
    # Loss Given Default class, restricted to the discrete scale 0..5.
    LGD = forms.fields.IntegerField(min_value=0, max_value=5, label="LGD", help_text="Loss Given Default Class (0 - 5)")
    # Maturity in whole years, 1..10.
    Tenor = forms.fields.IntegerField(min_value=1, max_value=10, help_text="Tenor (Maturity) in Integer Years")

    def __init__(self, *args, **kwargs):
        # No extra initialization; kept for future customization hooks.
        super(PortfolioDataAdminForm, self).__init__(*args, **kwargs)

    class Meta:
        model = PortfolioData
        fields = '__all__'
        # exclude = ('portfolio_id', 'Obligor_ID')
        # widgets = {
        #     'Obligor_ID': TextInput(attrs={'disabled': True}),
        # }
class LimitStructureAdmin(admin.ModelAdmin):
    """Admin for user-generated LimitStructure objects."""
    search_fields = ['notes']
    list_display = ('name', 'creation_date', 'notes')
    save_as = True
    view_on_site = False
    date_hierarchy = ('creation_date')

    def changelist_view(self, request, extra_context=None):
        # Inject a banner message for the changelist template.
        extra_context = {
            'message': 'LimitStructure Administration: Overview of User Generated Limit Structures and their Properties',
        }
        return super(LimitStructureAdmin, self).changelist_view(request, extra_context=extra_context)
class PortfolioAdmin(admin.ModelAdmin):
    """Admin for user-generated Portfolio objects."""
    search_fields = ['notes']
    list_display = ('name', 'portfolio_type', 'generation', 'creation_date', 'notes')
    list_filter = ('portfolio_type', 'generation')
    save_as = True
    view_on_site = False
    date_hierarchy = ('creation_date')

    def changelist_view(self, request, extra_context=None):
        # Inject a banner message for the changelist template.
        extra_context = {
            'message': 'Portfolio Administration: Overview of User Generated Portfolios and their Properties',
        }
        return super(PortfolioAdmin, self).changelist_view(request, extra_context=extra_context)

    def response_delete(self, request, obj_display, obj_id):
        # After deleting a portfolio, return to the portfolio explorer list
        # instead of the default admin changelist.
        return HttpResponseRedirect(reverse("portfolio_explorer:portfolio_list"))
class PortfolioDataAdmin(admin.ModelAdmin):
    """Admin for per-obligor PortfolioData rows, using the bounded custom form."""
    # readonly_fields = ('portfolio_id')
    fields = ('portfolio_id', 'Obligor_ID', 'EAD', 'LGD', 'Tenor', 'Sector', 'Country')
    form = PortfolioDataAdminForm
    list_display = ('Obligor_ID', 'EAD', 'LGD', 'Tenor', 'Sector', 'Country')
    list_filter = ('portfolio_id',)
    save_as = True
    view_on_site = False
    date_hierarchy = ('creation_date')

    def response_change(self, request, obj, post_url_continue=None):
        """This makes the response after adding go to another apps changelist for some model"""
        return HttpResponseRedirect(reverse("portfolio_explorer:portfolio_view", args=[obj.portfolio_id.pk]))

    # def response_delete(self, request, obj, post_url_continue=None):
    #    return HttpResponseRedirect(reverse("portfolio_view", args=[obj.portfolio_id.pk]))

    def changelist_view(self, request, extra_context=None):
        # Inject a banner message for the changelist template.
        extra_context = {
            'message': 'Portfolio Data Administration: Overview of User Generated Portfolio Data',
        }
        return super(PortfolioDataAdmin, self).changelist_view(request, extra_context=extra_context)

    # Hack to be able to return to parent portfolio after item delete
    # NOTE(review): class-level state shared by all requests to this admin
    # instance; not safe under concurrent deletes -- confirm acceptable.
    deleted_fk = None

    def delete_view(self, request, object_id, extra_context=None):
        # Remember the parent portfolio pk before the row disappears.
        self.deleted_fk = PortfolioData.objects.get(id=object_id).portfolio_id.pk
        return super(PortfolioDataAdmin, self).delete_view(request, object_id, extra_context)

    def response_delete(self, request, obj_display, obj_id):
        # Redirect back to the parent portfolio captured in delete_view above.
        return HttpResponseRedirect(reverse("portfolio_explorer:portfolio_view", args=[self.deleted_fk]))
# Explicit registrations for admin classes not using the @admin.register decorator.
admin.site.register(Portfolio, PortfolioAdmin)
admin.site.register(PortfolioData, PortfolioDataAdmin)
admin.site.register(LimitStructure, LimitStructureAdmin)
| 32.161184 | 121 | 0.719137 |
9e528087cd7c536a10c1c3dd856d02f525b57726 | 1,004 | py | Python | tools/testing/kunit/qemu_configs/riscv.py | jainsakshi2395/linux | 7ccb860232bb83fb60cd6bcf5aaf0c008d903acb | [
"Linux-OpenIB"
] | 44 | 2022-03-16T08:32:31.000Z | 2022-03-31T16:02:35.000Z | tools/testing/kunit/qemu_configs/riscv.py | jainsakshi2395/linux | 7ccb860232bb83fb60cd6bcf5aaf0c008d903acb | [
"Linux-OpenIB"
] | 1 | 2021-01-27T01:29:47.000Z | 2021-01-27T01:29:47.000Z | tools/testing/kunit/qemu_configs/riscv.py | jainsakshi2395/linux | 7ccb860232bb83fb60cd6bcf5aaf0c008d903acb | [
"Linux-OpenIB"
] | 18 | 2022-03-19T04:41:04.000Z | 2022-03-31T03:32:12.000Z | from ..qemu_config import QemuArchParams
import os
import os.path
import sys

GITHUB_OPENSBI_URL = 'https://github.com/qemu/qemu/raw/master/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin'
OPENSBI_FILE = os.path.basename(GITHUB_OPENSBI_URL)

# The riscv64 QEMU 'virt' machine needs an OpenSBI firmware blob to boot.
# If it is not in the working directory, offer to download it interactively
# (this runs at import time, when kunit loads this config module).
if not os.path.isfile(OPENSBI_FILE):
	print('\n\nOpenSBI file is not in the current working directory.\n'
	      'Would you like me to download it for you from:\n' + GITHUB_OPENSBI_URL + ' ?\n')
	response = input('yes/[no]: ')
	if response.strip() == 'yes':
		os.system('wget ' + GITHUB_OPENSBI_URL)
	else:
		sys.exit()

# Kernel config fragment and QEMU invocation for running KUnit on riscv64.
QEMU_ARCH = QemuArchParams(linux_arch='riscv',
			   kconfig='''
CONFIG_SOC_VIRT=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''',
			   qemu_arch='riscv64',
			   kernel_path='arch/riscv/boot/Image',
			   kernel_command_line='console=ttyS0',
			   extra_qemu_params=[
					   '-machine virt',
					   '-cpu rv64',
					   '-bios opensbi-riscv64-generic-fw_dynamic.bin'])
27265738ad2558e1e22252e919f77d41da121054 | 1,260 | py | Python | google/ads/googleads/v4/services/types/ad_group_simulation_service.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/services/types/ad_group_simulation_service.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v4/services/types/ad_group_simulation_service.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
# Register this module's message types with proto-plus so they marshal
# to/from the google.ads.googleads.v4 wire format.
__protobuf__ = proto.module(
    package="google.ads.googleads.v4.services",
    marshal="google.ads.googleads.v4",
    manifest={"GetAdGroupSimulationRequest",},
)
class GetAdGroupSimulationRequest(proto.Message):
    r"""Request message for
    [AdGroupSimulationService.GetAdGroupSimulation][google.ads.googleads.v4.services.AdGroupSimulationService.GetAdGroupSimulation].

    Attributes:
        resource_name (str):
            Required. The resource name of the ad group
            simulation to fetch.
    """

    # Proto field 1: the ad group simulation resource name.
    resource_name = proto.Field(proto.STRING, number=1)


__all__ = tuple(sorted(__protobuf__.manifest))
| 30 | 132 | 0.733333 |
e7ebb63d54c8fa3b879abef8dd6c0f642dd4c018 | 92,453 | py | Python | mindspore/numpy/array_creations.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | mindspore/numpy/array_creations.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | mindspore/numpy/array_creations.py | Vincent34/mindspore | a39a60878a46e7e9cb02db788c0bca478f2fa6e5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""array operations, the function docs are adapted from Numpy API."""
import math
import operator
import numpy as onp
from ..common import Tensor
from ..common import dtype as mstype
from ..ops import operations as P
from ..ops import functional as F
from ..ops.primitive import constexpr
from ..nn.layer.basic import tril as nn_tril
from ..nn.layer.basic import triu as nn_triu
from .._c_expression import Tensor as Tensor_
from .utils import _check_input_for_asarray, _deep_list, _deep_tensor_to_nparray, \
_check_input_tensor, _convert_64_to_32, _get_dtype_from_scalar, \
_expand, _to_tensor, _slice_along_axis, _callable
from .utils_const import _raise_value_error, _empty, _max, _min, \
_check_same_type, _is_shape_empty, _check_shape, _check_dtype, _tile_size, _abs, \
_raise_type_error, _expanded_shape, _check_is_float, _iota, _type_convert, \
_canonicalize_axis, _list_comprehensions, _ceil, _tuple_slice, _raise_unimplemented_error, \
_tuple_setitem
from .array_ops import ravel, concatenate, broadcast_arrays, reshape, broadcast_to, flip, \
apply_along_axis, where, moveaxis
from .dtypes import nan, pi
# According to official numpy reference, the dimension of a numpy array must be less
# than 32
MAX_NUMPY_DIMS = 32
# All types that can be accepted as "array_like" parameters in graph mode.
ARRAY_TYPES = (int, float, bool, list, tuple, Tensor)

# Reduction primitives with keep_dims=True, shared across this module.
_reduce_min_keepdims = P.ReduceMin(True)
_reduce_max_keepdims = P.ReduceMax(True)
_reduce_mean_keepdims = P.ReduceMean(True)
def array(obj, dtype=None, copy=True, ndmin=0):
    """
    Creates a tensor.

    This function creates tensors from an array-like object.

    Args:
        obj (Union[int, float, bool, list, tuple]): Input data, in any form that
            can be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
            be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
            of the new tensor will be inferred from obj. Default is :class:`None`.
        copy (bool): If `True`, then the object is copied. Otherwise, a copy will
            only be made if necessary. Default: `True`.
        ndmin (int): Specifies the minimum number of dimensions that the resulting
            tensor should have. Ones will be pre-pended to the shape as needed to
            meet this requirement. Default: 0

    Returns:
        Tensor, generated tensor with the specified dtype.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If input `obj` has different sizes at different dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.array([1,2,3]))
        [1 2 3]
    """
    res = asarray(obj, dtype)

    if ndmin > res.ndim:
        # Empty tensors cannot be reshaped to a higher rank, so reject early.
        if res.size == 0:
            _raise_value_error("Empty tensor cannot be expanded beyond the current dimension.")
        res = _expand(res, ndmin)

    if copy:
        res = copy_(res)
    elif dtype is not None and dtype != res.dtype:
        # asarray may have returned the input tensor unchanged; honor the requested dtype.
        res = res.astype(dtype)

    return res
@constexpr
def asarray_const(a, dtype=None):
    """Converts the input to tensor. Note here `a` cannot be tensor itself.

    Runs at graph-compile time (constexpr), so `a` must be a Python constant.
    """
    _check_input_for_asarray(a)

    if dtype is not None:
        dtype = _check_dtype(dtype)

    if isinstance(a, (float, int, bool)) and dtype is None:
        dtype = _get_dtype_from_scalar(a)

    if isinstance(a, (list, tuple)):
        # Convert all tuple/nested tuples to lists
        a = _deep_list(a)
        # Convert all tensor sub-elements to numpy arrays
        a = _deep_tensor_to_nparray(a)
        a = onp.asarray(a)
        if a.dtype is onp.dtype('object'):
            # object dtype means ragged input (rows of different lengths).
            raise ValueError('Input array must have the same size across all dimensions.')
        # If dtype is not specified, we keep consistent with numpy decision
        # only exceptions are: we use int/float32
        if dtype is None:
            dtype = mstype.pytype_to_dtype(a.dtype)
            if dtype == mstype.float64:
                dtype = mstype.float32
            elif dtype == mstype.int64:
                dtype = mstype.int32

    if isinstance(a, onp.ndarray) and dtype is None:
        if a.dtype is onp.dtype('object'):
            raise TypeError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.")
        dtype = mstype.pytype_to_dtype(a.dtype)
        # Zero-copy wrap of the numpy buffer before the final dtype cast.
        a = Tensor.from_numpy(a)

    return Tensor(a, dtype=dtype)
def asarray(a, dtype=None):
    """
    Converts the input to tensor.

    This function converts tensors from an array-like object.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
            be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
            be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
            of the new tensor will be inferred from obj. Default is :class:`None`.

    Returns:
        Tensor, generated tensor with the specified dtype.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If input `a` has different sizes at different dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.asarray([1,2,3]))
        [1 2 3]
    """
    if isinstance(a, Tensor):
        # Already a tensor: return it as-is unless a different dtype was requested.
        if dtype is None or dtype == a.dtype:
            return a
        return a.astype(dtype)
    # Non-tensor inputs are converted by the compile-time (constexpr) helper.
    return asarray_const(a, dtype)
@constexpr
def asfarray_const(a, dtype=mstype.float32):
    """Converts the input to tensor. Note here `a` cannot be tensor itself."""
    _check_input_for_asarray(a)
    if isinstance(a, (list, tuple)):
        # Convert all tuple/nested tuples to lists
        a = _deep_list(a)
        # Convert all tensor sub-elements to numpy arrays
        a = _deep_tensor_to_nparray(a)
        a = onp.asarray(a)
        if a.dtype is onp.dtype('object'):
            # object dtype means ragged input (rows of different lengths).
            raise ValueError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.")
        a = Tensor.from_numpy(a)

    return Tensor(a, dtype)
def asfarray(a, dtype=mstype.float32):
    """
    Similar to asarray, converts the input to a float tensor.

    If non-float dtype is defined, this function will return a float32 tensor instead.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
            be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
            be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type
            of the new tensor will be inferred from `a`. Default is :class:`mindspore.float32`.

    Returns:
        Tensor, generated tensor with the specified float dtype.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If input `a` has different sizes at different dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.asfarray([1,2,3]))
        [1. 2. 3.]
    """
    if dtype is None:
        # dtype=None falls back to plain asarray semantics (inferred dtype).
        return asarray(a)

    dtype = _check_dtype(dtype)
    # Coerce any non-float dtype to float32, as documented above.
    if dtype not in (mstype.float16, mstype.float32, mstype.float64):
        dtype = mstype.float32

    if isinstance(a, Tensor):
        return a.astype(dtype)

    return asfarray_const(a, dtype)
def copy_(a):
    """
    Returns a tensor copy of the given object.

    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data, in any form that can
            be converted to a `Tensor`. This includes Tensor, list, tuple and numbers.

    Returns:
        Tensor, has the same data as `a`.

    Raises:
        TypeError: If input `a` has type not specified above.
        ValueError: If input `a` has different sizes at different dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.ones((2,2))
        >>> print(np.copy(x))
        [[1. 1.]
         [1. 1.]]
    """
    # Normalize array-like input to a tensor, then copy so the result
    # never aliases the input's storage.
    a = asarray(a)
    return a.copy()
def ones(shape, dtype=mstype.float32):
    """
    Returns a new tensor of given shape and type, filled with ones.

    Args:
        shape (Union[int, tuple, list]): the shape of the new tensor.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
            Default is :class:`mstype.float32`.

    Returns:
        Tensor, with the designated `shape` and `dtype`, filled with ones.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `shape` entries have values :math:`< 0`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.ones((2,2)))
        [[1. 1.]
         [1. 1.]]
    """
    shape = _check_shape(shape)
    dtype = _check_dtype(dtype)
    if _is_shape_empty(shape):
        # Shapes containing a zero dimension go through full(), which
        # constructs the zero-size tensor directly.
        return full(shape, 1.0, dtype)
    output = F.fill(dtype, shape, 1)
    return output
def zeros(shape, dtype=mstype.float32):
    """
    Returns a new tensor of given shape and type, filled with zeros.

    Args:
        shape (Union[int, tuple, list]): the shape of the new tensor.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
            Default is :class:`mstype.float32`.

    Returns:
        Tensor, with the designated `shape` and `dtype`, filled with zeros.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `shape` entries have values :math:`< 0`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.zeros((2,2)))
        [[0. 0.]
         [0. 0.]]
    """
    shape = _check_shape(shape)
    dtype = _check_dtype(dtype)
    if _is_shape_empty(shape):
        # Shapes containing a zero dimension go through full(), which
        # constructs the zero-size tensor directly.
        return full(shape, 0.0, dtype)
    output = F.fill(dtype, shape, 0)
    return output
def full(shape, fill_value, dtype=None):
    """
    Returns a new tensor of given shape and type, filled with `fill_value`.

    Args:
        shape (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g.,
            :math:`(2, 3)` or :math:`2`.
        fill_value (Union[int, float, bool, list, tuple]): Scalar or array_like
            fill value.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype,
            if `dtype` is :class:`None`, the data type of the new tensor will be inferred from
            `fill_value`. Default is :class:`None`.

    Returns:
        Tensor, with the designated shape and dtype, filled with `fill_value`.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If `shape` has entries < 0.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.full((2,2), True))
        [[True True]
         [True True]]
    """
    shape = _check_shape(shape)
    if not isinstance(fill_value, ARRAY_TYPES):
        _raise_type_error("fill value should be int, float, bool, list, tuple, Tensor, but got", fill_value)
    if dtype is not None:
        dtype = _check_dtype(dtype)
    else:
        # Infer the dtype from the fill value when not given explicitly.
        if isinstance(fill_value, (int, float, bool)):
            dtype = _get_dtype_from_scalar(fill_value)
        if isinstance(fill_value, Tensor):
            dtype = fill_value.dtype

    if not _is_shape_empty(shape):
        if isinstance(fill_value, (int, float, bool)):
            return F.fill(dtype, shape, fill_value)
        # Array-like fill values are broadcast across the requested shape.
        if isinstance(fill_value, (list, tuple)):
            fill_value = asarray_const(fill_value)
        return broadcast_to(fill_value, shape)
    # if shape contains zero, use c.Tensor()
    return _convert_64_to_32(empty_compile(dtype, shape))
def arange(start, stop=None, step=None, dtype=None):
    """
    Returns evenly spaced values within a given interval.

    Args:
        start(Union[int, float]): Start of interval. The interval includes this value.
            When `stop` is provided as a position argument, `start` must be given, when `stop`
            is a normal argument, `start` can be optional, and default is 0.
            Please see additional examples below.
        stop(Union[int, float], optional): End of interval. The interval does not
            include this value, except in some cases where `step` is not an integer
            and floating point round-off affects the length of out.
        step(Union[int, float], optional): Spacing between values. For any output
            `out`, this is the distance between two adjacent values, :math:`out[i+1] - out[i]`.
            The default step size is 1. If `step` is specified as a position argument,
            `start` must also be given.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
            If dtype is None, the data type of the new tensor will be inferred from start,
            stop and step. Default is None.

    Returns:
        Tensor with evenly spaced values.

    Raises:
        TypeError(PyNative Mode) or RuntimeError(Graph Mode): If input arguments
            have types not specified above, or arguments are not given in the correct
            orders specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.arange(0, 5, 1))
        [0 1 2 3 4]
        >>> print(np.arange(3))
        [0 1 2]
        >>> print(np.arange(start=0, stop=3))
        [0 1 2]
        >>> print(np.arange(0, stop=3, step=0.5))
        [0.  0.5 1.  1.5 2.  2.5]
        >>> print(np.arange(stop=3)) # This will lead to TypeError
    """
    # This implementation was inspired by jax.numpy.arange
    # infer the dtype
    if dtype is None:
        dtype = _get_dtype_from_scalar(start, stop, step)
    # The sequence is always generated in float32 via _iota and cast at the
    # end, so each branch only has to compute `num` and the affine transform.
    if stop is None and step is None:    # (start, stop, step) -> (0, start, 1)
        num = _ceil(start)
        out = _iota(mstype.float32, num)
    elif step is None:  # (start, stop, step) -> (start, stop, 1)
        num = _ceil(stop - start)
        out = _iota(mstype.float32, num) + start
    elif stop is None:  # (start, stop, step) -> (0, start, step)
        num = _ceil((start + 0.0) / step)
        out = _iota(mstype.float32, num) * step
    else:  # (start, stop, step) -> (start, stop, step)
        num = _ceil((stop - start + 0.0) / step)
        out = _iota(mstype.float32, num) * step + start
    return out.astype(dtype)
def _type_checking_for_xspace(start, stop, num, endpoint, dtype):
    """utility parameter checking function for linspace, logspace, geomspace.

    Validates types, converts start/stop to float32 tensors, defaults dtype
    to float32 and broadcasts start/stop against each other.
    """
    if not isinstance(start, ARRAY_TYPES):
        _raise_type_error("start should be int, float, bool, list, tuple, Tensor, but got", start)
    if not isinstance(stop, ARRAY_TYPES):
        _raise_type_error("end should be int, float, bool, list, tuple, Tensor, but got", stop)
    if not isinstance(start, Tensor):
        start = _type_convert(Tensor, start).astype(mstype.float32)
    if not isinstance(stop, Tensor):
        stop = _type_convert(Tensor, stop).astype(mstype.float32)
    if not isinstance(num, int):
        _raise_type_error("num should be an integer, but got ", num)
    if not isinstance(endpoint, bool):
        _raise_type_error("endpoint should be an boolean, but got ", endpoint)
    if dtype is not None:
        dtype = _check_dtype(dtype)
    else:
        dtype = mstype.float32
    # Align start/stop to a common shape so callers can interpolate elementwise.
    start, stop = broadcast_arrays(start, stop)
    return start, stop, num, endpoint, dtype
def _compute_shapes(start, axis, num, endpoint):
    """Computes shapes for local variables for np.linspace.

    Returns (bounds_shape, iota_shape, div):
      bounds_shape -- start/stop shape with a length-1 axis inserted at `axis`
      iota_shape   -- all-ones shape with `num` at `axis` (for the sample index ramp)
      div          -- float32 divisor for the step (num-1 when `endpoint`, else num)
    """
    bounds_shape = start.shape
    bounds_shape = _tuple_slice(bounds_shape, None, axis) + (1,) + _tuple_slice(bounds_shape, axis, None)
    iota_shape = _list_comprehensions(start.ndim+1, 1, True)
    iota_shape = _tuple_slice(iota_shape, None, axis) + (num,) + _tuple_slice(iota_shape, axis+1, None)
    num_tensor = _type_convert(Tensor, num).astype(mstype.float32)
    # With endpoint=True the last sample must land exactly on `stop`,
    # hence the step divisor is num-1 instead of num.
    div = (num_tensor - 1) if endpoint else num_tensor
    return bounds_shape, iota_shape, div
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0):
    """
    Returns evenly spaced values within a given interval.

    Args:
        start (Union[int, list(int), tuple(int), tensor]): The starting value of the sequence.
        stop (Union[int, list(int), tuple(int), tensor]): The end value of the sequence,
            unless `endpoint` is set to False. In that case, the sequence consists
            of all but the last of `num + 1` evenly spaced samples, so that `stop`
            is excluded. Note that the step size changes when `endpoint` is False.
        num (int, optional): Number of samples to generate. Default is 50.
        endpoint (bool, optional): If True, `stop` is the last sample. Otherwise, it is
            not included. Default is True.
        retstep (bool, optional): If True, return (`samples`, `step`), where `step` is
            the spacing between samples.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype,
            If `dtype` is None, infer the data type from other input arguments. Default is None.
        axis (int, optional): The axis in the result to store the samples. Relevant
            only if start or stop are array-like. By default :math:`(0)`, the samples will
            be along a new axis inserted at the beginning. Use :math:`-1` to get an axis at the end.
            Default is :math:`0`.

    Returns:
        Tensor, with `num` equally spaced samples in the closed interval
        :math:`[start, stop]` or the half-open interval :math:`[start, stop)`
        (depending on whether `endpoint` is True or False).

        Step, the size of spacing between samples, only returned if `retstep` is True.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.linspace(0, 5, 6))
        [0. 1. 2. 3. 4. 5.]
    """
    # This implementation was inspired by jax.numpy.linspace and numpy.linspace
    start, stop, num, endpoint, dtype = _type_checking_for_xspace(start, stop, num, endpoint, dtype)
    axis = _canonicalize_axis(axis, start.ndim+1)
    if not isinstance(retstep, bool):
        _raise_type_error("retstep should be an boolean, but got ", retstep)
    bounds_shape, iota_shape, div = _compute_shapes(start, axis, num, endpoint)
    out = None
    delta = None
    if num > 1:
        # Step between consecutive samples; computed in float32.
        delta = (stop - start) / div
        # This is similar to how numpy and jax compute linspace:
        # samples = start + [0, 1, ..., num-1] * delta, broadcast along `axis`.
        start_expand = reshape(start, bounds_shape)
        incremental_expand = reshape(_iota(mstype.float32, num), iota_shape)
        delta_expand = reshape(delta, bounds_shape)
        start_expand, incremental_expand, delta_expand = broadcast_arrays(
            start_expand, incremental_expand, delta_expand)
        out = start_expand + (incremental_expand * delta_expand)
        # recover endpoint: overwrite the last sample with `stop` exactly, to
        # avoid float rounding drift from the incremental computation.
        if endpoint:
            out = moveaxis(out, axis, 0)
            out[-1] = stop
            out = moveaxis(out, 0, axis)
    elif num == 1:
        # numpy convention: step is NaN when a single included endpoint is asked for.
        delta = nan if endpoint else stop - start
        out = reshape(start, bounds_shape)
    else:  # num == 0
        _raise_value_error("cannot support Tensor with num=0.")
    if retstep:
        return out.astype(dtype), delta
    return out.astype(dtype)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
    """
    Returns numbers spaced evenly on a log scale.

    In linear space, the sequence starts at base ** start (base to the power of
    start) and ends with base ** stop (see endpoint below).

    Args:
        start (Union[int, list(int), tuple(int), tensor]): ``base ** start`` is the starting
            value of the sequence.
        stop (Union[int, list(int), tuple(int), tensor]): ``base ** stop`` is the final value of
            the sequence, unless `endpoint` is False. In that case, ``num + 1`` values are spaced
            over the interval in log-space, of which all but the last (a sequence of length num)
            are returned.
        num (int, optional): Number of samples to generate. Default is 50.
        endpoint (bool, optional): If True, `stop` is the last sample. Otherwise, it is
            not included. Default is True.
        base (Union[int, float], optional): The base of the log space. The step size
            between the elements in :math:`ln(samples) / ln(base)` (or :math:`log_{base}(samples)`)
            is uniform. Default is :math:`10.0`.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
            If `dtype` is None, infer the data type from other input arguments. Default is None.
        axis (int, optional): The axis in the result to store the samples. Relevant
            only if start or stop is array-like. By default (:math:`0`), the samples will
            be along a new axis inserted at the beginning. Use :math:`-1` to get an axis at the end.
            Default is :math:`0`.

    Returns:
        Tensor, equally spaced on a log scale.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.logspace(0, 5, 6, base=2.0))
        [ 1.  2.  4.  8. 16. 32.]
    """
    # This implementation was inspired by jax.numpy.linspace and numpy.linspace
    start, stop, num, endpoint, dtype = _type_checking_for_xspace(start, stop, num, endpoint, dtype)
    axis = _canonicalize_axis(axis, start.ndim+1)
    if not isinstance(base, (int, float, bool)):
        _raise_type_error("base should be a number, but got ", base)
    # Evenly space the exponents, then raise `base` to them.
    linspace_res = linspace(start, stop, num, endpoint=endpoint, retstep=False, dtype=None, axis=axis)
    return F.tensor_pow(base, linspace_res).astype(dtype)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
    """
    Returns numbers spaced evenly on a log scale (a geometric progression).

    This is similar to logspace, but with endpoints specified directly. Each output sample
    is a constant multiple of the previous.

    Args:
        start (Union[int, list(int), tuple(int), tensor]): The starting value of the sequence.
        stop (Union[int, list(int), tuple(int), tensor]): The final value of the sequence,
            unless endpoint is False. In that case, num + 1 values are spaced over the
            interval in log-space, of which all but the last (a sequence of length num) are
            returned.
        num (int, optional): Number of samples to generate. Default is 50.
        endpoint (bool, optional): If True, `stop` is the last sample. Otherwise, it is
            not included. Default is True.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can
            be in format of np.float32, or `float32`.If `dtype` is None, infer the data
            type from other input arguments. Default is None.
        axis (int, optional): The axis in the result to store the samples. Relevant
            only if start or stop is array-like. By default (0), the samples will
            be along a new axis inserted at the beginning. Use -1 to get an axis at the end.
            Default is 0.

    Returns:
        Tensor, with samples equally spaced on a log scale.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> output = np.geomspace(1, 256, num=9)
        >>> print(output)
        [  1.   2.   4.   8.  16.  32.  64. 128. 256.]
        >>> output = np.geomspace(1, 256, num=8, endpoint=False)
        >>> print(output)
        [  1.   2.   4.   8.  16.  32.  64. 128.]
    """
    start, stop, num, endpoint, dtype = _type_checking_for_xspace(start, stop, num, endpoint, dtype)
    axis = _canonicalize_axis(axis, start.ndim+1)
    # `root` is the number of multiplicative steps between start and stop.
    root = num
    if endpoint:
        root -= 1
    # NOTE(review): when num == 1 and endpoint is True, root == 0 and
    # ``1./(root)`` raises ZeroDivisionError — confirm whether num=1 is
    # expected to be supported here (numpy.geomspace accepts it).
    bases = F.tensor_pow(F.tensor_div(stop, start), asarray_const(1./(root)))
    # Exponents 0..root spaced like linspace; result is start * ratio**exponent.
    exponents = linspace(zeros(F.shape(bases)), F.fill(F.dtype(bases), F.shape(bases), root),
                         num, endpoint=endpoint, dtype=dtype, axis=axis)
    shape = F.shape(bases)
    axis = axis + F.rank(bases) + 1 if axis < 0 else axis
    expanded_shape = _tuple_slice(shape, None, axis) + (1,) + _tuple_slice(shape, axis, None)
    bases = F.reshape(bases, expanded_shape)
    start = F.reshape(start, expanded_shape)
    res = F.tensor_mul(F.tensor_pow(bases, exponents), start)
    if dtype is not None:
        res = F.cast(res, dtype)
    return res
def eye(N, M=None, k=0, dtype=mstype.float32):
    """
    Returns a 2-D tensor with ones on the diagnoal and zeros elsewhere.

    Args:
        N (int): Number of rows in the output, must be larger than 0.
        M (int, optional): Number of columns in the output. If is :class:`None`, defaults to `N`,
            if defined, must be larger than 0. Deault is :class:`None`.
        k (int, optional): Index of the diagonal: 0 (the default) refers to the main
            diagonal, a positive value refers to an upper diagonal, and a negative value
            to a lower diagonal. Default is 0.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype.
            Default is mstype.float32.

    Returns:
        A tensor of shape (N, M). A tensor where all elements are equal to zero,
        except for the k-th diagonal, whose values are equal to one.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.eye(2, 2))
        [[1. 0.]
        [0. 1.]]
    """
    dtype = _check_dtype(dtype)
    if M is None:
        M = N
    if not (isinstance(M, int) and isinstance(N, int) and isinstance(k, int)):
        _raise_type_error("Input tensor dimensions should be integers.")
    out = None
    if N == 0 or M == 0:
        # Fill the shape with any value is fine (the tensor has no elements).
        return full((N, M), 0, dtype)
    # Main-diagonal identity; a non-zero `k` is handled below by shifting.
    out = F.eye(N, M, dtype)
    if k >= M or k <= -N:
        # Diagonal lies entirely outside the matrix: all zeros.
        return full((N, M), 0, dtype)
    if k != 0:
        # Shift the diagonal by concatenating a zero block: on the left for
        # k > 0 (upper diagonal), on the top for k < 0 (lower diagonal).
        # Computation runs in float32; cast back to `dtype` on return.
        out = out.astype(mstype.float32)
        if k > 0:
            out_left = full((N, k), 0, dtype)
            out_right = out[..., 0:M-k:1]
            return concatenate((out_left, out_right), 1).astype(dtype)
        if k < 0:
            out_upper = full((-k, M), 0, dtype)
            out_lower = out[0:N+k:1, ...]
            return concatenate((out_upper, out_lower), 0).astype(dtype)
    return out
def identity(n, dtype=mstype.float32):
    """
    Returns the identity tensor.

    A convenience wrapper around :func:`eye` producing a square matrix.

    Args:
        n (int): Number of rows and columns in the output, must be larger than 0.
        dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype,
            default is :class:`mstype.float32`.

    Returns:
        A tensor of shape `(n, n)`, where all elements are equal to zero,
        except for the diagonal, whose values are equal to one.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If input arguments have types not specified above.

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.identity(2))
        [[1. 0.]
        [0. 1.]]
    """
    if not isinstance(n, int):
        _raise_type_error("Input tensor dimensions should be integers.")
    checked_dtype = _check_dtype(dtype)
    return eye(n, dtype=checked_dtype)
@constexpr
def empty_compile(dtype, shape):
    """Returns an uninitialized Tensor of the given dtype and shape.

    Evaluated at graph-compile time via the `@constexpr` decorator.
    """
    return Tensor_(dtype, shape)
def empty(shape, dtype=mstype.float32):
    """
    Returns a new array of given shape and type, without initializing
    entries.

    Note:
        Numpy argument `order` is not supported.
        Object arrays are not supported.

    Args:
        shape (Union[int, tuple(int)]): Shape of the empty array, e.g.,
            (2, 3) or 2.
        dtype (:class:`mindspore.dtype`, optional): Desired output data-type for the
            array, e.g, mstype.int8. Default is mstype.float32.

    Returns:
        Tensor, array of uninitialized (arbitrary) data of the given
        shape and dtype.

    Raises:
        TypeError: if the input shape or dtype is invalid.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.empty((2, 3))
        >>> print(output)
        # result may vary
        Tensor(shape=[2, 3], dtype=Float32, value=
        <uninitialized>)
    """
    # Validate both arguments up front, then delegate to the constexpr builder.
    checked_shape = _check_shape(shape)
    checked_dtype = _check_dtype(dtype)
    return empty_compile(checked_dtype, checked_shape)
def _get_shape(array_like):
    """Returns the shape of the array like object, converting non-Tensors first."""
    tensor = array_like if isinstance(array_like, Tensor) else asarray_const(array_like)
    return tensor.shape
def _get_dtype(array_like):
    """Returns the data type of the array like object, converting non-Tensors first."""
    tensor = array_like if isinstance(array_like, Tensor) else asarray_const(array_like)
    return tensor.dtype
def _x_like(prototype, dtype, shape, constructor, fill_value=None):
    """
    Builds a tensor with the same shape and type as `prototype` by calling
    `constructor`; `dtype`/`shape` override the prototype's attributes, and
    `fill_value` (if given) is forwarded as the constructor's fill argument.
    """
    if not isinstance(prototype, ARRAY_TYPES):
        _raise_type_error("prototype should be int, float, bool, list, tuple, Tensor, but got", prototype)
    final_dtype = _get_dtype(prototype) if dtype is None else dtype
    # An empty list/tuple shape is treated the same as "not provided".
    shape_unset = shape is None or (isinstance(shape, (list, tuple)) and not shape)
    final_shape = _get_shape(prototype) if shape_unset else shape
    if fill_value is None:
        return constructor(final_shape, final_dtype)
    return constructor(final_shape, fill_value, final_dtype)
def empty_like(prototype, dtype=None, shape=None):
    """
    Returns a new uninitialized array with the same shape and type as `prototype`.

    Note:
        Input array must have the same size across a dimension.
        If `prototype` is not a Tensor, dtype is float32 by default if not provided.

    Args:
        prototype (Union[Tensor, list, tuple]): The shape and data-type of `prototype`
            define these same attributes of the returned array.
        dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
            result.
        shape (int or sequence of ints, optional): Overrides the shape
            of the result.

    Returns:
        Tensor, array of uninitialized (arbitrary) data with the same
        shape and type as `prototype`.

    Raises:
        ValueError: if `prototype` is not a Tensor, list or tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((4,1,2))
        >>> output = np.empty_like(a)
        >>> print(output)
        # result may vary
        Tensor(shape=[4, 1, 2], dtype=Float32, value=
        <uninitialized>)
    """
    uninitialized = _x_like(prototype, dtype, shape, empty)
    return uninitialized
def ones_like(a, dtype=None, shape=None):
    """
    Returns an array of ones with the same shape and type as `a`.

    Note:
        Input array must have the same size across a dimension.
        If `a` is not a Tensor, dtype is float32 by default if not provided.

    Args:
        a (Union[Tensor, list, tuple]): The shape and data-type of `a` define these same
            attributes of the returned array.
        dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
            result.
        shape (int or sequence of ints, optional): Overrides the shape
            of the result.

    Returns:
        Tensor, array of ones with the same shape and type as `a`.

    Raises:
        ValueError: if `a` is not a Tensor, list or tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((4,1,2))
        >>> output = np.ones_like(a)
        >>> print(output)
        [[[1. 1.]]
        [[1. 1.]]
        [[1. 1.]]
        [[1. 1.]]]
    """
    filled_with_ones = _x_like(a, dtype, shape, ones)
    return filled_with_ones
def zeros_like(a, dtype=None, shape=None):
    """
    Returns an array of zeros with the same shape and type as `a`.

    Note:
        Input array must have the same size across a dimension.
        If `a` is not a Tensor, dtype is float32 by default if not provided.

    Args:
        a (Union[Tensor, list, tuple]): The shape and data-type of `a` define these same
            attributes of the returned array.
        dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
            result.
        shape (int or sequence of ints, optional): Overrides the shape
            of the result.

    Returns:
        Tensor, array of zeros with the same shape and type as `a`.

    Raises:
        ValueError: if `a` is not a Tensor, list or tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((4,1,2))
        >>> output = np.zeros_like(a)
        >>> print(output)
        [[[0. 0.]]
        [[0. 0.]]
        [[0. 0.]]
        [[0. 0.]]]
    """
    filled_with_zeros = _x_like(a, dtype, shape, zeros)
    return filled_with_zeros
def full_like(a, fill_value, dtype=None, shape=None):
    """
    Returns a full array with the same shape and type as `a`.

    Note:
        Input array must have the same size across a dimension.
        If `a` is not a Tensor, dtype is float32 by default if not provided.

    Args:
        a (Union[Tensor, list, tuple]): The shape and data-type of `a` define these same
            attributes of the returned array.
        fill_value (scalar): Fill value.
        dtype (:class:`mindspore.dtype`, optional): Overrides the data type of the
            result.
        shape (int or sequence of ints, optional): Overrides the shape
            of the result.

    Returns:
        Tensor, array of `fill_value` with the same shape and type as `a`.

    Raises:
        ValueError: if `a` is not a Tensor, list or tuple.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.ones((4,1,2))
        >>> output = np.full_like(a, 0.5)
        >>> print(output)
        [[[0.5 0.5]]
        [[0.5 0.5]]
        [[0.5 0.5]]
        [[0.5 0.5]]]
    """
    filled = _x_like(a, dtype, shape, full, fill_value=fill_value)
    return filled
def tri(N, M=None, k=0, dtype=mstype.float32):
    """
    Returns a tensor with ones at and below the given diagonal and zeros elsewhere.

    Args:
        N(int): Number of rows in the array.
        M(int, optional): Number of columns in the array. By default, `M` is taken
            equal to N.
        k(int, optional): The sub-diagonal at and below which the array is filled.
            :math:`k = 0` is the main diagonal, while :math:`k < 0` is below it, and :math:`k > 0` is above.
            The default is 0.
        dtype(:class:`mindspore.dtype`, optional): Data type of the returned array. The default
            is mstype.float32.

    Returns:
        Tensor with shape `(N, M)`, with its lower triangle filled with
        ones and zeros elsewhere; in other words :math:`T[i,j] = 1` for :math:`j <= i + k`,
        :math:`0` otherwise.

    Raises:
        TypeError: If input arguments have types not specified above.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.tri(3, 3, 1)
        >>> print(output)
        [[1. 1. 0.]
        [1. 1. 1.]
        [1. 1. 1.]]
    """
    num_cols = N if M is None else M
    return nn_tril((N, num_cols), dtype, k)
def tril(m, k=0):
    """
    Returns a lower triangle of a tensor.

    Returns a copy of a tensor with elements above the `k-th` diagonal zeroed.

    Args:
        m (Union[Tensor, list, tuple]): The shape and data-type of `m` define these same
            attributes of the returned tensor.
        k (int, optional): Diagonal above which to zero elements. :math:`k = 0` (the default)
            is the main diagonal, :math:`k < 0` is below it and :math:`k > 0` is above.

    Returns:
        Lower triangle of `m`, of same shape and data-type as `m`.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If input `m`\'s rank :math:`< 1`.

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.tril(np.ones((3, 3)))
        >>> print(output)
        [[1. 0. 0.]
        [1. 1. 0.]
        [1. 1. 1.]]
    """
    tensor_m = m if isinstance(m, Tensor) else asarray_const(m)
    original_dtype = tensor_m.dtype
    # Multiply by a lower-triangular mask in float32, then restore the dtype.
    as_float = tensor_m.astype(mstype.float32)
    mask = nn_tril(as_float.shape, mstype.float32, k)
    return F.tensor_mul(mask, as_float).astype(original_dtype)
def triu(m, k=0):
    """
    Returns an upper triangle of a tensor.

    Returns a copy of a tensor with elements below the `k-th` diagonal zeroed.

    Args:
        m (Union[Tensor, list, tuple]): The shape and data-type of `m` define these same
            attributes of the returned tensor.
        k (int, optional): Diagonal below which to zero elements. :math:`k = 0` (the default)
            is the main diagonal, :math:`k < 0` is below it and :math:`k > 0` is above.

    Returns:
        Upper triangle of `m`, of same shape and data-type as `m`.

    Raises:
        TypeError: If input arguments have types not specified above.
        ValueError: If input `m`\'s rank < 1.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.triu(np.ones((3, 3)))
        >>> print(output)
        [[1. 1. 1.]
        [0. 1. 1.]
        [0. 0. 1.]]
    """
    tensor_m = m if isinstance(m, Tensor) else asarray_const(m)
    original_dtype = tensor_m.dtype
    # Multiply by an upper-triangular mask in float32, then restore the dtype.
    as_float = tensor_m.astype(mstype.float32)
    mask = nn_triu(as_float.shape, mstype.float32, k)
    return F.tensor_mul(mask, as_float).astype(original_dtype)
def diagonal(a, offset=0, axis1=0, axis2=1):
    """
    Returns specified diagonals.

    Delegates to :meth:`Tensor.diagonal`. If `a` is 2-D, returns the diagonal
    of `a` with the given offset, i.e., the collection of elements of the form
    ``a[i, i+offset]``. If `a` has more than two dimensions, then the axes
    specified by `axis1` and `axis2` are used to determine the 2-D sub-array
    whose diagonal is returned. The shape of the resulting array can be
    determined by removing `axis1` and `axis2` and appending an index to the
    right equal to the size of the resulting diagonals.

    Args:
        a (Tensor): Array from which the diagonals are taken.
        offset (int, optional): Offset of the diagonal from the main diagonal.
            Can be positive or negative. Defaults to main diagonal.
        axis1 (int, optional): Axis to be used as the first axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            first axis (0).
        axis2 (int, optional): Axis to be used as the second axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            second axis.

    Returns:
        Tensor, if `a` is 2-D, then `a` 1-D array containing the diagonal. If
        ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` are removed,
        and a new axis inserted at the end corresponding to the diagonal.

    Raises:
        ValueError: if the input tensor has less than two dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> a = np.arange(4).reshape(2,2)
        >>> print(a)
        [[0 1]
        [2 3]]
        >>> output = np.diagonal(a)
        >>> print(output)
        [0 3]
        >>> output = np.diagonal(a, 1)
        >>> print(output)
        [1]
        >>> a = np.arange(8).reshape(2, 2, 2)
        >>> print(a)
        [[[0 1]
        [2 3]]
        [[4 5]
        [6 7]]]
        >>> output = np.diagonal(a, 0, 0, 1)
        >>> print(output)
        [[0 6]
        [1 7]]
    """
    diag_view = a.diagonal(offset=offset, axis1=axis1, axis2=axis2)
    return diag_view
def trace(a, offset=0, axis1=0, axis2=1, dtype=None):
    """
    Returns the sum along diagonals of the array.

    Delegates to :meth:`Tensor.trace`. If `a` is 2-D, the sum along its
    diagonal with the given offset is returned, i.e., the sum of elements
    ``a[i,i+offset]`` for all `i`. If `a` has more than two dimensions, then
    the axes specified by `axis1` and `axis2` are used to determine the 2-D
    sub-arrays whose traces are returned. The shape of the resulting array is
    the same as that of `a` with `axis1` and `axis2` removed.

    Note:
        On GPU, the supported dtypes are np.float16, and np.float32.
        On CPU, the supported dtypes are np.float16, np.float32, and np.float64.

    Args:
        a (Tensor): Array from which the diagonals are taken.
        offset (int, optional): Offset of the diagonal from the main diagonal.
            Can be positive or negative. Defaults to main diagonal.
        axis1 (int, optional): Axis to be used as the first axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            first axis (0).
        axis2 (int, optional): Axis to be used as the second axis of the 2-D
            sub-arrays from which the diagonals should be taken. Defaults to
            second axis.
        dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
            output Tensor.

    Returns:
        Tensor, sum_along_diagonals. If `a` is 2-D, the sum along the diagonal
        is returned. If `a` has larger dimensions, then an array of sums along
        diagonals is returned.

    Raises:
        ValueError: if the input tensor has less than two dimensions.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.trace(np.eye(3))
        >>> print(output)
        3.0
        >>> a = np.arange(8).reshape((2,2,2))
        >>> output = np.trace(a)
        >>> print(output)
        [6 8]
        >>> a = np.arange(24).reshape((2,2,2,3))
        >>> output = np.trace(a).shape
        >>> print(output)
        (2, 3)
    """
    diagonal_sum = a.trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
    return diagonal_sum
def _index(i, size, cartesian=True):
"""If cartesian=True, index 0 is swapped with index 1."""
if cartesian:
if i == 1:
return 0
if i == 0 and size >= 2:
return 1
return i
def meshgrid(*xi, sparse=False, indexing='xy'):
    """
    Returns coordinate matrices from coordinate vectors.

    Make `N-D` coordinate arrays for vectorized evaluations of `N-D`
    scalar/vector fields over `N-D` grids, given one-dimensional
    coordinate arrays `x1, x2,…, xn`.

    Note:
        Numpy argument copy is not supported, and a copy is always
        returned.

    Args:
        *xi (Tensor): 1-D arrays representing the coordinates
            of a grid.
        indexing (‘xy’, ‘ij’, optional): Cartesian (‘xy’, default) or
            matrix (‘ij’) indexing of output. In the 2-D case with
            inputs of length `M` and `N`, the outputs are of shape `(N, M)`
            for ‘xy’ indexing and `(M, N)` for ‘ij’ indexing. In the 3-D
            case with inputs of length `M`, `N` and `P`, outputs are of shape
            `(N, M, P)` for ‘xy’ indexing and `(M, N, P)` for ‘ij’ indexing.
        sparse (bool, optional): If True a sparse grid is returned in
            order to conserve memory. Default is False.

    Returns:
        Tuple of tensors, for vectors `x1, x2,…, xn` with lengths
        ``Ni=len(xi)``, return `(N1, N2, N3,...Nn)` shaped arrays if
        ``indexing=’ij’`` or `(N2, N1, N3,...Nn)` shaped arrays if
        ``indexing=’xy’`` with the elements of `xi` repeated to fill the matrix
        along the first dimension for `x1`, the second for `x2` and so on.

    Raises:
        TypeError: if the input is not a tensor, or sparse is not boolean, or
            indexing is not 'xy' or 'ij'.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.linspace(0, 1, 3)
        >>> y = np.linspace(0, 1, 2)
        >>> xv, yv = np.meshgrid(x, y)
        >>> print(xv)
        [[0.  0.5 1. ]
        [0.  0.5 1. ]]
        >>> print(yv)
        [[0.  0.  0.]
        [1.  1.  1.]]
        >>> xv, yv = np.meshgrid(x, y, sparse=True)
        >>> print(xv)
        [[0.  0.5  1. ]]
        >>> print(yv)
        [[0.]
        [1.]]
    """
    _check_input_tensor(*xi)
    if not isinstance(sparse, bool):
        _raise_type_error('argument sparse should be boolean')
    if indexing not in ('xy', 'ij'):
        _raise_type_error("Valid values for `indexing` are 'xy' and 'ij'.")
    # If any input is empty, the output grid is empty too; return early.
    shape_out = ()
    for x in xi:
        shape_out += (x.size,)
    if _is_shape_empty(shape_out):
        return ones(shape_out)
    # Flatten any non-1-D input so every coordinate vector is 1-D.
    grids = []
    for x in xi:
        if F.rank(x) == 1:
            grids.append(x)
        else:
            grids.append(ravel(x))
    ndim = len(grids)
    cartesian = indexing == 'xy'
    # For 'xy' indexing, _index swaps the first two axes so the output
    # shape is (N2, N1, N3, ...) instead of (N1, N2, N3, ...).
    shape_out = ()
    for i in range(len(grids)):
        grid_index = _index(i, ndim, cartesian=cartesian)
        shape_out += (F.shape(grids[grid_index])[0],)
    res = []
    for i, x in enumerate(grids):
        grid_index = _index(i, ndim, cartesian=cartesian)
        # Reshape each vector so its values lie along its own output axis
        # (all other axes are length 1).
        shape_expanded = _expanded_shape(ndim, shape_out[grid_index], grid_index)
        x = x.reshape(shape_expanded)
        if not sparse:
            # Dense grid: tile the singleton axes up to the full output shape.
            x = F.tile(x, _tile_size(shape_expanded, shape_out, ndim))
        res.append(x)
    return res
class NdGrid:
    """
    Construct a multi-dimensional "meshgrid".

    ``grid = NdGrid()`` creates an instance which will return a mesh-grid
    when indexed.
    If instantiated with an argument of ``sparse=True``, the mesh-grid is
    open (or not fleshed out) so that only one-dimension of each
    returned argument is greater than 1.

    Args:
        sparse (bool): Whether the grid is sparse or not. Default is
            False.

    Returns:
        Tensor or tuple of tensor, a meshgrid. If ``sparse=False``, returns
        tensors are all of the same dimensions; and if ``sparse=True``,
        returns tensors with only one dimension not equal to `1`.
    """
    def __init__(self, sparse=False):
        # When True, grids keep singleton dimensions (an "open" mesh).
        self.sparse = sparse
    def __getitem__(self, keys):
        # A single slice (e.g. grid[0:5]) is normalized to a 1-tuple.
        if isinstance(keys, slice):
            keys = (keys,)
        # Build one 1-D coordinate vector per slice.
        xi = []
        for k in keys:
            if not isinstance(k.start, int) or not isinstance(k.stop, int):
                _raise_type_error('slice indices must be integers')
            if k.step:
                step = k.step
            else:
                step = 1
            # numpy mgrid/ogrid convention: a complex step (e.g. 5j) means
            # "number of points", with the stop value included.
            if isinstance(step, complex):
                v = linspace(k.start, k.stop, int(abs(step)))
            else:
                v = arange(k.start, k.stop, step)
            xi.append(v)
        grids = meshgrid(*xi, sparse=self.sparse, indexing='ij')
        # Single-slice case: return the lone grid directly, not a sequence.
        if len(grids) == 1:
            return grids[0]
        if self.sparse:
            return grids
        if isinstance(grids, Tensor_):
            return grids
        # Dense case: stack the grids along a new leading axis.
        expanded = []
        for grid in grids:
            expanded.append(F.expand_dims(grid, 0))
        res = concatenate(tuple(expanded))
        return res
class MGridClass(NdGrid):
    """
    mgrid is an :class:`NdGrid` instance with ``sparse=False``, i.e. a
    dense mesh-grid builder.

    The dimension and number of the output arrays are equal to the number
    of indexing dimensions. If the step length is not a complex number,
    then the stop is not inclusive. However, if the step length is a complex
    number (e.g. 5j), then the integer part of its magnitude is interpreted
    as specifying the number of points to create between the start and
    stop values, where the stop value is inclusive.

    Note:
        Not supported in graph mode.
        Unlike Numpy, if the step length is a complex number with a real
        component, the step length is handled as equivalent to
        ``int(abs(step))``.

    Returns:
        Tensor or tuple of tensor, a meshgrid.

    Raises:
        TypeError: if slicing indices are not integers.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore.numpy import mgrid
        >>> output = mgrid[0:5, 0:5]
        >>> print(output)
        [[[0 0 0 0 0]
        [1 1 1 1 1]
        [2 2 2 2 2]
        [3 3 3 3 3]
        [4 4 4 4 4]]
        [[0 1 2 3 4]
        [0 1 2 3 4]
        [0 1 2 3 4]
        [0 1 2 3 4]
        [0 1 2 3 4]]]
        >>> output = mgrid[-1:1:5j]
        >>> print(output)
        [-1.  -0.5  0.   0.5  1. ]
    """
    def __init__(self):
        super().__init__(sparse=False)
class OGridClass(NdGrid):
    """
    ogrid is an :class:`NdGrid` instance with ``sparse=True``, i.e. an
    open (sparse) mesh-grid builder.

    The dimension and number of the output arrays are equal to the number
    of indexing dimensions. If the step length is not a complex number,
    then the stop is not inclusive. However, if the step length is a complex
    number (e.g. 5j), then the integer part of its magnitude is interpreted
    as specifying the number of points to create between the start and
    stop values, where the stop value is inclusive.

    Note:
        Not supported in graph mode.
        Unlike Numpy, if the step length is a complex number with a real
        component, the step length is handled as equivalent to
        ``int(abs(step))``.

    Raises:
        TypeError: if slicing indices are not integers.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> from mindspore.numpy import ogrid
        >>> output = ogrid[0:5,0:5]
        >>> print(output)
        [Tensor(shape=[5, 1], dtype=Int32, value=
        [[0],
        [1],
        [2]
        [3],
        [4]]), Tensor(shape=[1, 5], dtype=Int32, value=
        [[0, 1, 2, 3, 4]])]
        >>> output = ogrid[-1:1:5j]
        >>> print(output)
        [-1.  -0.5  0.   0.5  1. ]
    """
    def __init__(self):
        super().__init__(sparse=True)
# Module-level singleton grid helpers, analogous to numpy's index-trick
# objects: `mgrid` builds dense grids, `ogrid` builds sparse (open) grids.
mgrid = MGridClass()
ogrid = OGridClass()
def diag(v, k=0):
    """
    Extracts a diagonal or construct a diagonal array.

    Args:
        v (Tensor): If `v` is a 2-D array, return a copy of its `k-th` diagonal.
            If `v` is a 1-D array, return a 2-D array with v on the `k-th` diagonal.
        k (int, optional): Diagonal in question. The default is 0. Use ``k>0`` for
            diagonals above the main diagonal, and ``k<0`` for diagonals below the
            main diagonal.

    Returns:
        Tensor, the extracted diagonal or constructed diagonal array.

    Raises:
        ValueError: if input is not 1-D or 2-D.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> x = np.arange(9).reshape((3,3))
        >>> print(x)
        [[0 1 2]
        [3 4 5]
        [6 7 8]]
        >>> output = np.diag(x)
        >>> print(output)
        [0 4 8]
        >>> output = np.diag(x, k=1)
        >>> print(output)
        [1 5]
        >>> output = np.diag(x, k=-1)
        >>> print(output)
        [3 7]
    """
    ndim = F.rank(v)
    # 1-D input: construct a 2-D array with `v` on the k-th diagonal.
    if ndim == 1:
        return diagflat(v, k=k)
    # 2-D input: extract the k-th diagonal by masking with `eye` and
    # summing each row, then slicing to the diagonal's actual length.
    if ndim == 2:
        shape = F.shape(v)
        dtype = F.dtype(v)
        if _is_shape_empty(shape):
            return _empty(dtype, (0,))
        e = eye(shape[0], shape[1], k, dtype)
        prod = F.tensor_mul(v, e)
        cast_type = dtype
        if not _check_is_float(dtype):
            # reduce sum only supports float types
            cast_type = mstype.float32
            prod = F.cast(prod, cast_type)
        # Each row of `prod` has at most one non-zero entry (the diagonal
        # element), so a row-sum recovers the diagonal values.
        res = F.reduce_sum(prod, 1)
        # Keep only rows that actually intersect the k-th diagonal.
        res = res[_max(0, -k): _min(shape[0], _max(0, shape[1] - k))]
        if not _check_same_type(cast_type, dtype):
            res = F.cast(res, dtype)
        return res
    return _raise_value_error("Input must be 1- or 2-d.")
def diagflat(v, k=0):
    """
    Creates a two-dimensional array with the flattened input as a diagonal.

    Note:
        On GPU, the supported dtypes are np.float16, and np.float32.

    Args:
        v (Tensor): Input data, which is flattened and set as the `k-th` diagonal
            of the output.
        k (int, optional): Diagonal to set; 0, the default, corresponds to the
            “main” diagonal, a positive (negative) `k` giving the number of the
            diagonal above (below) the main.

    Returns:
        Tensor, The 2-D output array.

    Raises:
        TypeError: if the input is not a tensor.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.diagflat(np.asarray([[1,2], [3,4]]))
        >>> print(output)
        [[1 0 0 0]
        [0 2 0 0]
        [0 0 3 0]
        [0 0 0 4]]
        >>> output = np.diagflat(np.asarray([1,2]), 1)
        >>> print(output)
        [[0 1 0]
        [0 0 2]
        [0 0 0]]
    """
    _check_input_tensor(v)
    dtype = F.dtype(v)
    k_abs = _abs(k)
    if _is_shape_empty(F.shape(v)):
        # Empty input still yields a (|k|, |k|) zero matrix, matching numpy.
    
        return zeros((k_abs, k_abs), dtype)
    # Place the flattened values on the main diagonal of a size×size matrix.
    v = ravel(v)
    size = F.shape(v)[0]
    e = eye(size, size, 0, dtype)
    res = F.tensor_mul(v, e)
    if k != 0:
        # Shift the diagonal off-center by padding with zero blocks:
        # k > 0 pads columns on the left and rows below;
        # k < 0 pads columns on the right and rows on top.
        pad_y = zeros((size, k_abs), dtype)
        pad_x = zeros((k_abs, size + k_abs), dtype)
        if k < 0:
            res = concatenate((res, pad_y), axis=1)
            res = concatenate((pad_x, res), axis=0)
        else:
            res = concatenate((pad_y, res), axis=1)
            res = concatenate((res, pad_x), axis=0)
    return res
def diag_indices(n, ndim=2):
    """
    Returns the indices to access the main diagonal of an array.
    This returns a tuple of indices that can be used to access the main
    diagonal of an array a with ``a.ndim >= 2`` dimensions and shape `(n, n, …, n)`.
    For ``a.ndim = 2`` this is the usual diagonal, for ``a.ndim > 2`` this is the set
    of indices to access ``a[i, i, ..., i]`` for ``i = [0..n-1]``.
    Args:
        n (int): The size, along each dimension, of the arrays for which
            the returned indices can be used.
        ndim (int, optional): The number of dimensions.
    Returns:
        Tuple of Tensor.
    Raises:
        TypeError: if input are not integers.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> output = np.diag_indices(5, 3)
        >>> print(output)
        (Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]),
        Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]),
        Tensor(shape=[5], dtype=Int32, value= [0, 1, 2, 3, 4]))
    """
    if not isinstance(n, int) or not isinstance(ndim, int):
        _raise_type_error('input must be integers')
    # The same index vector [0, n) is repeated once per dimension.
    return _list_comprehensions(ndim, arange(start=0, stop=n), True)
def ix_(*args):
    r"""
    Constructs an open mesh from multiple sequences.
    This function takes `N` 1-D sequences and returns `N` outputs with `N`
    dimensions each, such that the shape is 1 in all but one dimension
    and the dimension with the non-unit shape value cycles through all
    N dimensions.
    Using ix\_ one can quickly construct index arrays that will index
    the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
    Note:
        Boolean masks are not supported.
    Args:
        *args (Tensor): 1-D sequences.
    Returns:
        Tuple of Tensor, `N` arrays with `N` dimensions each, with `N` the
        number of input sequences. Together these arrays form an open
        mesh.
    Raises:
        TypeError: if the input is not a tensor.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> ixgrid = np.ix_(np.array([0, 1]), np.array([2, 4]))
        >>> print(ixgrid)
        (Tensor(shape=[2, 1], dtype=Int32, value=
        [[0],
        [1]]), Tensor(shape=[1, 2], dtype=Int32, value=
        [[2, 4]]))
    """
    _check_input_tensor(*args)
    ndim = len(args)
    res = ()
    for i, arr in enumerate(args):
        if F.rank(arr) != 1:
            return _raise_value_error('Cross index must be 1 dimensional')
        # Reshape the i-th sequence to (1, ..., size, ..., 1) with `size`
        # placed at axis i, so the outputs broadcast into an open mesh.
        res += (F.reshape(arr, _expanded_shape(ndim, arr.size, i)),)
    return res
def vander(x, N=None, increasing=False):
    """
    Generates a Vandermonde matrix.
    The columns of the output matrix are powers of the input vector. The order of
    the powers is determined by the increasing boolean argument. Specifically, when
    increasing is `False`, the i-th output column is the input vector raised element-wise
    to the power of :math:`N - i - 1`. Such a matrix with a geometric progression in each row
    is named for Alexandre-Theophile Vandermonde.
    Args:
        x (Union[list, tuple, Tensor]): 1-D input array.
        N (int, optional): Number of columns in the output. If N is not specified, a
            square array is returned (``N = len(x)``).
        increasing (bool, optional): Order of the powers of the columns. If True, the
            powers increase from left to right, if False (the default) they are reversed.
    Returns:
        Vandermonde matrix. If `increasing` is `False`, the first column is :math:`x^{(N-1)}`,
        the second :math:`x^{(N-2)}` and so forth. If `increasing` is `True`, the columns are
        :math:`x^0, x^1, ..., x^{(N-1)}`.
    Raises:
        TypeError: If inputs have types not specified above.
        ValueError: If `x` is not 1-D, or `N` <= 0.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.vander([1., 2., 3., 4., 5.]))
        [[ 1 1 1 1 1]
        [ 16 8 4 2 1]
        [ 81 27 9 3 1]
        [256 64 16 4 1]
        [625 125 25 5 1]]
    """
    if isinstance(x, (list, tuple)):
        x = asarray_const(x)
    elif not isinstance(x, Tensor):
        _raise_type_error("Input x must be list, tuple or Tensor, but got ", x)
    if x.ndim != 1:
        _raise_value_error("Input x must be 1-D, but got dimension=", x.ndim)
    # `N = N or x.size` would silently replace an explicit N=0 with x.size;
    # only substitute the default when N is omitted, so that N=0 reaches the
    # explicit error path below.
    if N is None:
        N = x.size
    if not isinstance(N, int):
        _raise_type_error("Input N must be an integer.")
    if N <= 0:
        _raise_value_error("Input N must > 0.")
    if not isinstance(increasing, bool):
        _raise_type_error("increasing must be a bool.")
    # exponent is [N-1, ..., 0] (or [0, ..., N-1] when increasing); broadcasting
    # x (column) against exponent (row) yields the full matrix in one power op.
    exponent = _iota(x.dtype, N, increasing)
    x = F.expand_dims(x, 1)
    exponent = F.expand_dims(exponent, 0)
    return F.tensor_pow(x, exponent)
def indices(dimensions, dtype=mstype.int32, sparse=False):
    """
    Returns an array representing the indices of a grid.
    Computes an array where the subarrays contain index values 0, 1, …
    varying only along the corresponding axis.
    Args:
        dimensions (tuple or list of ints): The shape of the grid.
        dtype (:class:`mindspore.dtype`, optional): Data type of the result.
        sparse (boolean, optional): Defaults to False. Return a sparse
            representation of the grid instead of a dense representation.
    Returns:
        Tensor or tuple of Tensor, If `sparse` is False, returns one array
        of grid indices, ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
        If sparse is True, returns a tuple of arrays, with
        ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
        ``dimensions[i]`` in the `ith` place
    Raises:
        TypeError: if input dimensions is not a tuple or list.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> grid = np.indices((2, 3))
        >>> print(grid)
        [Tensor(shape=[2, 3], dtype=Int32, value=
        [[0, 0, 0],
        [1, 1, 1]]), Tensor(shape=[2, 3], dtype=Int32, value=
        [[0, 1, 2],
        [0, 1, 2]])]
    """
    if not isinstance(dimensions, (tuple, list)):
        _raise_type_error('Shape of the grid must be tuple or list')
    # One 1-D index vector per axis; meshgrid with matrix ('ij') indexing
    # broadcasts them into the (dense or sparse) grid representation.
    grids = ()
    for d in dimensions:
        grids += (arange(d, dtype=dtype),)
    return meshgrid(*grids, sparse=sparse, indexing='ij')
def _check_window_size(x):
"""Returns True if window size is greater than 1."""
if not isinstance(x, int):
_raise_type_error('the number fo points should be an int')
return x > 1
def bartlett(M):
    """
    Returns the Bartlett window.
    The Bartlett window is very similar to a triangular window, except that the
    end points are at zero. It is often used in signal processing for tapering a
    signal, without generating too much ripple in the frequency domain.
    Args:
        M (int): Number of points in the output window. If zero or less, an empty
            array is returned.
    Returns:
        Tensor, the triangular window, with the maximum value normalized to one
        (the value one appears only if the number of samples is odd), with the
        first and last samples equal to zero.
    Raises:
        TypeError: if `M` is not an int.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.bartlett(12))
        [0. 0.18181819 0.36363637 0.5454545 0.72727275 0.9090909
        0.9090909 0.72727275 0.5454545 0.36363637 0.18181819 0. ]
    """
    # Degenerate sizes: M <= 0 gives an empty window, M == 1 gives [1.].
    if not _check_window_size(M):
        return ones(_max(0, M))
    n = _iota(mstype.float32, M)
    m_minus_one = _to_tensor(M - 1)
    # w(n) = 1 - |2n - (M-1)| / (M-1), n = 0..M-1
    return _to_tensor(1) - F.absolute(_to_tensor(2)*n - m_minus_one)/m_minus_one
def blackman(M):
    """
    Returns the Blackman window.
    The Blackman window is a taper formed by using the first three terms of a
    summation of cosines. It was designed to have close to the minimal leakage
    possible. It is close to optimal, only slightly worse than a Kaiser window.
    Args:
        M (int): Number of points in the output window. If zero or less, an empty
            array is returned.
    Returns:
        Tensor, the window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).
    Raises:
        TypeError: if `M` is not an int.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.blackman(12))
        [-1.4901161e-08 3.2606430e-02 1.5990365e-01 4.1439798e-01
        7.3604518e-01 9.6704674e-01 9.6704674e-01 7.3604518e-01
        4.1439798e-01 1.5990365e-01 3.2606430e-02 -1.4901161e-08]
    """
    # Degenerate sizes: M <= 0 gives an empty window, M == 1 gives [1.].
    if not _check_window_size(M):
        return ones(_max(0, M))
    # n_doubled holds 2n - (M-1) for n = 0..M-1, so the window is written
    # with arguments symmetric about zero:
    # w = 0.42 + 0.5*cos(pi*x/(M-1)) + 0.08*cos(2*pi*x/(M-1)), x = 2n-(M-1)
    n_doubled = arange(1 - M, M, 2, dtype=mstype.float32)
    return (_to_tensor(0.42) + _to_tensor(0.5)*F.cos(_to_tensor(pi/(M - 1))*n_doubled) +
            _to_tensor(0.08)*F.cos(_to_tensor(2*pi/(M - 1))*n_doubled))
def hamming(M):
    """
    Returns the Hamming window.
    The Hamming window is a taper formed by using a weighted cosine.
    Args:
        M (int): Number of points in the output window. If zero or less, an empty
            array is returned.
    Returns:
        Tensor, the window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).
    Raises:
        TypeError: if `M` is not an int.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.hamming(12))
        [0.08000001 0.15302339 0.34890914 0.6054648 0.841236 0.9813669
        0.9813668 0.8412359 0.6054647 0.34890908 0.15302327 0.08000001]
    """
    # Degenerate sizes: M <= 0 gives an empty window, M == 1 gives [1.].
    if not _check_window_size(M):
        return ones(_max(0, M))
    n = _iota(mstype.float32, M)
    # w(n) = 0.54 - 0.46*cos(2*pi*n / (M-1)), n = 0..M-1
    return _to_tensor(0.54) - _to_tensor(0.46)*F.cos(_to_tensor(2*pi/(M - 1))*n)
def hanning(M):
    """
    Returns the Hanning window.
    The Hanning window is a taper formed by using a weighted cosine.
    Args:
        M (int): Number of points in the output window. If zero or less, an empty
            array is returned.
    Returns:
        Tensor, the window, with the maximum value normalized to one (the value
        one appears only if the number of samples is odd).
    Raises:
        TypeError: if `M` is not an int.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.hanning(12))
        [0. 0.07937324 0.29229254 0.5711574 0.8274304 0.9797465
        0.97974646 0.82743025 0.5711573 0.29229245 0.07937312 0. ]
    """
    # Degenerate sizes: M <= 0 gives an empty window, M == 1 gives [1.].
    if not _check_window_size(M):
        return ones(_max(0, M))
    n = _iota(mstype.float32, M)
    # w(n) = 0.5 - 0.5*cos(2*pi*n / (M-1)), n = 0..M-1
    return _to_tensor(0.5) - _to_tensor(0.5)*F.cos(_to_tensor(2*pi/(M - 1))*n)
@constexpr
def tri_indices(n, k=0, m=None, upper=True):
    """Returns triu/tril indices in o(nm) time.

    Scans every (row, col) cell of an n-by-m grid and collects the
    coordinates on or above (``upper=True``) / on or below (``upper=False``)
    the diagonal offset by `k`.
    """
    if not isinstance(n, (int, float, bool)):
        raise TypeError("Input n must be a number.")
    if not isinstance(k, (int, float, bool)):
        raise TypeError("Input k must be a number.")
    if m is None:
        m = n
    elif not isinstance(m, (int, float, bool)):
        raise TypeError("Input m must be a number.")
    # Upper triangle keeps cells with col >= row + k; lower keeps col <= row + k.
    compare = operator.ge if upper else operator.le
    rows = []
    cols = []
    # math.ceil is used to match numpy's behaviour
    for row in range(math.ceil(n)):
        threshold = row + k
        for col in range(math.ceil(m)):
            if compare(col, threshold):
                rows.append(row)
                cols.append(col)
    return asarray_const(rows), asarray_const(cols)
def triu_indices(n, k=0, m=None):
    """
    Returns the indices for the upper-triangle of an (n, m) array.
    Args:
        n (int): The size of the arrays for which the returned indices will be valid.
        k (int, optional): Diagonal offset.
        m (int, optional): The column dimension of the arrays for which the returned
            arrays will be valid. By default `m` is taken equal to `n`.
    Returns:
        The indices for the triangle. The returned tuple contains two tensors, each
        with the indices along one dimension of the tensor.
    Raises:
        TypeError: if `n`, `k`, `m` are not numbers.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.triu_indices(3))
        (Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
        Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
    """
    # Thin wrapper: delegate to the shared scanner with the upper flag set.
    return tri_indices(n, k=k, m=m, upper=True)
def tril_indices(n, k=0, m=None):
    """
    Returns the indices for the lower-triangle of an (n, m) array.
    Args:
        n (int): The size of the arrays for which the returned indices will be valid.
        k (int, optional): Diagonal offset.
        m (int, optional): The column dimension of the arrays for which the returned
            arrays will be valid. By default `m` is taken equal to `n`.
    Returns:
        The indices for the triangle. The returned tuple contains two tensors, each
        with the indices along one dimension of the tensor.
    Raises:
        TypeError: if `n`, `k`, `m` are not numbers.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.tril_indices(3))
        (Tensor(shape=[6], dtype=Int32, value= [0, 1, 1, 2, 2, 2]),
        Tensor(shape=[6], dtype=Int32, value= [0, 0, 1, 0, 1, 2]))
    """
    # Thin wrapper: delegate to the shared scanner with the upper flag cleared.
    return tri_indices(n, k=k, m=m, upper=False)
def triu_indices_from(arr, k=0):
    """
    Returns the indices for the upper-triangle of `arr`.
    Args:
        arr (Union[Tensor, list, tuple]): 2-dimensional array.
        k (int, optional): Diagonal offset.
    Returns:
        triu_indices_from, tuple of 2 tensor, shape(N)
        Indices for the upper-triangle of `arr`.
    Raises:
        TypeError: if `arr` cannot be converted to tensor, or `k` is not a number.
        ValueError: if `arr` cannot be converted to a 2-dimensional tensor.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> tensor = np.ones((3,3))
        >>> print(np.triu_indices_from(tensor))
        (Tensor(shape=[6], dtype=Int32, value= [0, 0, 0, 1, 1, 2]),
        Tensor(shape=[6], dtype=Int32, value= [0, 1, 2, 1, 2, 2]))
    """
    arr = asarray(arr)
    if arr.ndim != 2:
        _raise_value_error("input array must be 2-d")
    # Dimensions of the (only) two axes of the validated 2-d array.
    rows, cols = arr.shape[-2], arr.shape[-1]
    return triu_indices(rows, k=k, m=cols)
def tril_indices_from(arr, k=0):
    """
    Returns the indices for the lower-triangle of `arr`.
    Args:
        arr (Union[Tensor, list, tuple]): 2-dimensional array.
        k (int, optional): Diagonal offset.
    Returns:
        tril_indices_from, tuple of 2 tensor, shape(N)
        Indices for the lower-triangle of `arr`.
    Raises:
        TypeError: if `arr` cannot be converted to tensor, or `k` is not a number.
        ValueError: if `arr` cannot be converted to a 2-dimensional tensor.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> tensor = np.ones((3,3))
        >>> print(np.tril_indices_from(tensor))
        (Tensor(shape=[6], dtype=Int32, value= [0, 1, 1, 2, 2, 2]),
        Tensor(shape=[6], dtype=Int32, value= [0, 0, 1, 0, 1, 2]))
    """
    arr = asarray(arr)
    if arr.ndim != 2:
        _raise_value_error("input array must be 2-d")
    return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def histogram_bin_edges(a, bins=10, range=None, weights=None): # pylint: disable=redefined-builtin
    """
    Function to calculate only the edges of the bins used by the histogram function.
    Note:
        String values for `bins` is not supported.
    Args:
        a (Union[int, float, bool, list, tuple, Tensor]): Input data. The histogram
            is computed over the flattened array.
        bins ((Union[int, tuple, list, Tensor])): If `bins` is an int, it defines the number
            of equal-width bins in the given range (10, by default). If `bins` is a
            sequence, it defines the bin edges, including the rightmost edge,
            allowing for non-uniform bin widths.
        range((float, float), optional): The lower and upper range of the bins. If
            not provided, `range` is simply ``(a.min(), a.max())``. Values outside
            the range are ignored. The first element of the range must be less than
            or equal to the second. Default is None.
        weights(Union[int, float, bool, list, tuple, Tensor], optional): An array of weights,
            of the same shape as `a`. Each value in `a` only contributes its associated weight
            towards the bin count (instead of 1). This is currently not used by any of the bin
            estimators, but may be in the future. Default is None.
    Returns:
        Tensor, the edges to pass into `histogram`.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Raises:
        TypeError: if `bins` is an array and not one-dimensional.
    Examples:
        >>> import mindspore.numpy as np
        >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
        >>> print(np.histogram_bin_edges(arr, bins=2))
        [0. 2.5 5. ]
    """
    a = _to_tensor(a)
    # weights do not influence the edges; only their shape is validated here.
    if weights is not None:
        weights = _to_tensor(weights)
        if F.shape(a) != F.shape(weights):
            _raise_value_error('weights should have the same shape as a')
    # Explicit edge sequences are returned as-is (after a rank check).
    if isinstance(bins, (tuple, list, Tensor)):
        bins = _to_tensor(bins)
        if F.rank(bins) != 1:
            _raise_value_error('`bins` must be 1d, when an array')
        return bins
    if isinstance(bins, str):
        # linspace does not support Tensor for num
        _raise_unimplemented_error('string value for `bins` not implemented')
    a = a.ravel().astype(mstype.float32)
    if range is None:
        start = F.reduce_min(a)
        end = F.reduce_max(a)
    else:
        if not isinstance(range, (list, tuple)) or len(range) != 2:
            _raise_value_error('`range` should take the form (start, end)')
        start, end = range
        if start > end:
            _raise_value_error('max must be larger than min in range parameter')
        start, end = _to_tensor(start, end)
    # Degenerate range (all values equal): widen by +/- 0.5 like numpy does,
    # so the bins have non-zero width.
    no_range = (end - start) == 0
    start = where(no_range, start - 0.5, start)
    end = where(no_range, end + 0.5, end)
    return linspace(start, end, bins + 1)
def _pad_empty(arr, pad_width):
    """
    pads the array with uninitialized values, used in mode: "empty"

    `pad_width` is a per-axis sequence of (before, after) pad counts.
    The padding is grown one axis at a time by concatenation.
    """
    dtype = arr.dtype
    for i in range(arr.ndim):
        shape = arr.shape
        pad_before = ()
        pad_after = ()
        # To avoid any memory issues, we don't make tensor with 0s in their shapes
        if pad_width[i][0] > 0:
            pad_before += (empty(_tuple_setitem(shape, i, pad_width[i][0]), dtype=dtype),)
        if pad_width[i][1] > 0:
            pad_after += (empty(_tuple_setitem(shape, i, pad_width[i][1]), dtype=dtype),)
        tensor_with_pad = pad_before + (arr,) + pad_after
        arr = concatenate(tensor_with_pad, axis=i)
    return arr
def _pad_constant(arr, pad_width, value):
    """
    pads the array with constant values, used in mode: "constant"

    `pad_width` and `value` are per-axis sequences of (before, after) pairs.
    The padding is grown one axis at a time by concatenation.
    """
    dtype = arr.dtype
    for i in range(arr.ndim):
        shape = arr.shape
        pad_before = ()
        pad_after = ()
        # To avoid any memory issues, we don't make tensor with 0s in their shapes
        if pad_width[i][0] > 0:
            pad_before += (full(_tuple_setitem(shape, i, pad_width[i][0]), value[i][0], dtype=dtype),)
        if pad_width[i][1] > 0:
            pad_after += (full(_tuple_setitem(shape, i, pad_width[i][1]), value[i][1], dtype=dtype),)
        tensor_with_pad = pad_before + (arr,) + pad_after
        arr = concatenate(tensor_with_pad, axis=i)
    return arr
def _pad_statistic(arr, pad_width, stat_length, stat_op):
    """
    pads the array with values calculated along the given axis, used in mode: "maximum",
    "minimum", "mean"

    `stat_op` is a keepdims reduction applied to the first/last `stat_length`
    elements along each axis; its (size-1) result is tiled to the pad width.
    """
    ndim = arr.ndim
    shape = arr.shape
    if stat_length is None:
        # Default: use the whole axis for the statistic.
        stat_length = _make_stat_length(shape)
    else:
        stat_length = _convert_pad_to_nd(stat_length, ndim)
    # stat_length may not exceed the axis length.
    stat_length = _limit_stat_length(stat_length, shape)
    for i in range(ndim):
        pad_before = stat_op(_slice_along_axis(arr, i, 0, stat_length[i][0]), i)
        pad_before = (F.tile(pad_before, _tuple_setitem((1,)*ndim, i, pad_width[i][0])),)
        pad_after = stat_op(_slice_along_axis(arr, i, shape[i]-stat_length[i][1], shape[i]), i)
        pad_after = (F.tile(pad_after, _tuple_setitem((1,)*ndim, i, pad_width[i][1])),)
        tensor_with_pad = pad_before + (arr,) + pad_after
        arr = concatenate(tensor_with_pad, axis=i)
    return arr
def _pad_edge(arr, pad_width):
    """pad_edge is equivalent to pad_statistic with stat_length=1, used in mode:"edge"."""
    # With stat_length=1 the "statistic" is just the edge element itself,
    # so the identity reduction suffices.
    def identity_op(arr, axis):
        return arr
    return _pad_statistic(arr, pad_width, 1, identity_op)
def _pad_wrap(arr, pad_width):
    """The behaviour of wrap mode is consistent with jax.numpy, used in mode:"wrap"."""
    ndim = arr.ndim
    shape = arr.shape
    for i in range(ndim):
        # Split each pad width into full copies of the axis plus a remainder.
        padsize_before = pad_width[i][0] % shape[i]
        padsize_after = pad_width[i][1] % shape[i]
        total_repeats = pad_width[i][0] // shape[i] + 1 + pad_width[i][1] // shape[i]
        tensor_with_pad = ()
        # To avoid any memory issues, we don't make tensor with 0s in their shapes
        if padsize_before > 0:
            # Partial copy before: the trailing slice of the axis.
            tensor_with_pad += (_slice_along_axis(arr, i, shape[i]-padsize_before, shape[i]),)
        tensor_with_pad += (F.tile(arr, _tuple_setitem((1,)*ndim, i, total_repeats)),)
        if padsize_after > 0:
            # Partial copy after: the leading slice of the axis.
            tensor_with_pad += (_slice_along_axis(arr, i, 0, padsize_after),)
        arr = concatenate(tensor_with_pad, axis=i)
    return arr
def _pad_linear(arr, pad_width, end_values):
    """Pads the arr with linear range values, used in mode: "linear_ramp"."""
    ndim = arr.ndim
    shape = arr.shape
    dtype = arr.dtype
    end_values = _convert_pad_to_nd(end_values, ndim)
    for i in range(ndim):
        # shape [..., 1, ...]
        left_value = _slice_along_axis(arr, i, 0, 1)
        right_value = _slice_along_axis(arr, i, shape[i]-1, shape[i])
        pad_before = ()
        pad_after = ()
        if pad_width[i][0] > 0:
            # shape [..., pad_width[i][0], ...]
            # Ramp from end_values up to (but excluding) the left edge value.
            pad_before = (linspace(end_values[i][0], left_value, num=pad_width[i][0],
                                   endpoint=False, dtype=dtype, axis=i).squeeze(i+1),)
        if pad_width[i][1] > 0:
            # shape [..., pad_width[i][1], ...]
            # Ramp from the right edge to end_values; drop the first point,
            # which duplicates the edge element already present in `arr`.
            pad_after = linspace(right_value, end_values[i][1], num=pad_width[i][1]+1,
                                 endpoint=True, dtype=dtype, axis=i).squeeze(i+1)
            pad_after = (_slice_along_axis(pad_after, i, 1, pad_width[i][1]+1),)
        tensor_with_pad = pad_before + (arr,) + pad_after
        arr = concatenate(tensor_with_pad, axis=i)
    return arr
def _pad_symmetric(arr, pad_width, reflect_type):
    """pad the array with symmetric paddings

    Each pad region is built by repeatedly flipping slices of the (growing)
    array; "odd" reflect_type mirrors values through the edge element.
    """
    for i in range(arr.ndim):
        array_length = arr.shape[i]
        has_pad_before = (pad_width[i][0] > 0)
        has_pad_after = (pad_width[i][1] > 0)
        edge_before = _slice_along_axis(arr, i, 0, 1)
        edge_end = _slice_along_axis(arr, i, array_length-1, array_length)
        # Full reflections of the axis, plus a partial remainder on each side.
        times_to_pad_before = pad_width[i][0] // array_length + 1
        additional_pad_before = pad_width[i][0] % array_length
        times_to_pad_after = pad_width[i][1] // array_length + 1
        additional_pad_after = pad_width[i][1] % array_length
        curr_pad = None
        if has_pad_before:
            # Deal with paddings before the original array
            for times in range(times_to_pad_before):
                # All iterations but the last reflect the full axis; the last
                # contributes only the leftover partial width.
                if times < times_to_pad_before - 1:
                    endpoint = array_length
                else:
                    endpoint = additional_pad_before
                if endpoint != 0:
                    curr_pad = _slice_along_axis(arr, i, 0, endpoint)
                    curr_pad = flip(curr_pad, axis=i)
                    if reflect_type == "odd":
                        curr_pad = 2 * edge_before - curr_pad
                    arr = P.Concat(i)((curr_pad, arr))
                    # The edge moves as padding is prepended.
                    edge_before = _slice_along_axis(arr, i, 0, 1)
        if has_pad_after:
            # Deal with paddings after the original array
            for times in range(times_to_pad_after):
                if times < times_to_pad_after - 1:
                    startpoint = arr.shape[i] - array_length
                else:
                    startpoint = arr.shape[i] - additional_pad_after
                if startpoint != arr.shape[i]:
                    curr_pad = _slice_along_axis(arr, i, startpoint, arr.shape[i])
                    curr_pad = flip(curr_pad, axis=i)
                    if reflect_type == "odd":
                        curr_pad = 2 * edge_end - curr_pad
                    arr = P.Concat(i)((arr, curr_pad))
                    # The edge moves as padding is appended.
                    edge_end = _slice_along_axis(arr, i, arr.shape[i]-1, arr.shape[i])
    return arr
def _pad_reflect(arr, pad_width, reflect_type):
    """
    pad the array with reflect paddings, this is very similar to symmetric paddings,
    but differs at how edges are selected.
    """
    # pylint: disable=too-many-nested-blocks
    for i in range(arr.ndim):
        array_length = arr.shape[i]
        if array_length == 1:
            # A length-1 axis has no interior to reflect; just repeat the value.
            total_repeats = pad_width[i][0] + pad_width[i][1] + 1
            arr = F.tile(arr, _tuple_setitem((1,)*arr.ndim, i, total_repeats))
        else:
            has_pad_before = (pad_width[i][0] > 0)
            has_pad_after = (pad_width[i][1] > 0)
            edge_before = _slice_along_axis(arr, i, 0, 1)
            edge_end = _slice_along_axis(arr, i, array_length-1, array_length)
            # Unlike symmetric mode, the edge element itself is not repeated,
            # so each full reflection spans array_length - 1 elements.
            pad_size = array_length - 1
            times_to_pad_before = pad_width[i][0] // pad_size + 1
            additional_pad_before = pad_width[i][0] % pad_size
            times_to_pad_after = pad_width[i][1] // pad_size + 1
            additional_pad_after = pad_width[i][1] % pad_size
            curr_pad = None
            if has_pad_before:
                # Deal with paddings before the original array
                for times in range(times_to_pad_before):
                    if times < times_to_pad_before - 1:
                        endpoint = array_length
                    else:
                        endpoint = additional_pad_before + 1
                    if endpoint != 1:
                        # Slice starts at 1 to exclude the edge element.
                        curr_pad = _slice_along_axis(arr, i, 1, endpoint)
                        curr_pad = flip(curr_pad, axis=i)
                        if reflect_type == "odd":
                            curr_pad = 2 * edge_before - curr_pad
                        arr = P.Concat(i)((curr_pad, arr))
                        edge_before = _slice_along_axis(arr, i, 0, 1)
            if has_pad_after:
                # Deal with paddings after the original array
                for times in range(times_to_pad_after):
                    if times < times_to_pad_after - 1:
                        startpoint = arr.shape[i] - array_length
                    else:
                        startpoint = arr.shape[i] - additional_pad_after - 1
                    if startpoint != arr.shape[i]-1:
                        # Slice ends before the last element to exclude the edge.
                        curr_pad = _slice_along_axis(arr, i, startpoint, arr.shape[i]-1)
                        curr_pad = flip(curr_pad, axis=i)
                        if reflect_type == "odd":
                            curr_pad = 2 * edge_end - curr_pad
                        arr = P.Concat(i)((arr, curr_pad))
                        edge_end = _slice_along_axis(arr, i, arr.shape[i]-1, arr.shape[i])
    return arr
def _pad_func(arr, pad_width, func, **kwargs):
    """applies padding function over different axis.

    `func` receives each padded 1-d slice and is expected to fill the pad
    regions in place (numpy-style custom padding-function contract).
    """
    # first creates a padded array with fixed length.
    arr_dim = arr.ndim
    pad_width = _convert_pad_to_nd(pad_width, arr_dim)
    arr = _pad_empty(arr, pad_width)
    for i in range(arr_dim):
        # function signature: padding_func(tensor, iaxis_pad_width, iaxis, kwargs)
        arr = apply_along_axis(func, i, arr, pad_width[i], i, kwargs)
    return arr
@constexpr
def _make_stat_length(shape):
    """Builds the default stat_length: a (before, after) pair per axis,
    each equal to the full axis length."""
    return tuple((dim, dim) for dim in shape)
@constexpr
def _limit_stat_length(stat_length, shape):
    """Clamps each per-axis (before, after) stat_length pair to the
    corresponding axis length."""
    limited = []
    for axis, (before, after) in enumerate(stat_length):
        limited.append((min(before, shape[axis]), min(after, shape[axis])))
    return tuple(limited)
@constexpr
def _convert_pad_to_nd(pad_values, ndim):
    """Broadcasts `pad_values` to a nested tuple of shape (ndim, 2):
    one (before, after) pair per axis."""
    if not isinstance(pad_values, (int, list, tuple, Tensor)):
        raise TypeError(
            "pad_width, stat_length, constant_values or end_values should only be int, list, tuple or tensor")
    # Convert once so scalars, sequences and tensors all expose a `.shape`.
    pad_tensor = _to_tensor(pad_values)
    pad_shape = pad_tensor.shape
    if not pad_shape:
        # Scalar: same value before and after, on every axis.
        pad_values = tuple((((pad_values,) * 2) for i in range(ndim)))
    elif pad_shape == (1,):
        # Single-element sequence: same as scalar.
        pad_values = tuple((tuple(pad_values) * 2) for i in range(ndim))
    elif pad_shape == (2,):
        # One (before, after) pair, shared by every axis.
        pad_values = tuple(tuple(pad_values) for i in range(ndim))
    elif pad_shape == (1, 2):
        # Nested single pair, shared by every axis.
        pad_values = tuple(tuple(pad_values[0]) for i in range(ndim))
    elif pad_shape == (ndim, 2):
        # Already one pair per axis; just normalize to tuples.
        pad_values = tuple(tuple(pad_pair) for pad_pair in pad_values)
    else:
        raise ValueError(f"input values must be able to broadcast to {(ndim, 2)}")
    return pad_values
def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
        end_values=0, reflect_type="even", **kwargs):
    """
    Pads an array.
    Note:
        Currently, `median` mode is not supported. `reflect` and `symmetric` mode
        only supports GPU backend.
    Args:
        arr (Union[list, tuple, Tensor]): The array to pad.
        pad_width (Union[int, tuple, list]): Number of values padded to the edges of
            each axis. :class:`((before_1, after_1), ... (before_N, after_N))` creates
            unique pad widths for each axis. :class:`((before, after),)` yields same
            before and after pad for each axis. :class:`(pad,)` or int is a shortcut
            for :class:`before = after = pad width` for all axes.
        mode (string, optional):
            One of the following string values:

            - constant (default): Pads with a constant value.
            - edge: Pads with the edge values of `arr`.
            - linear_ramp: Pads with the linear ramp between end_value and the `arr` edge value.
            - maximum: Pads with the maximum value of all or part of the vector along each axis.
            - mean: Pads with the mean value of all or part of the vector along each axis.
            - median: Pads with the median value of all or part of the vector along each axis.
            - minimum: Pads with the minimum value of all or part of the vector along each axis.
            - reflect: Pads with the reflection of the vector mirrored on the first
              and last values of the vector along each axis.
            - symmetric: Pads with the reflection of the vector mirrored along the edge
              of the `arr`.
            - wrap: Pads with the wrap of the vector along the axis. The first values
              are used to pad the end and the end values are used to pad the beginning.
            - empty: Pads with undefined values.
            - <function>: The padding function, if used, should modify and return a new 1-d tensor.
              It has the following signature: :class:`padding_func(tensor, iaxis_pad_width, iaxis, kwargs)`
        stat_length (Union[tuple, list, int], optional): Used in \'maximum\', \'mean\',
            \'median\', and \'minimum\'. Number of values at edge of each axis used
            to calculate the statistic value. :class:`((before_1, after_1), ... (before_N, after_N))`
            creates unique statistic lengths for each axis. :class:`((before, after),)`
            yields same before and after statistic lengths for each axis. :class:`(stat_length,)`
            or int is a shortcut for :class:`before = after = statistic length` for all
            axes. Default is :class:`None`, to use the entire axis.
        constant_values (Union[tuple, list, int], optional):
            Used in :class:`constant mode`. The values to set the padded values for each
            axis. :class:`((before_1, after_1), ... (before_N, after_N))` creates unique pad
            constants for each axis. :class:`((before, after),)` yields same before and
            after constants for each axis. :class:`(constant,)` or :class:`constant` is
            a shortcut for :class:`before = after = constant` for all axes. Default is 0.
        end_values (Union[tuple, list, int], optional): Used in 'linear_ramp'. The values
            used for the ending value of the linear_ramp and that will form the edge of
            the padded `arr`. :class:`((before_1, after_1), ... (before_N, after_N))`
            unique end values for each axis. :class:`((before, after),)` yields same before
            and after end values for each axis. :class:`(constant,)` or :class:`constant`
            is a shortcut for :class:`before = after = constant` for all axes. Default is 0.
        reflect_type (string, optional): Can choose between \'even\' and \'odd\'. Used in
            \'reflect\', and \'symmetric\'. The \'even\' style is the default with an
            unaltered reflection around the edge value. For the \'odd\' style, the extended
            part of the `arr` is created by subtracting the reflected values from two times
            the edge value.
    Returns:
        Padded tensor of rank equal to `arr` with shape increased according to `pad_width`.
    Raises:
        TypeError: if `arr`, `pad_width`, `stat_length`, `constant_values` or `end_values`
            have types not specified above.
        ValueError: if `mode` cannot be recognized, or if `pad_width`, `stat_length`,
            `constant_values`, `end_values` cannot broadcast to :class:`(arr.ndim, 2)`,
            or if keyword arguments got unexpected inputs.
        NotImplementedError: if mode is \'median\'.
    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
    Examples:
        >>> import mindspore.numpy as np
        >>> tensor = np.array([1., 2., 3., 4., 5.])
        >>> print(np.pad(tensor, (3, 4)))
        [0. 0. 0. 1. 2. 3. 4. 5. 0. 0. 0. 0.]
        >>> print(np.pad(tensor, (3, 4), mode="wrap"))
        [3. 4. 5. 1. 2. 3. 4. 5. 1. 2. 3. 4.]
        >>> print(np.pad(tensor, (3, 4), mode="linear_ramp", end_values=(10, 10)))
        [10. 7. 4. 1. 2. 3. 4. 5. 6.25 7.5 8.75 10. ]
    """
    arr = _to_tensor(arr)
    # 0-d input has no axes to pad.
    if arr.ndim == 0:
        return arr
    # Normalize pad_width to one (before, after) pair per axis.
    pad_width = _convert_pad_to_nd(pad_width, arr.ndim)
    stat_func = {"maximum": _reduce_max_keepdims,
                 "minimum": _reduce_min_keepdims,
                 "mean": _reduce_mean_keepdims,
                 "median": "not implemented"}
    if mode not in ("constant", "maximum", "minimum", "mean", "median", "edge",
                    "wrap", "linear_ramp", "symmetric", "reflect", "empty") and \
            not _callable(arr, mode):
        _raise_value_error("Input mode not supported.")
    # Dispatch to the per-mode helper.
    if mode == "constant":
        constant_values = _convert_pad_to_nd(constant_values, arr.ndim)
        return _pad_constant(arr, pad_width, constant_values)
    if mode in ("maximum", "minimum", "mean", "median"):
        # TODO: support median mode once P.Sort/P.Median is supported on GPU/CPU
        if mode == "median":
            _raise_unimplemented_error("median mode is not supported yet")
        return _pad_statistic(arr, pad_width, stat_length, stat_func[mode])
    if mode == "edge":
        return _pad_edge(arr, pad_width)
    if mode == "wrap":
        return _pad_wrap(arr, pad_width)
    if mode == "linear_ramp":
        return _pad_linear(arr, pad_width, end_values)
    if mode == "symmetric":
        return _pad_symmetric(arr, pad_width, reflect_type)
    if mode == "reflect":
        return _pad_reflect(arr, pad_width, reflect_type)
    if mode == 'empty':
        return _pad_empty(arr, pad_width)
    # mode is a user-supplied padding function.
    return _pad_func(arr, pad_width, mode, **kwargs)
| 37.040465 | 119 | 0.603377 |
1680f79c5d9a0daf027550f99e72307d39492c2b | 1,460 | py | Python | data.py | nitishmishra617/Python-Face_Recognition_Attendance_System | 596e68a2100d212961096a17c2c3c17fc229647d | [
"MIT"
] | null | null | null | data.py | nitishmishra617/Python-Face_Recognition_Attendance_System | 596e68a2100d212961096a17c2c3c17fc229647d | [
"MIT"
] | null | null | null | data.py | nitishmishra617/Python-Face_Recognition_Attendance_System | 596e68a2100d212961096a17c2c3c17fc229647d | [
"MIT"
] | null | null | null | import cv2
import os
def main(id):
    """Captures up to 30 grayscale face samples from the default webcam and
    saves them as ``dataset/User.<id>.<n>.jpg`` for later training.

    Args:
        id: Identifier (typically numeric) embedded in the saved file names.

    The capture loop exits on ESC, after 30 samples, or if the camera stops
    delivering frames.
    """
    cam = cv2.VideoCapture(0)
    cam.set(3, 640)  # set video width
    cam.set(4, 480)  # set video height
    face_detector = cv2.CascadeClassifier(
        'haarcascade_frontalface_default.xml')
    # For each person, enter one numeric face id
    face_id = id
    # Ensure the output directory exists before writing samples;
    # cv2.imwrite fails silently when the directory is missing.
    os.makedirs('dataset', exist_ok=True)
    print("\n [INFO] Initializing face capture. Look the camera and wait ...")
    # Initialize individual sampling face count
    count = 0
    while True:
        ret, img = cam.read()
        if not ret:
            # Camera read failed: stop instead of passing None to cvtColor.
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            count += 1
            # Save the captured face region into the datasets folder
            cv2.imwrite("dataset/User." + str(face_id) + '.' +
                        str(count) + ".jpg", gray[y:y+h, x:x+w])
        cv2.imshow('image', img)
        k = cv2.waitKey(100) & 0xff  # Press 'ESC' for exiting video
        if k == 27:
            break
        elif count >= 30:  # Take 30 face sample and stop video
            break
    # Do a bit of cleanup
    print("\n [INFO] Exiting Program and cleanup stuff")
    cam.release()
    cv2.destroyAllWindows()
39b579074587f2c4d07dc5d76d0a08c3c8e937a1 | 2,560 | py | Python | test/record/parser/test_response_whois_nic_cx_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_whois_nic_cx_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null | test/record/parser/test_response_whois_nic_cx_status_registered.py | huyphan/pyyawhois | 77fb2f73a9c67989f1d41d98f37037406a69d136 | [
"MIT"
] | null | null | null |
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.nic.cx/status_registered
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisNicCxStatusRegistered(object):
    """Fixture-driven parser tests for a registered .cx domain.

    Autogenerated (see header): parses a canned whois.nic.cx response and
    asserts every field yawhois extracts from it. Do not hand-tune the
    expected values — regenerate with scripts/generate_tests.py instead.
    """

    def setUp(self):
        # Build a Record from the raw fixture text, tagged with its host.
        fixture_path = "spec/fixtures/responses/whois.nic.cx/status_registered.txt"
        host = "whois.nic.cx"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_domain(self):
        eq_(self.record.domain, "google.cx")

    def test_nameservers(self):
        # Four nameservers, each exposed as a Nameserver object.
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(len(self.record.nameservers), 4)
        eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[0].name, "ns1.google.com")
        eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[1].name, "ns3.google.com")
        eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[2].name, "ns4.google.com")
        eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[3].name, "ns2.google.com")

    def test_registered(self):
        eq_(self.record.registered, True)

    def test_created_on(self):
        eq_(self.record.created_on.__class__.__name__, 'datetime')
        eq_(self.record.created_on, time_parse('2010-07-29 18:15:42 UTC'))

    def test_registrar(self):
        # Only the registrar name is present in this fixture.
        eq_(self.record.registrar.__class__.__name__, 'Registrar')
        eq_(self.record.registrar.id, None)
        eq_(self.record.registrar.name, "MarkMonitor")
        eq_(self.record.registrar.organization, None)
        eq_(self.record.registrar.url, None)

    def test_updated_on(self):
        eq_(self.record.updated_on.__class__.__name__, 'datetime')
        eq_(self.record.updated_on, time_parse('2014-06-28 09:18:02 UTC'))

    def test_domain_id(self):
        eq_(self.record.domain_id, "447518-CoCCA")

    def test_expires_on(self):
        eq_(self.record.expires_on.__class__.__name__, 'datetime')
        eq_(self.record.expires_on, time_parse('2015-07-29 18:15:42 UTC'))
| 37.101449 | 83 | 0.684766 |
ac2b15bc287c6cfd46dd4899658b784943068720 | 6,534 | py | Python | theano/gof/lazylinker_c.py | brandonwillard/Theano | f375a0e999b950a81824a003f685b0bfd1c4e405 | [
"BSD-3-Clause"
] | null | null | null | theano/gof/lazylinker_c.py | brandonwillard/Theano | f375a0e999b950a81824a003f685b0bfd1c4e405 | [
"BSD-3-Clause"
] | null | null | null | theano/gof/lazylinker_c.py | brandonwillard/Theano | f375a0e999b950a81824a003f685b0bfd1c4e405 | [
"BSD-3-Clause"
] | 1 | 2020-08-15T17:09:10.000Z | 2020-08-15T17:09:10.000Z | import errno
import logging
import os
import sys
import warnings
from six.moves import reload_module as reload
import theano
from theano import config
from theano.gof import cmodule
from theano.gof.compilelock import get_lock, release_lock
# Module logger for compilation progress and diagnostics.
_logger = logging.getLogger("theano.gof.lazylinker_c")

# When True, skip the cached-import fast path and always rebuild the extension.
force_compile = False
version = 0.211  # must match the constant returned by get_version() in lazylinker_c.c
lazylinker_ext = None  # populated by try_import() below
def try_import():
    """Import the compiled lazylinker extension package.

    The compiledir is pushed to the front of sys.path first so that this
    import cannot be shadowed by any other lazylinker_ext on the path.
    """
    global lazylinker_ext
    sys.path.insert(0, config.compiledir)
    import lazylinker_ext  # noqa
    sys.path.pop(0)
def try_reload():
    """Reload the already-imported extension, compiledir-first (see try_import)."""
    sys.path.insert(0, config.compiledir)
    reload(lazylinker_ext)
    sys.path.pop(0)
# Bootstrap the compiled lazylinker extension: try to import a cached build,
# and on any ImportError (missing, stale version, forced rebuild) compile it
# from lazylinker_c.c under the compilation lock.
try:
    # See gh issue #728 for why these lines are here. Summary: compiledir must
    # be at the beginning of the path to avoid conflicts with any other
    # lazylinker_ext modules that might exist (this step handled in try_import
    # and try_reload). An __init__.py file must be created for the same reason.
    # Note that these lines may seem redundant (they are repeated in
    # compile_str()) but if another lazylinker_ext does exist then it will be
    # imported and compile_str won't get called at all.
    location = os.path.join(config.compiledir, "lazylinker_ext")
    if not os.path.exists(location):
        try:
            # Try to make the location
            os.mkdir(location)
        except OSError as e:
            # If we get an error, verify that the error was # 17, the
            # path already exists, and that it is a directory Note: we
            # can't check if it exists before making it, because we
            # are not holding the lock right now, so we could race
            # another process and get error 17 if we lose the race
            assert e.errno == errno.EEXIST
            assert os.path.isdir(location)
    # An (empty) __init__.py makes the directory importable as a package.
    init_file = os.path.join(location, "__init__.py")
    if not os.path.exists(init_file):
        try:
            open(init_file, "w").close()
        except OSError as e:
            if os.path.exists(init_file):
                pass  # has already been created
            else:
                e.args += ("{} exist? {}".format(location, os.path.exists(location)),)
                raise
    _need_reload = False
    if force_compile:
        # Fall straight into the compile path below.
        raise ImportError()
    else:
        try_import()
        _need_reload = True
        actual_version = getattr(lazylinker_ext, "_version", None)
        if version != actual_version:
            # Cached build is stale; trigger a rebuild via the except branch.
            raise ImportError(
                "Version check of the existing lazylinker compiled file."
                " Looking for version %s, but found %s. "
                "Extra debug information: force_compile=%s, _need_reload=%s"
                % (version, actual_version, force_compile, _need_reload)
            )
except ImportError:
    get_lock()
    try:
        # Maybe someone else already finished compiling it while we were
        # waiting for the lock?
        try:
            if force_compile:
                raise ImportError()
            if _need_reload:
                # The module was successfully imported earlier: we need to
                # reload it to check if the version was updated.
                try_reload()
            else:
                try_import()
                _need_reload = True
            actual_version = getattr(lazylinker_ext, "_version", None)
            if version != actual_version:
                raise ImportError(
                    "Version check of the existing lazylinker compiled file."
                    " Looking for version %s, but found %s. "
                    "Extra debug information: force_compile=%s,"
                    " _need_reload=%s"
                    % (version, actual_version, force_compile, _need_reload)
                )
        except ImportError:
            # It is useless to try to compile if there isn't any
            # compiler! But we still want to try to load it, in case
            # the cache was copied from another computer.
            if not theano.config.cxx:
                raise
            _logger.info("Compiling new CVM")
            dirname = "lazylinker_ext"
            # The C source shipped alongside this module is the build input.
            cfile = os.path.join(theano.__path__[0], "gof", "c_code", "lazylinker_c.c")
            if not os.path.exists(cfile):
                # This can happen in not normal case. We just
                # disable the c clinker. If we are here the user
                # didn't disable the compiler, so print a warning.
                warnings.warn(
                    "The file lazylinker_c.c is not available. This do"
                    "not happen normally. You are probably in a strange"
                    "setup. This mean Theano can not use the cvm:"
                    "our c execution engine for Theano function. If you"
                    "want to remove this warning, use the Theano flag"
                    "'cxx=' (set to an empty string) to disable all c"
                    "code generation."
                )
                raise ImportError("The file lazylinker_c.c is not available.")
            code = open(cfile).read()
            loc = os.path.join(config.compiledir, dirname)
            if not os.path.exists(loc):
                try:
                    os.mkdir(loc)
                except OSError as e:
                    # EEXIST is fine: another process created it first.
                    assert e.errno == errno.EEXIST
                    assert os.path.exists(loc)
            args = cmodule.GCC_compiler.compile_args()
            cmodule.GCC_compiler.compile_str(dirname, code, location=loc, preargs=args)
            # Save version into the __init__.py file.
            init_py = os.path.join(loc, "__init__.py")
            with open(init_py, "w") as f:
                f.write("_version = %s\n" % version)
            # If we just compiled the module for the first time, then it was
            # imported at the same time: we need to make sure we do not
            # reload the now outdated __init__.pyc below.
            init_pyc = os.path.join(loc, "__init__.pyc")
            if os.path.isfile(init_pyc):
                os.remove(init_pyc)
            try_import()
            try_reload()
            # Sanity-check that the freshly built binary reports our version.
            from lazylinker_ext import lazylinker_ext as lazy_c
            assert lazylinker_ext._version == lazy_c.get_version()
            _logger.info("New version %s", lazylinker_ext._version)
    finally:
        # Release lock on compilation directory.
        release_lock()

# Re-export the extension's symbols at module level and verify the version.
from lazylinker_ext.lazylinker_ext import *  # noqa
assert force_compile or (version == get_version())  # noqa
| 39.841463 | 87 | 0.59045 |
cd29a071cd4a9fa6f2124dd2c9eb0a5a6d0a3410 | 5,355 | py | Python | mysite/myapp/views.py | CSUChico-CINS465/CINS465-F19-Examples | 10a177cb942ace351445ae37ba2c30ee0fc93230 | [
"MIT"
] | null | null | null | mysite/myapp/views.py | CSUChico-CINS465/CINS465-F19-Examples | 10a177cb942ace351445ae37ba2c30ee0fc93230 | [
"MIT"
] | 11 | 2020-06-05T22:49:57.000Z | 2022-03-12T00:01:45.000Z | mysite/myapp/views.py | CSUChico-CINS465/CINS465-F19-Examples | 10a177cb942ace351445ae37ba2c30ee0fc93230 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout
# from django.contrib.auth.models import User
from . import models
from . import forms
# Create your views here.
def index(request, page=0):
    """Landing page: accept new-suggestion POSTs and list all suggestions.

    On an authenticated POST with a valid form, save the suggestion and show
    a fresh form; an invalid form is re-rendered bound so errors display.
    """
    form_instance = forms.SuggestionForm()
    if request.method == "POST" and request.user.is_authenticated:
        form_instance = forms.SuggestionForm(request.POST)
        if form_instance.is_valid():
            suggestion = models.Suggestion(
                suggestion=form_instance.cleaned_data["suggestion"])
            suggestion.author = request.user
            suggestion.save()
            form_instance = forms.SuggestionForm()

    suggestions = []
    for sugg in models.Suggestion.objects.all():
        comments = [
            {
                "comment": comm.comment,
                "author": comm.author.username,
                "created_on": comm.created_on,
                "id": comm.id,
                # Only the comment's author may delete it.
                "delete": request.user == comm.author,
            }
            for comm in models.Comment.objects.filter(suggestion=sugg)
        ]
        suggestions.append({
            "id": sugg.id,
            "suggestion": sugg.suggestion,
            "author": sugg.author.username,
            "created_on": sugg.created_on,
            "comments": comments,
        })

    context = {
        "variable": "Hello World",
        "title": "Index",
        "form": form_instance,
        "some_list": suggestions,
    }
    return render(request, "index.html", context=context)
@csrf_exempt
@login_required(login_url='/login/')
def suggestions_view(request):
    """JSON feed of all suggestions (newest first), with comments and image."""
    if request.method != "GET":
        return HttpResponse("Unsupported HTTP Method")

    payload = {"suggestions": []}
    for sugg in models.Suggestion.objects.all().order_by('-created_on'):
        comments = [
            {
                "comment": comm.comment,
                "author": comm.author.username,
                "created_on": comm.created_on,
                "id": comm.id,
                # Deletable only by the comment's own author.
                "delete": request.user == comm.author,
            }
            for comm in models.Comment.objects.filter(suggestion=sugg)
        ]
        # Empty string when no image was uploaded for this suggestion.
        image_url = sugg.image.url if str(sugg.image) != "" else ""
        payload["suggestions"].append({
            "id": sugg.id,
            "suggestion": sugg.suggestion,
            "author": sugg.author.username,
            "created_on": sugg.created_on,
            "comments": comments,
            "image": image_url,
            "image_description": sugg.image_description,
        })
    return JsonResponse(payload)
@login_required(login_url='/login/')
def suggestion_form_view(request):
    """Render the suggestion form; save and redirect home on a valid POST."""
    if request.method != "POST":
        form_instance = forms.SuggestionForm()
    elif not request.user.is_authenticated:
        return redirect("/")
    else:
        form_instance = forms.SuggestionForm(request.POST, request.FILES)
        if form_instance.is_valid():
            # Custom save() takes the request to attach the author.
            form_instance.save(request=request)
            return redirect("/")
    # GET, or invalid POST (re-rendered bound so errors display).
    context = {
        "title": "Suggestion Form",
        "form": form_instance,
    }
    return render(request, "suggestion.html", context=context)
@login_required(login_url='/login/')
def comments_view(request, instance_id, delete=0):
    """Comment form for a suggestion; delete=1 removes the comment instead.

    On delete, instance_id refers to the comment and only its author may
    delete it. Otherwise instance_id is the suggestion being commented on.
    """
    if delete == 1:
        print("Should delete the comment here")
        comment = models.Comment.objects.get(id=instance_id)
        if request.user == comment.author:
            comment.delete()
        return redirect("/")

    form_instance = forms.CommentForm()
    if request.method == "POST" and request.user.is_authenticated:
        form_instance = forms.CommentForm(request.POST)
        if form_instance.is_valid():
            # Custom save() links the comment to its suggestion and author.
            form_instance.save(request=request, sugg_id=instance_id)
            return redirect("/")

    context = {
        "title": "Comment Form",
        "form": form_instance,
        "sugg_id": instance_id,
    }
    return render(request, "comment.html", context=context)
def logout_view(request):
    """Log the current user out and send them to the login page."""
    logout(request)
    return redirect("/login/")
def register(request):
    """Account sign-up: save a valid registration and redirect to login."""
    if request.method != "POST":
        form_instance = forms.RegistrationForm()
    else:
        form_instance = forms.RegistrationForm(request.POST)
        if form_instance.is_valid():
            form_instance.save()
            return redirect("/login/")
    # GET, or invalid POST (form re-rendered bound so errors display).
    context = {
        "form": form_instance,
    }
    return render(request, "registration/register.html", context=context)
| 35 | 97 | 0.593838 |
86c7b89f0068dbfe2f1db2af080044a6842d0b26 | 2,273 | py | Python | src/clilib/__init__.py | markediez/clilib | 46753645b96b6482e38c14fc707a300e10faf330 | [
"MIT"
] | null | null | null | src/clilib/__init__.py | markediez/clilib | 46753645b96b6482e38c14fc707a300e10faf330 | [
"MIT"
] | 5 | 2020-08-27T17:40:29.000Z | 2020-09-20T19:09:20.000Z | src/clilib/__init__.py | markediez/clilib | 46753645b96b6482e38c14fc707a300e10faf330 | [
"MIT"
] | null | null | null | import clilib.util
import clilib.decorator as decorator
import argparse
import logging
_root_parser = argparse.ArgumentParser()
_subparsers = _root_parser.add_subparsers(dest='_cmd')
_subparsers.required = True
_args = None
logger = clilib.util.get_logger(f"[{__name__}]")
def build_parser(parser):
    """Extension hook for customizing a parser; currently a no-op pass-through."""
    return parser
def register_verb(resource, func):
    """Wire a decorated handler into the '<verb> <resource>' CLI tree.

    `func._action` names the verb and `func._args`/`resource._args` carry
    argparse argument specs — presumably attached by clilib.decorator;
    confirm there. Creates the verb subparser on demand, then a parser for
    the kebab-cased resource name, binding _func/_klass for dispatch in run().
    """
    logger.debug('Registering resource verb')
    logger.debug(_subparsers.choices)
    resource_name = clilib.util.to_kebab(resource.__name__)
    verb = getattr(func, '_action')
    # Per-handler args first, then resource-wide args.
    args = getattr(func, '_args', []) + getattr(resource, '_args', [])
    logger.debug(f"resource: {resource}")
    logger.debug(f"func : {func}")
    logger.debug(f"verb : {verb}")
    logger.debug(f"args : {args}")
    # Ensure the top-level verb parser exists.
    if verb not in _subparsers.choices:
        logger.debug(f"Adding verb, '{verb}', in _subparsers")
        _subparsers.add_parser(verb)
    # Cache the verb's resource-level subparsers on the resource class.
    if verb not in resource._parsers:
        logger.debug(f"Adding verb, '{verb}', in resource._parsers")
        if verb in _subparsers.choices:
            logger.debug(f"Referencing subparser of _subparsers.choices['{verb}']")
            resource._parsers[verb] = clilib.util.get_subparser(_subparsers.choices[verb])
        else:
            # NOTE(review): unreachable — the block above guarantees verb is
            # in _subparsers.choices by this point. Confirm and consider removing.
            logger.debug(f"Creating _subparsers.choices['{verb}']")
            resource._parsers[verb] = _subparsers.choices[verb].add_subparsers()
    logger.debug(resource._parsers[verb].choices)
    # Register the resource under this verb once, with dispatch defaults.
    if resource_name not in resource._parsers[verb].choices:
        logger.debug(f"Adding resource, {resource}, target for verb, {verb}, as {resource_name}")
        resource_parser = resource._parsers[verb].add_parser(resource_name)
        resource_parser.set_defaults(_func=func, _klass=resource)
        for arg in args:
            # Each spec is a (positional-args, keyword-args) pair for add_argument.
            rargs, rkwargs = arg
            logger.debug(f"Adding arg to '{verb} {resource_name}': {rargs}, {rkwargs}")
            resource_parser.add_argument(*rargs, **rkwargs)
# Primarily for testing
# Future: Reading from config file or something
def init(prog):
    """Override the root parser's program name (primarily for testing)."""
    _root_parser.prog = prog
def run(prog):
    """Parse argv and dispatch to the registered verb handler, if any.

    NOTE(review): *prog* is accepted but unused here — confirm whether
    callers expect it to be forwarded to init().
    """
    global _args
    _args = _root_parser.parse_args()
    if hasattr(_args, '_func'):
        # Defaults bound by register_verb(): call handler(resource_class, args=...).
        _args._func(_args._klass, args=_args)
| 31.136986 | 97 | 0.682798 |
d42fbec41b644a0680cc8bbb79c1ced00e52ef6b | 2,705 | py | Python | Sketches/AM/KPIPackage/Kamaelia/Community/AM/Kamaelia/KPIFramework/Examples/KPITextClient.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/AM/KPIPackage/Kamaelia/Community/AM/Kamaelia/KPIFramework/Examples/KPITextClient.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/AM/KPIPackage/Kamaelia/Community/AM/Kamaelia/KPIFramework/Examples/KPITextClient.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
"""
====================================
KPI Client that recieves and prints text data
====================================
How does it work?
-----------------
The KPITextClient establishes TCP connection with the
KPITextServer. Upon successful authentication,
receives session key and uses session key decrypt the
encrypted stream. The decrypted stream is printed by
MyDataSink component
"""
import Axon
from Kamaelia.Util.Graphline import *
from Kamaelia.Community.AM.Kamaelia.KPIFramework.KPI.Server.KPIServer import *
from Kamaelia.Community.AM.Kamaelia.KPIFramework.KPI.Client.KPIClient import KPIClient
from Kamaelia.Community.AM.Kamaelia.KPIFramework.KPI.DB import KPIDBI
from Kamaelia.Internet.TCPClient import TCPClient as _TCPClient
#from Kamaelia.Util.ConsoleEcho import consoleEchoer
class MyDataSink(Axon.Component.component):
""" prints received text
"""
def main(self):
while 1:
yield 1
while self.dataReady("inbox"):
print "datasink received:", self.recv("inbox")
import sys
# Script entry point: connect a KPIClient to a KPITextServer over TCP and
# print the decrypted stream via MyDataSink.
if __name__=="__main__":
    # Expect: <prog> kpiserver port usercfg; otherwise fall back to defaults.
    if len(sys.argv) != 4:
        print "Usage:", sys.argv[0], "kpiserver port usercfg"
        print "default values used: kpiserver=localhost, port=1256 and usercfg = user3"
        server = "localhost"
        port = 1256
        usercfg = "user3"
    else:
        server = sys.argv[1]
        port = int(sys.argv[2])
        usercfg = sys.argv[3]

    # Wire the KPI client to the TCP transport: client output goes to the
    # socket, socket data feeds back into the client for decryption.
    Graphline(
        #c=KPIClient(usercfg, consoleEchoer()),
        c=KPIClient(usercfg, MyDataSink()),
        cc = _TCPClient(server,port),
        linkages = {
            ("c","outbox") : ("cc","inbox"),
            ("cc","outbox") : ("c","inbox"),
        }
    ).run()
| 34.240506 | 88 | 0.626987 |
2fd51f3302037d3acade6261953998850516870b | 1,621 | py | Python | autodc/components/feature_engineering/transformations/generator/kitchen_sinks.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | autodc/components/feature_engineering/transformations/generator/kitchen_sinks.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | autodc/components/feature_engineering/transformations/generator/kitchen_sinks.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | import numpy as np
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter
from autodc.components.feature_engineering.transformations.base_transformer import *
class KitchenSinks(Transformer):
    """Random Fourier feature generator ("random kitchen sinks").

    Uses sklearn's RBFSampler to approximate an RBF kernel feature map over
    the selected input columns. Registered under name "kitchen_sinks",
    transformer id 13.
    """

    def __init__(self, gamma=1.0, n_components=100, random_state=1):
        super().__init__("kitchen_sinks", 13, random_state=random_state)
        # Accepts numerical, discrete and categorical columns as input;
        # output features are numerical.
        self.input_type = [NUMERICAL, DISCRETE, CATEGORICAL]
        # 'only_new': presumably only the generated features are kept —
        # confirm against the base Transformer's compound_mode handling.
        self.compound_mode = 'only_new'
        self.output_type = NUMERICAL
        self.gamma = gamma  # RBF kernel parameter passed to RBFSampler
        self.n_components = n_components  # number of random Fourier features
        self.random_state = random_state

    @ease_trans
    def operate(self, input_datanode, target_fields=None):
        """Fit RBFSampler on first use, then transform the target columns.

        NOTE(review): assumes @ease_trans unpacks/repacks the data node and
        that self.model is falsy before the first call — confirm in the base
        Transformer / ease_trans implementation.
        """
        X, y = input_datanode.data
        X_new = X[:, target_fields]
        if not self.model:
            # Lazy import keeps sklearn off the critical path until needed.
            import sklearn.kernel_approximation
            self.model = sklearn.kernel_approximation.RBFSampler(
                gamma=self.gamma, n_components=self.n_components, random_state=self.random_state)
            self.model.fit(X_new)
        _X = self.model.transform(X_new)

        return _X

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
        """Return the ConfigSpace searched for this transformer (gamma, n_components)."""
        gamma = UniformFloatHyperparameter(
            "gamma", 3.0517578125e-05, 8, default_value=1.0, log=True)
        n_components = UniformIntegerHyperparameter(
            "n_components", 50, 5000, default_value=100, log=True)
        cs = ConfigurationSpace()
        cs.add_hyperparameters([gamma, n_components])
        return cs
| 37.697674 | 97 | 0.702036 |
e3cb10433a516d1c6d2a2ca8431f8d396cd46567 | 249,143 | py | Python | xlsxwriter/worksheet.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/worksheet.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/worksheet.py | adgear/XlsxWriter | 79bcaad28d57ac29038b1c74bccc6d611b7a385e | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Worksheet - A class for writing the Excel XLSX Worksheet file.
#
# Copyright 2013-2019, John McNamara, jmcnamara@cpan.org
#
# Standard packages.
import codecs
import datetime
import os
import re
import sys
import tempfile
from collections import defaultdict
from collections import namedtuple
from math import isnan
from math import isinf
from warnings import warn
# Standard packages in Python 2/3 compatibility mode.
from .compatibility import StringIO
from .compatibility import force_unicode
from .compatibility import num_types, str_types
# Package imports.
from . import xmlwriter
from .format import Format
from .drawing import Drawing
from .shape import Shape
from .xmlwriter import XMLwriter
from .utility import xl_rowcol_to_cell
from .utility import xl_rowcol_to_cell_fast
from .utility import xl_cell_to_rowcol
from .utility import xl_col_to_name
from .utility import xl_range
from .utility import xl_color
from .utility import get_sparkline_style
from .utility import supported_datetime
from .utility import datetime_to_excel_datetime
from .utility import quote_sheetname
from .exceptions import DuplicateTableName
###############################################################################
#
# Decorator functions.
#
###############################################################################
def convert_cell_args(method):
    """Decorator: let cell methods accept 'A1' notation.

    If the wrapped method's first positional argument is not an integer it
    is treated as an A1-style cell string and expanded to numeric (row, col)
    before the call is forwarded unchanged.
    """
    def cell_wrapper(self, *args, **kwargs):
        if args:
            try:
                int(args[0])
            except ValueError:
                # Not numeric: expand the A1 reference into (row, col).
                args = xl_cell_to_rowcol(args[0]) + args[1:]
        return method(self, *args, **kwargs)

    return cell_wrapper
def convert_range_args(method):
    """Decorator: let range methods accept 'A1:B2' (or single 'A1') notation.

    A non-integer first positional argument is parsed as a range string and
    replaced by (row_1, col_1, row_2, col_2); a single cell reference maps
    both corners to the same cell.
    """
    def cell_wrapper(self, *args, **kwargs):
        if args:
            try:
                int(args[0])
            except ValueError:
                # Not numeric: parse the A1-style range string.
                ref = args[0]
                if ':' in ref:
                    start, end = ref.split(':')
                    row_1, col_1 = xl_cell_to_rowcol(start)
                    row_2, col_2 = xl_cell_to_rowcol(end)
                else:
                    row_1, col_1 = xl_cell_to_rowcol(ref)
                    row_2, col_2 = row_1, col_1
                args = [row_1, col_1, row_2, col_2] + list(args[1:])
        return method(self, *args, **kwargs)

    return cell_wrapper
def convert_column_args(method):
    """Decorator: let column methods accept 'A:C'-style column notation.

    A non-integer first positional argument is parsed as a column range and
    replaced by numeric (first_col, last_col) before forwarding the call.
    """
    def column_wrapper(self, *args, **kwargs):
        if args:
            try:
                int(args[0])
            except ValueError:
                # Turn each column letter into a cell ref in row 1, then
                # keep only the column part of the converted pair.
                first_ref, last_ref = [col + '1' for col in args[0].split(':')]
                _, first_col = xl_cell_to_rowcol(first_ref)
                _, last_col = xl_cell_to_rowcol(last_ref)
                args = [first_col, last_col] + list(args[1:])
        return method(self, *args, **kwargs)

    return column_wrapper
###############################################################################
#
# Named tuples used for cell types.
#
###############################################################################
# Lightweight per-cell containers stored in Worksheet.table — one tuple type
# per cell kind. The tuple class name (e.g. 'String') identifies the kind.
cell_string_tuple = namedtuple('String', 'string, format')
cell_number_tuple = namedtuple('Number', 'number, format')
cell_blank_tuple = namedtuple('Blank', 'format')
cell_boolean_tuple = namedtuple('Boolean', 'boolean, format')
# Formulas keep an optional pre-calculated result value.
cell_formula_tuple = namedtuple('Formula', 'formula, format, value')
# Array formulas additionally record the range they span.
cell_arformula_tuple = namedtuple('ArrayFormula',
                                  'formula, format, value, range')
###############################################################################
#
# Worksheet Class definition.
#
###############################################################################
class Worksheet(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Worksheet file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
    def __init__(self):
        """
        Constructor. Initializes all worksheet state to its defaults; the
        Workbook wires in name/index/shared tables after construction.
        """

        super(Worksheet, self).__init__()

        # Identity and shared state injected by the Workbook.
        self.name = None
        self.index = None
        self.str_table = None
        self.palette = None
        self.constant_memory = 0
        self.tmpdir = None
        self.is_chartsheet = False
        self.ext_sheets = []
        self.fileclosed = 0
        self.excel_version = 2007
        self.excel2003_style = False
        # Excel 2007+ sheet limits and the tracked used range (dimensions).
        self.xls_rowmax = 1048576
        self.xls_colmax = 16384
        self.xls_strmax = 32767
        self.dim_rowmin = None
        self.dim_rowmax = None
        self.dim_colmin = None
        self.dim_colmax = None
        # Sheet view state: column info, selection, visibility, panes.
        self.colinfo = {}
        self.selections = []
        self.hidden = 0
        self.active = 0
        self.tab_color = 0
        self.panes = []
        self.active_pane = 3
        self.selected = 0
        # Page setup and print options.
        self.page_setup_changed = False
        self.paper_size = 0
        self.orientation = 1
        self.print_options_changed = False
        self.hcenter = False
        self.vcenter = False
        self.print_gridlines = False
        self.screen_gridlines = True
        self.print_headers = False
        self.row_col_headers = False
        # Header/footer text, images and print margins (inches).
        self.header_footer_changed = False
        self.header = ''
        self.footer = ''
        self.header_footer_aligns = True
        self.header_footer_scales = True
        self.header_images = []
        self.footer_images = []
        self.header_images_list = []
        self.margin_left = 0.7
        self.margin_right = 0.7
        self.margin_top = 0.75
        self.margin_bottom = 0.75
        self.margin_header = 0.3
        self.margin_footer = 0.3
        # Repeat/print ranges, page ordering and fit-to-page settings.
        self.repeat_row_range = ''
        self.repeat_col_range = ''
        self.print_area_range = ''
        self.page_order = 0
        self.black_white = 0
        self.draft_quality = 0
        self.print_comments = 0
        self.page_start = 0
        self.fit_page = 0
        self.fit_width = 0
        self.fit_height = 0
        self.hbreaks = []
        self.vbreaks = []
        # Protection, explicit row/column formatting and display options.
        self.protect_options = {}
        self.set_cols = {}
        self.set_rows = defaultdict(dict)
        self.zoom = 100
        self.zoom_scale_normal = 1
        self.print_scale = 100
        self.is_right_to_left = 0
        self.show_zeros = 1
        self.leading_zeros = 0
        # Row/column outline (grouping) state.
        self.outline_row_level = 0
        self.outline_col_level = 0
        self.outline_style = 0
        self.outline_below = 1
        self.outline_right = 1
        self.outline_on = 1
        self.outline_changed = False
        # Default row/column geometry.
        self.original_row_height = 15
        self.default_row_height = 15
        self.default_row_pixels = 20
        self.default_col_width = 8.43
        self.default_col_pixels = 64
        self.default_row_zeroed = 0
        # Cell data table (row -> col -> cell tuple) and merged ranges.
        self.names = {}
        self.write_match = []
        self.table = defaultdict(dict)
        self.merge = []
        self.row_spans = {}
        # Comments, VML (legacy drawing) objects and form buttons.
        self.has_vml = False
        self.has_header_vml = False
        self.has_comments = False
        self.comments = defaultdict(dict)
        self.comments_list = []
        self.comments_author = ''
        self.comments_visible = 0
        self.vml_shape_id = 1024
        self.buttons_list = []
        self.vml_header_id = 0
        # Autofilter state.
        self.autofilter_area = ''
        self.autofilter_ref = None
        self.filter_range = []
        self.filter_on = 0
        self.filter_cols = {}
        self.filter_type = {}
        # Cached row/col sizes used when positioning images and charts.
        self.col_sizes = {}
        self.row_sizes = {}
        self.col_formats = {}
        self.col_size_changed = False
        self.row_size_changed = False
        # Relationship bookkeeping for hyperlinks, drawings, charts, images,
        # tables and sparklines embedded in this sheet.
        self.last_shape_id = 1
        self.rel_count = 0
        self.hlink_count = 0
        self.hlink_refs = []
        self.external_hyper_links = []
        self.external_drawing_links = []
        self.external_comment_links = []
        self.external_vml_links = []
        self.external_table_links = []
        self.drawing_links = []
        self.vml_drawing_links = []
        self.charts = []
        self.images = []
        self.tables = []
        self.sparklines = []
        self.shapes = []
        self.shape_hash = {}
        self.drawing = 0
        self.drawing_rels = {}
        self.drawing_rels_id = 0
        self.rstring = ''
        self.previous_row = 0
        # Data validation and conditional formatting.
        self.validations = []
        self.cond_formats = {}
        self.data_bars_2010 = []
        self.use_data_bars_2010 = False
        self.dxf_priority = 1
        self.page_view = 0
        self.vba_codename = None
        self.date_1904 = False
        self.hyperlinks = defaultdict(dict)
        # write() type-conversion options (set from Workbook constructor args).
        self.strings_to_numbers = False
        self.strings_to_urls = True
        self.nan_inf_to_errors = False
        self.strings_to_formulas = True
        self.default_date_format = None
        self.default_url_format = None
        self.remove_timezone = False
        self.max_url_length = 2079
        # constant_memory mode row-serialization state.
        self.row_data_filename = None
        self.row_data_fh = None
        self.worksheet_meta = None
        self.vml_data_id = None
        # NOTE(review): overwrites the 1024 assigned above — confirm intended.
        self.vml_shape_id = None
        # NOTE(review): duplicate re-assignment of the two names above.
        self.row_data_filename = None
        self.row_data_fh = None
        self.row_data_fh_closed = False
        self.vertical_dpi = 0
        self.horizontal_dpi = 0
        # User-registered per-type write() callbacks (add_write_handler()).
        self.write_handlers = {}
# Utility function for writing different types of strings.
def _write_token_as_string(self, token, row, col, *args):
# Map the data to the appropriate write_*() method.
if token == '':
return self._write_blank(row, col, *args)
if self.strings_to_formulas and token.startswith('='):
return self._write_formula(row, col, *args)
if token.startswith('{=') and token.endswith('}'):
return self._write_formula(row, col, *args)
if ':' in token:
if self.strings_to_urls and re.match('(ftp|http)s?://', token):
return self._write_url(row, col, *args)
elif self.strings_to_urls and re.match('mailto:', token):
return self._write_url(row, col, *args)
elif self.strings_to_urls and re.match('(in|ex)ternal:', token):
return self._write_url(row, col, *args)
if self.strings_to_numbers:
try:
f = float(token)
if (self.nan_inf_to_errors or
(not isnan(f) and not isinf(f))):
return self._write_number(row, col, f, *args[1:])
except ValueError:
# Not a number, write as a string.
pass
return self._write_string(row, col, *args)
else:
# We have a plain string.
return self._write_string(row, col, *args)
    @convert_cell_args
    def write(self, row, col, *args):
        """
        Write data to a worksheet cell by calling the appropriate write_*()
        method based on the type of data being passed (see _write() for the
        type dispatch). Thanks to @convert_cell_args the cell may also be
        given in 'A1' notation in place of (row, col).

        Args:
            row: The cell row (zero indexed).
            col: The cell column (zero indexed).
            *args: Args to pass to sub functions.

        Returns:
            0: Success.
            -1: Row or column is out of worksheet bounds.
            other: Return value of called method.

        """
        return self._write(row, col, *args)
    # Undecorated version of write().
    def _write(self, row, col, *args):
        """Dispatch args[0] to the write_*() method matching its type."""
        # Check the number of args passed.
        if not len(args):
            raise TypeError("write() takes at least 4 arguments (3 given)")
        # The first arg should be the token for all write calls.
        token = args[0]
        # Write None as a blank cell.
        if token is None:
            return self._write_blank(row, col, *args)
        # Avoid isinstance() for better performance.
        token_type = type(token)
        # Check for any user defined type handlers with callback functions.
        if token_type in self.write_handlers:
            write_handler = self.write_handlers[token_type]
            function_return = write_handler(self, row, col, *args)
            # If the return value is None then the callback has returned
            # control to this function and we should continue as
            # normal. Otherwise we return the value to the caller and exit.
            if function_return is None:
                pass
            else:
                return function_return
        # Check for standard Python types.
        if token_type is bool:
            return self._write_boolean(row, col, *args)
        if token_type in num_types:
            return self._write_number(row, col, *args)
        if token_type is str:
            return self._write_token_as_string(token, row, col, *args)
        if token_type in (datetime.datetime,
                          datetime.date,
                          datetime.time,
                          datetime.timedelta):
            return self._write_datetime(row, col, *args)
        # Python 2 only: 'unicode' tokens are converted to str first.
        if sys.version_info < (3, 0, 0):
            if token_type is unicode:
                try:
                    return self._write_token_as_string(str(token),
                                                       row, col, *args)
                except (UnicodeEncodeError, NameError):
                    pass
        # Resort to isinstance() for subclassed primitives.
        # Write number types.
        if isinstance(token, num_types):
            return self._write_number(row, col, *args)
        # Write string types.
        if isinstance(token, str_types):
            return self._write_token_as_string(token, row, col, *args)
        # Write boolean types.
        if isinstance(token, bool):
            return self._write_boolean(row, col, *args)
        # Write datetime objects.
        if supported_datetime(token):
            return self._write_datetime(row, col, *args)
        # We haven't matched a supported type. Try float.
        try:
            f = float(token)
            return self._write_number(row, col, f, *args[1:])
        except ValueError:
            pass
        except TypeError:
            raise TypeError("Unsupported type %s in write()" % type(token))
        # Finally try string.
        # NOTE(review): str(token) here is only a conversion check; the
        # original token in *args is what gets written.
        try:
            str(token)
            return self._write_string(row, col, *args)
        except ValueError:
            raise TypeError("Unsupported type %s in write()" % type(token))
@convert_cell_args
def write_string(self, row, col, string, cell_format=None):
"""
Write a string to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string: Cell data. Str.
format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
"""
return self._write_string(row, col, string, cell_format)
# Undecorated version of write_string().
def _write_string(self, row, col, string, cell_format=None):
str_error = 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Check that the string is < 32767 chars.
if len(string) > self.xls_strmax:
string = string[:self.xls_strmax]
str_error = -2
# Write a shared string or an in-line string in constant_memory mode.
if not self.constant_memory:
string_index = self.str_table._get_shared_string_index(string)
else:
string_index = string
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_string_tuple(string_index, cell_format)
return str_error
@convert_cell_args
def write_number(self, row, col, number, cell_format=None):
"""
Write a number to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
number: Cell data. Int or float.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_number(row, col, number, cell_format)
    # Undecorated version of write_number().
    def _write_number(self, row, col, number, cell_format=None):
        # Excel has no native NaN/inf. With the 'nan_inf_to_errors' option
        # they are mapped to error-producing formulas; otherwise they are
        # rejected outright.
        if isnan(number) or isinf(number):
            if self.nan_inf_to_errors:
                if isnan(number):
                    # NaN becomes a '#NUM!' formula/result pair.
                    return self._write_formula(row, col, '#NUM!', cell_format,
                                               '#NUM!')
                elif isinf(number):
                    # Infinity becomes 1/0, which evaluates to '#DIV/0!'.
                    return self._write_formula(row, col, '1/0', cell_format,
                                               '#DIV/0!')
            else:
                raise TypeError(
                    "NAN/INF not supported in write_number() "
                    "without 'nan_inf_to_errors' Workbook() option")
        # Check that row and col are valid and store max and min values.
        if self._check_dimensions(row, col):
            return -1
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and row > self.previous_row:
            self._write_single_row(row)
        # Store the cell data in the worksheet data table.
        self.table[row][col] = cell_number_tuple(number, cell_format)
        return 0
@convert_cell_args
def write_blank(self, row, col, blank, cell_format=None):
"""
Write a blank cell with formatting to a worksheet cell. The blank
token is ignored and the format only is written to the cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
blank: Any value. It is ignored.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_blank(row, col, blank, cell_format)
# Undecorated version of write_blank().
def _write_blank(self, row, col, blank, cell_format=None):
# Don't write a blank cell unless it has a format.
if cell_format is None:
return 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_blank_tuple(cell_format)
return 0
@convert_cell_args
def write_formula(self, row, col, formula, cell_format=None, value=0):
"""
Write a formula to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check that row and col are valid and store max and min values.
return self._write_formula(row, col, formula, cell_format, value)
    # Undecorated version of write_formula().
    def _write_formula(self, row, col, formula, cell_format=None, value=0):
        # Check that row and col are valid and store max and min values.
        if self._check_dimensions(row, col):
            return -1
        # Hand off array formulas, i.e. formulas wrapped in '{...}'.
        if formula.startswith('{') and formula.endswith('}'):
            return self._write_array_formula(row, col, row, col, formula,
                                             cell_format, value)
        # Remove the formula '=' sign if it exists.
        # NOTE: lstrip('=') removes *all* leading '=' characters, not just
        # one.
        if formula.startswith('='):
            formula = formula.lstrip('=')
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and row > self.previous_row:
            self._write_single_row(row)
        # Store the formula with its format and cached value.
        self.table[row][col] = cell_formula_tuple(formula, cell_format, value)
        return 0
@convert_range_args
def write_array_formula(self, first_row, first_col, last_row, last_col,
formula, cell_format=None, value=0):
"""
Write a formula to a worksheet cell.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_array_formula(first_row, first_col, last_row,
last_col, formula, cell_format, value)
    # Undecorated version of write_array_formula().
    def _write_array_formula(self, first_row, first_col, last_row, last_col,
                             formula, cell_format=None, value=0):
        # Swap last row/col with first row/col as necessary.
        if first_row > last_row:
            first_row, last_row = last_row, first_row
        if first_col > last_col:
            first_col, last_col = last_col, first_col
        # Check that row and col are valid and store max and min values.
        # Only the bottom-right corner needs checking after normalization.
        if self._check_dimensions(last_row, last_col):
            return -1
        # Define array range: a single cell like "A1" or a range "A1:B2".
        if first_row == last_row and first_col == last_col:
            cell_range = xl_rowcol_to_cell(first_row, first_col)
        else:
            cell_range = (xl_rowcol_to_cell(first_row, first_col) + ':'
                          + xl_rowcol_to_cell(last_row, last_col))
        # Remove array formula braces and the leading =.
        # The stored formula is bare; Excel re-adds the braces.
        if formula[0] == '{':
            formula = formula[1:]
        if formula[0] == '=':
            formula = formula[1:]
        if formula[-1] == '}':
            formula = formula[:-1]
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and first_row > self.previous_row:
            self._write_single_row(first_row)
        # Store the cell data in the worksheet data table. Only the
        # top-left cell carries the formula itself.
        self.table[first_row][first_col] = cell_arformula_tuple(formula,
                                                                cell_format,
                                                                value,
                                                                cell_range)
        # Pad out the rest of the area with formatted zeroes.
        # Not possible in constant_memory mode since rows are streamed.
        if not self.constant_memory:
            for row in range(first_row, last_row + 1):
                for col in range(first_col, last_col + 1):
                    if row != first_row or col != first_col:
                        self._write_number(row, col, 0, cell_format)
        return 0
@convert_cell_args
def write_datetime(self, row, col, date, cell_format=None):
"""
Write a date or time to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
date: Date and/or time as a datetime object.
cell_format: A cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_datetime(row, col, date, cell_format)
# Undecorated version of write_datetime().
def _write_datetime(self, row, col, date, cell_format=None):
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Convert datetime to an Excel date.
number = self._convert_date_time(date)
# Add the default date format.
if cell_format is None:
cell_format = self.default_date_format
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_number_tuple(number, cell_format)
return 0
@convert_cell_args
def write_boolean(self, row, col, boolean, cell_format=None):
"""
Write a boolean value to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
boolean: Cell data. bool type.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_boolean(row, col, boolean, cell_format)
# Undecorated version of write_boolean().
def _write_boolean(self, row, col, boolean, cell_format=None):
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
if boolean:
value = 1
else:
value = 0
# Store the cell data in the worksheet data table.
self.table[row][col] = cell_boolean_tuple(value, cell_format)
return 0
# Write a hyperlink. This is comprised of two elements: the displayed
# string and the non-displayed link. The displayed string is the same as
# the link unless an alternative string is specified. The display string
# is written using the write_string() method. Therefore the max characters
# string limit applies.
#
# The hyperlink can be to a http, ftp, mail, internal sheet, or external
# directory urls.
@convert_cell_args
def write_url(self, row, col, url, cell_format=None,
string=None, tip=None):
"""
Write a hyperlink to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
url: Hyperlink url.
format: An optional cell Format object.
string: An optional display string for the hyperlink.
tip: An optional tooltip.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32767 characters.
-3: URL longer than Excel limit of 255 characters.
-4: Exceeds Excel limit of 65,530 urls per worksheet.
"""
return self._write_url(row, col, url, cell_format, string, tip)
    # Undecorated version of write_url().
    # Classifies the url (external=1, internal=2), normalizes it, enforces
    # Excel's length/count limits and stores the hyperlink metadata.
    def _write_url(self, row, col, url, cell_format=None,
                   string=None, tip=None):
        # Check that row and col are valid and store max and min values
        if self._check_dimensions(row, col):
            return -1
        # Set the displayed string to the URL unless defined by the user.
        if string is None:
            string = url
        # Default to external link type such as 'http://' or 'external:'.
        link_type = 1
        # Remove the URI scheme from internal links.
        if url.startswith('internal:'):
            url = url.replace('internal:', '')
            string = string.replace('internal:', '')
            link_type = 2
        # Remove the URI scheme from external links and change the directory
        # separator from Unix to Dos.
        external = False
        if url.startswith('external:'):
            url = url.replace('external:', '')
            url = url.replace('/', '\\')
            string = string.replace('external:', '')
            string = string.replace('/', '\\')
            external = True
        # Strip the mailto header.
        string = string.replace('mailto:', '')
        # Check that the string is < 32767 chars
        str_error = 0
        if len(string) > self.xls_strmax:
            warn("Ignoring URL since it exceeds Excel's string limit of "
                 "32767 characters")
            return -2
        # Copy string for use in hyperlink elements.
        url_str = string
        # External links to URLs and to other Excel workbooks have slightly
        # different characteristics that we have to account for.
        if link_type == 1:
            # Split url into the link and optional anchor/location.
            if '#' in url:
                url, url_str = url.split('#', 1)
            else:
                url_str = None
            url = self._escape_url(url)
            # The anchor of 'external:' workbook links is not escaped.
            if url_str is not None and not external:
                url_str = self._escape_url(url_str)
            # Add the file:/// URI to the url for Windows style "C:/" link and
            # Network shares.
            if re.match(r'\w:', url) or re.match(r'\\', url):
                url = 'file:///' + url
            # Convert a .\dir\file.xlsx link to dir\file.xlsx.
            url = re.sub(r'^\.\\', '', url)
        # Excel limits the escaped URL and location/anchor to 255 characters
        # by default (self.max_url_length, 2079 for newer Excel versions).
        tmp_url_str = url_str or ''
        max_url = self.max_url_length
        if len(url) > max_url or len(tmp_url_str) > max_url:
            warn("Ignoring URL '%s' with link or location/anchor > %d "
                 "characters since it exceeds Excel's limit for URLS" %
                 (force_unicode(url), max_url))
            return -3
        # Check the limit of URLS per worksheet.
        self.hlink_count += 1
        if self.hlink_count > 65530:
            warn("Ignoring URL '%s' since it exceeds Excel's limit of "
                 "65,530 URLS per worksheet." % force_unicode(url))
            return -4
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and row > self.previous_row:
            self._write_single_row(row)
        # Add the default URL format.
        if cell_format is None:
            cell_format = self.default_url_format
        # Write the hyperlink string.
        self._write_string(row, col, string, cell_format)
        # Store the hyperlink data in a separate structure.
        self.hyperlinks[row][col] = {
            'link_type': link_type,
            'url': url,
            'str': url_str,
            'tip': tip}
        # str_error is 0 here; kept for parity with _write_string().
        return str_error
@convert_cell_args
def write_rich_string(self, row, col, *args):
"""
Write a "rich" string with multiple formats to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string_parts: String and format pairs.
cell_format: Optional Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
-3: 2 consecutive formats used.
-4: Empty string used.
-5: Insufficient parameters.
"""
return self._write_rich_string(row, col, *args)
    # Undecorated version of write_rich_string().
    # Converts alternating Format/str tokens into shared-string rich-text
    # XML, inserting a default Format before any string that lacks one.
    def _write_rich_string(self, row, col, *args):
        tokens = list(args)
        cell_format = None
        str_length = 0
        string_index = 0
        # Check that row and col are valid and store max and min values
        if self._check_dimensions(row, col):
            return -1
        # If the last arg is a format we use it as the cell format.
        if isinstance(tokens[-1], Format):
            cell_format = tokens.pop()
        # Create a temp XMLWriter object and use it to write the rich string
        # XML to a string.
        fh = StringIO()
        self.rstring = XMLwriter()
        self.rstring._set_filehandle(fh)
        # Create a temp format with the default font for unformatted fragments.
        default = Format()
        # Convert list of format, string tokens to pairs of (format, string)
        # except for the first string fragment which doesn't require a default
        # formatting run. Use the default for strings without a leading format.
        fragments = []
        previous = 'format'
        pos = 0
        if len(tokens) <= 2:
            warn("You must specify more then 2 format/fragments for rich "
                 "strings. Ignoring input in write_rich_string().")
            return -5
        for token in tokens:
            if not isinstance(token, Format):
                # Token is a string.
                if previous != 'format':
                    # If previous token wasn't a format add one before string.
                    fragments.append(default)
                    fragments.append(token)
                else:
                    # If previous token was a format just add the string.
                    fragments.append(token)
                if token == '':
                    warn("Excel doesn't allow empty strings in rich strings. "
                         "Ignoring input in write_rich_string().")
                    return -4
                # Keep track of actual string str_length.
                str_length += len(token)
                previous = 'string'
            else:
                # Can't allow 2 formats in a row.
                if previous == 'format' and pos > 0:
                    warn("Excel doesn't allow 2 consecutive formats in rich "
                         "strings. Ignoring input in write_rich_string().")
                    return -3
                # Token is a format object. Add it to the fragment list.
                fragments.append(token)
                previous = 'format'
            pos += 1
        # If the first token is a string start the <r> element.
        if not isinstance(fragments[0], Format):
            self.rstring._xml_start_tag('r')
        # Write the XML elements for the $format $string fragments.
        for token in fragments:
            if isinstance(token, Format):
                # Write the font run.
                self.rstring._xml_start_tag('r')
                self._write_font(token)
            else:
                # Write the string fragment part, with whitespace handling.
                attributes = []
                # Leading/trailing whitespace must be preserved explicitly
                # or the XML parser will strip it.
                if re.search(r'^\s', token) or re.search(r'\s$', token):
                    attributes.append(('xml:space', 'preserve'))
                self.rstring._xml_data_element('t', token, attributes)
                self.rstring._xml_end_tag('r')
        # Read the in-memory string.
        string = self.rstring.fh.getvalue()
        # Check that the string is < 32767 chars.
        if str_length > self.xls_strmax:
            return -2
        # Write a shared string or an in-line string in constant_memory mode.
        if not self.constant_memory:
            string_index = self.str_table._get_shared_string_index(string)
        else:
            string_index = string
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and row > self.previous_row:
            self._write_single_row(row)
        # Store the cell data in the worksheet data table.
        self.table[row][col] = cell_string_tuple(string_index, cell_format)
        return 0
def add_write_handler(self, user_type, user_function):
"""
Add a callback function to the write() method to handle user defined
types.
Args:
user_type: The user type() to match on.
user_function: The user defined function to write the type data.
Returns:
Nothing.
"""
self.write_handlers[user_type] = user_function
@convert_cell_args
def write_row(self, row, col, data, cell_format=None):
"""
Write a row of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
"""
for token in data:
error = self._write(row, col, token, cell_format)
if error:
return error
col += 1
return 0
@convert_cell_args
def write_column(self, row, col, data, cell_format=None):
"""
Write a column of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
"""
for token in data:
error = self._write(row, col, token, cell_format)
if error:
return error
row += 1
return 0
@convert_cell_args
def insert_image(self, row, col, filename, options=None):
"""
Insert an image with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
filename: Path and filename for image in PNG, JPG or BMP format.
options: Position, scale, url and data stream of the image.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert image at (%d, %d).' % (row, col))
return -1
if options is None:
options = {}
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
url = options.get('url', None)
tip = options.get('tip', None)
anchor = options.get('object_position', 2)
image_data = options.get('image_data', None)
# For backward compatibility with older parameter name.
anchor = options.get('positioning', anchor)
if not image_data and not os.path.exists(filename):
warn("Image file '%s' not found." % force_unicode(filename))
return -1
self.images.append([row, col, filename, x_offset, y_offset,
x_scale, y_scale, url, tip, anchor, image_data])
@convert_cell_args
def insert_textbox(self, row, col, text, options=None):
"""
Insert an textbox with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
text: The text for the textbox.
options: Textbox options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert textbox at (%d, %d).' % (row, col))
return -1
if text is None:
text = ''
if options is None:
options = {}
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
anchor = options.get('object_position', 1)
self.shapes.append([row, col, x_offset, y_offset,
x_scale, y_scale, text, anchor, options])
@convert_cell_args
def insert_chart(self, row, col, chart, options=None):
"""
Insert an chart with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
chart: Chart object.
options: Position and scale of the chart.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert chart at (%d, %d).' % (row, col))
return -1
if options is None:
options = {}
# Ensure a chart isn't inserted more than once.
if (chart.already_inserted or chart.combined
and chart.combined.already_inserted):
warn('Chart cannot be inserted in a worksheet more than once.')
return
else:
chart.already_inserted = True
if chart.combined:
chart.combined.already_inserted = True
x_offset = options.get('x_offset', 0)
y_offset = options.get('y_offset', 0)
x_scale = options.get('x_scale', 1)
y_scale = options.get('y_scale', 1)
anchor = options.get('object_position', 1)
# Allow Chart to override the scale and offset.
if chart.x_scale != 1:
x_scale = chart.x_scale
if chart.y_scale != 1:
y_scale = chart.y_scale
if chart.x_offset:
x_offset = chart.x_offset
if chart.y_offset:
y_offset = chart.y_offset
self.charts.append([row, col, chart,
x_offset, y_offset,
x_scale, y_scale,
anchor])
@convert_cell_args
def write_comment(self, row, col, comment, options=None):
"""
Write a comment to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
comment: Cell comment. Str.
options: Comment formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32k characters.
"""
if options is None:
options = {}
# Check that row and col are valid and store max and min values
if self._check_dimensions(row, col):
return -1
# Check that the comment string is < 32767 chars.
if len(comment) > self.xls_strmax:
return -2
self.has_vml = 1
self.has_comments = 1
# Store the options of the cell comment, to process on file close.
self.comments[row][col] = [row, col, comment, options]
def show_comments(self):
"""
Make any comments in the worksheet visible.
Args:
None.
Returns:
Nothing.
"""
self.comments_visible = 1
def set_comments_author(self, author):
"""
Set the default author of the cell comments.
Args:
author: Comment author name. String.
Returns:
Nothing.
"""
self.comments_author = author
    def get_name(self):
        """
        Retrieve the worksheet name.

        Args:
            None.

        Returns:
            The worksheet name, as set in add_worksheet().

        """
        # There is no set_name() method. Name must be set in add_worksheet().
        return self.name
def activate(self):
"""
Set this worksheet as the active worksheet, i.e. the worksheet that is
displayed when the workbook is opened. Also set it as selected.
Note: An active worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0
self.selected = 1
self.worksheet_meta.activesheet = self.index
def select(self):
"""
Set current worksheet as a selected worksheet, i.e. the worksheet
has its tab highlighted.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.selected = 1
self.hidden = 0
def hide(self):
"""
Hide the current worksheet.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 1
# A hidden worksheet shouldn't be active or selected.
self.selected = 0
# TODO. Should add a check to see if the sheet is the global
# activesheet or firstsheet and reset them.
def set_first_sheet(self):
"""
Set current worksheet as the first visible sheet. This is necessary
when there are a large number of worksheets and the activated
worksheet is not visible on the screen.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0 # Active worksheet can't be hidden.
self.worksheet_meta.firstsheet = self.index
    @convert_column_args
    def set_column(self, first_col, last_col, width=None, cell_format=None,
                   options=None):
        """
        Set the width, and other properties of a single column or a
        range of columns.

        Args:
            first_col: First column (zero-indexed).
            last_col: Last column (zero-indexed). Can be same as first_col.
            width: Column width. (optional).
            cell_format: Column cell_format. (optional).
            options: Dict of options such as hidden and level.

        Returns:
            0: Success.
            -1: Column number is out of worksheet bounds.

        """
        if options is None:
            options = {}
        # Ensure 2nd col is larger than first.
        if first_col > last_col:
            (first_col, last_col) = (last_col, first_col)
        # Don't modify the row dimensions when checking the columns.
        ignore_row = True
        # Set optional column values.
        hidden = options.get('hidden', False)
        collapsed = options.get('collapsed', False)
        level = options.get('level', 0)
        # Store the column dimension only in some conditions: a format or a
        # hidden width affects the data area, a bare width does not.
        if cell_format or (width and hidden):
            ignore_col = False
        else:
            ignore_col = True
        # Check that each column is valid and store the max and min values.
        if self._check_dimensions(0, last_col, ignore_row, ignore_col):
            return -1
        if self._check_dimensions(0, first_col, ignore_row, ignore_col):
            return -1
        # Set the limits for the outline levels (0 <= x <= 7).
        if level < 0:
            level = 0
        if level > 7:
            level = 7
        if level > self.outline_col_level:
            self.outline_col_level = level
        # Store the column data. Key is zero-padded for string sorting.
        self.colinfo["%05d" % first_col] = [first_col, last_col, width,
                                            cell_format, hidden, level,
                                            collapsed]
        # Store the column change to allow optimizations.
        self.col_size_changed = True
        if width is None:
            width = self.default_col_width
        # Store the col sizes for use when calculating image vertices taking
        # hidden columns into account. Also store the column formats.
        for col in range(first_col, last_col + 1):
            self.col_sizes[col] = [width, hidden]
            if cell_format:
                self.col_formats[col] = cell_format
        return 0
    def set_row(self, row, height=None, cell_format=None, options=None):
        """
        Set the width, and other properties of a row.

        Args:
            row: Row number (zero-indexed).
            height: Row height. (optional).
            cell_format: Row cell_format. (optional).
            options: Dict of options such as hidden, level and collapsed.

        Returns:
            0: Success.
            -1: Row number is out of worksheet bounds.

        """
        if options is None:
            options = {}
        # Use minimum col in _check_dimensions() so the column range is
        # not artificially extended to column 0.
        if self.dim_colmin is not None:
            min_col = self.dim_colmin
        else:
            min_col = 0
        # Check that row is valid.
        if self._check_dimensions(row, min_col):
            return -1
        if height is None:
            height = self.default_row_height
        # Set optional row values.
        hidden = options.get('hidden', False)
        collapsed = options.get('collapsed', False)
        level = options.get('level', 0)
        # If the height is 0 the row is hidden and the height is the default.
        if height == 0:
            hidden = 1
            height = self.default_row_height
        # Set the limits for the outline levels (0 <= x <= 7).
        if level < 0:
            level = 0
        if level > 7:
            level = 7
        if level > self.outline_row_level:
            self.outline_row_level = level
        # Store the row properties.
        self.set_rows[row] = [height, cell_format, hidden, level, collapsed]
        # Store the row change to allow optimizations.
        self.row_size_changed = True
        # Store the row sizes for use when calculating image vertices.
        self.row_sizes[row] = [height, hidden]
def set_default_row(self, height=None, hide_unused_rows=False):
"""
Set the default row properties.
Args:
height: Default height. Optional, defaults to 15.
hide_unused_rows: Hide unused rows. Optional, defaults to False.
Returns:
Nothing.
"""
if height is None:
height = self.default_row_height
if height != self.original_row_height:
# Store the row change to allow optimizations.
self.row_size_changed = True
self.default_row_height = height
if hide_unused_rows:
self.default_row_zeroed = 1
    @convert_range_args
    def merge_range(self, first_row, first_col, last_row, last_col,
                    data, cell_format=None):
        """
        Merge a range of cells.

        Args:
            first_row: The first row of the cell range. (zero indexed).
            first_col: The first column of the cell range.
            last_row: The last row of the cell range. (zero indexed).
            last_col: The last column of the cell range.
            data: Cell data.
            cell_format: Cell Format object.

        Returns:
            0: Success.
            -1: Row or column is out of worksheet bounds.
            other: Return value of write().

        """
        # Merge a range of cells. The first cell should contain the data and
        # the others should be blank. All cells should have the same format.
        # Excel doesn't allow a single cell to be merged
        # NOTE(review): the two early exits below return None rather than
        # the error codes documented above.
        if first_row == last_row and first_col == last_col:
            warn("Can't merge single cell")
            return
        # Swap last row/col with first row/col as necessary
        if first_row > last_row:
            (first_row, last_row) = (last_row, first_row)
        if first_col > last_col:
            (first_col, last_col) = (last_col, first_col)
        # Check that column number is valid and store the max value
        if self._check_dimensions(last_row, last_col) == -1:
            return
        # Store the merge range.
        self.merge.append([first_row, first_col, last_row, last_col])
        # Write the first cell
        self._write(first_row, first_col, data, cell_format)
        # Pad out the rest of the area with formatted blank cells.
        for row in range(first_row, last_row + 1):
            for col in range(first_col, last_col + 1):
                if row == first_row and col == first_col:
                    continue
                self._write_blank(row, col, '', cell_format)
@convert_range_args
def autofilter(self, first_row, first_col, last_row, last_col):
"""
Set the autofilter area in the worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
Nothing.
"""
# Reverse max and min values if necessary.
if last_row < first_row:
(first_row, last_row) = (last_row, first_row)
if last_col < first_col:
(first_col, last_col) = (last_col, first_col)
# Build up the print area range "Sheet1!$A$1:$C$13".
area = self._convert_name_area(first_row, first_col,
last_row, last_col)
ref = xl_range(first_row, first_col, last_row, last_col)
self.autofilter_area = area
self.autofilter_ref = ref
self.filter_range = [first_col, last_col]
def filter_column(self, col, criteria):
"""
Set the column filter criteria.
Args:
col: Filter column (zero-indexed).
criteria: Filter criteria.
Returns:
Nothing.
"""
if not self.autofilter_area:
warn("Must call autofilter() before filter_column()")
return
# Check for a column reference in A1 notation and substitute.
try:
int(col)
except ValueError:
# Convert col ref to a cell ref and then to a col number.
col_letter = col
(_, col) = xl_cell_to_rowcol(col + '1')
if col >= self.xls_colmax:
warn("Invalid column '%s'" % col_letter)
return
(col_first, col_last) = self.filter_range
# Reject column if it is outside filter range.
if col < col_first or col > col_last:
warn("Column '%d' outside autofilter() column range (%d, %d)"
% (col, col_first, col_last))
return
tokens = self._extract_filter_tokens(criteria)
if not (len(tokens) == 3 or len(tokens) == 7):
warn("Incorrect number of tokens in criteria '%s'" % criteria)
tokens = self._parse_filter_expression(criteria, tokens)
# Excel handles single or double custom filters as default filters.
# We need to check for them and handle them accordingly.
if len(tokens) == 2 and tokens[0] == 2:
# Single equality.
self.filter_column_list(col, [tokens[1]])
elif (len(tokens) == 5 and tokens[0] == 2 and tokens[2] == 1
and tokens[3] == 2):
# Double equality with "or" operator.
self.filter_column_list(col, [tokens[1], tokens[4]])
else:
# Non default custom filter.
self.filter_cols[col] = tokens
self.filter_type[col] = 0
self.filter_on = 1
def filter_column_list(self, col, filters):
"""
Set the column filter criteria in Excel 2007 list style.
Args:
col: Filter column (zero-indexed).
filters: List of filter criteria to match.
Returns:
Nothing.
"""
if not self.autofilter_area:
warn("Must call autofilter() before filter_column()")
return
# Check for a column reference in A1 notation and substitute.
try:
int(col)
except ValueError:
# Convert col ref to a cell ref and then to a col number.
col_letter = col
(_, col) = xl_cell_to_rowcol(col + '1')
if col >= self.xls_colmax:
warn("Invalid column '%s'" % col_letter)
return
(col_first, col_last) = self.filter_range
# Reject column if it is outside filter range.
if col < col_first or col > col_last:
warn("Column '%d' outside autofilter() column range "
"(%d,%d)" % (col, col_first, col_last))
return
self.filter_cols[col] = filters
self.filter_type[col] = 1
self.filter_on = 1
@convert_range_args
def data_validation(self, first_row, first_col, last_row, last_col,
options=None):
"""
Add a data validation to a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
options: Data validation options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: Incorrect parameter or option.
"""
# Check that row and col are valid without storing the values.
if self._check_dimensions(first_row, first_col, True, True):
return -1
if self._check_dimensions(last_row, last_col, True, True):
return -1
if options is None:
options = {}
else:
# Copy the user defined options so they aren't modified.
options = options.copy()
# Valid input parameters.
valid_parameters = {
'validate': True,
'criteria': True,
'value': True,
'source': True,
'minimum': True,
'maximum': True,
'ignore_blank': True,
'dropdown': True,
'show_input': True,
'input_title': True,
'input_message': True,
'show_error': True,
'error_title': True,
'error_message': True,
'error_type': True,
'other_cells': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameters:
warn("Unknown parameter '%s' in data_validation()" % param_key)
return -2
# Map alternative parameter names 'source' or 'minimum' to 'value'.
if 'source' in options:
options['value'] = options['source']
if 'minimum' in options:
options['value'] = options['minimum']
# 'validate' is a required parameter.
if 'validate' not in options:
warn("Parameter 'validate' is required in data_validation()")
return -2
# List of valid validation types.
valid_types = {
'any': 'none',
'any value': 'none',
'whole number': 'whole',
'whole': 'whole',
'integer': 'whole',
'decimal': 'decimal',
'list': 'list',
'date': 'date',
'time': 'time',
'text length': 'textLength',
'length': 'textLength',
'custom': 'custom',
}
# Check for valid validation types.
if not options['validate'] in valid_types:
warn("Unknown validation type '%s' for parameter "
"'validate' in data_validation()" % options['validate'])
return -2
else:
options['validate'] = valid_types[options['validate']]
# No action is required for validation type 'any' if there are no
# input messages to display.
if (options['validate'] == 'none'
and options.get('input_title') is None
and options.get('input_message') is None):
return -2
# The any, list and custom validations don't have a criteria so we use
# a default of 'between'.
if (options['validate'] == 'none'
or options['validate'] == 'list'
or options['validate'] == 'custom'):
options['criteria'] = 'between'
options['maximum'] = None
# 'criteria' is a required parameter.
if 'criteria' not in options:
warn("Parameter 'criteria' is required in data_validation()")
return -2
# Valid criteria types.
criteria_types = {
'between': 'between',
'not between': 'notBetween',
'equal to': 'equal',
'=': 'equal',
'==': 'equal',
'not equal to': 'notEqual',
'!=': 'notEqual',
'<>': 'notEqual',
'greater than': 'greaterThan',
'>': 'greaterThan',
'less than': 'lessThan',
'<': 'lessThan',
'greater than or equal to': 'greaterThanOrEqual',
'>=': 'greaterThanOrEqual',
'less than or equal to': 'lessThanOrEqual',
'<=': 'lessThanOrEqual',
}
# Check for valid criteria types.
if not options['criteria'] in criteria_types:
warn("Unknown criteria type '%s' for parameter "
"'criteria' in data_validation()" % options['criteria'])
return -2
else:
options['criteria'] = criteria_types[options['criteria']]
# 'Between' and 'Not between' criteria require 2 values.
if (options['criteria'] == 'between' or
options['criteria'] == 'notBetween'):
if 'maximum' not in options:
warn("Parameter 'maximum' is required in data_validation() "
"when using 'between' or 'not between' criteria")
return -2
else:
options['maximum'] = None
# Valid error dialog types.
error_types = {
'stop': 0,
'warning': 1,
'information': 2,
}
# Check for valid error dialog types.
if 'error_type' not in options:
options['error_type'] = 0
elif not options['error_type'] in error_types:
warn("Unknown criteria type '%s' for parameter 'error_type' "
"in data_validation()" % options['error_type'])
return -2
else:
options['error_type'] = error_types[options['error_type']]
# Convert date/times value if required.
if options['validate'] == 'date' or options['validate'] == 'time':
if options['value']:
if supported_datetime(options['value']):
date_time = self._convert_date_time(options['value'])
# Format date number to the same precision as Excel.
options['value'] = "%.16g" % date_time
if options['maximum']:
if supported_datetime(options['maximum']):
date_time = self._convert_date_time(options['maximum'])
options['maximum'] = "%.16g" % date_time
# Check that the input title doesn't exceed the maximum length.
if options.get('input_title') and len(options['input_title']) > 32:
warn("Length of input title '%s' exceeds Excel's limit of 32"
% force_unicode(options['input_title']))
return -2
# Check that the error title doesn't exceed the maximum length.
if options.get('error_title') and len(options['error_title']) > 32:
warn("Length of error title '%s' exceeds Excel's limit of 32"
% force_unicode(options['error_title']))
return -2
# Check that the input message doesn't exceed the maximum length.
if (options.get('input_message')
and len(options['input_message']) > 255):
warn("Length of input message '%s' exceeds Excel's limit of 255"
% force_unicode(options['input_message']))
return -2
# Check that the error message doesn't exceed the maximum length.
if (options.get('error_message')
and len(options['error_message']) > 255):
warn("Length of error message '%s' exceeds Excel's limit of 255"
% force_unicode(options['error_message']))
return -2
# Check that the input list doesn't exceed the maximum length.
if options['validate'] == 'list' and type(options['value']) is list:
formula = self._csv_join(*options['value'])
if len(formula) > 255:
warn("Length of list items '%s' exceeds Excel's limit of "
"255, use a formula range instead"
% force_unicode(formula))
return -2
# Set some defaults if they haven't been defined by the user.
if 'ignore_blank' not in options:
options['ignore_blank'] = 1
if 'dropdown' not in options:
options['dropdown'] = 1
if 'show_input' not in options:
options['show_input'] = 1
if 'show_error' not in options:
options['show_error'] = 1
# These are the cells to which the validation is applied.
options['cells'] = [[first_row, first_col, last_row, last_col]]
# A (for now) undocumented parameter to pass additional cell ranges.
if 'other_cells' in options:
options['cells'].extend(options['other_cells'])
# Store the validation information until we close the worksheet.
self.validations.append(options)
    @convert_range_args
    def conditional_format(self, first_row, first_col, last_row, last_col,
                           options=None):
        """
        Add a conditional format to a worksheet.

        Args:
            first_row: The first row of the cell range. (zero indexed).
            first_col: The first column of the cell range.
            last_row:  The last row of the cell range. (zero indexed).
            last_col:  The last column of the cell range.
            options:   Conditional format options.

        Returns:
            0:  Success.
            -1: Row or column is out of worksheet bounds.
            -2: Incorrect parameter or option.
        """
        # Check that row and col are valid without storing the values.
        if self._check_dimensions(first_row, first_col, True, True):
            return -1
        if self._check_dimensions(last_row, last_col, True, True):
            return -1

        if options is None:
            options = {}
        else:
            # Copy the user defined options so they aren't modified.
            options = options.copy()

        # Valid input parameters.
        valid_parameter = {
            'type': True,
            'format': True,
            'criteria': True,
            'value': True,
            'minimum': True,
            'maximum': True,
            'stop_if_true': True,
            'min_type': True,
            'mid_type': True,
            'max_type': True,
            'min_value': True,
            'mid_value': True,
            'max_value': True,
            'min_color': True,
            'mid_color': True,
            'max_color': True,
            'min_length': True,
            'max_length': True,
            'multi_range': True,
            'bar_color': True,
            'bar_negative_color': True,
            'bar_negative_color_same': True,
            'bar_solid': True,
            'bar_border_color': True,
            'bar_negative_border_color': True,
            'bar_negative_border_color_same': True,
            'bar_no_border': True,
            'bar_direction': True,
            'bar_axis_position': True,
            'bar_axis_color': True,
            'bar_only': True,
            'data_bar_2010': True,
            'icon_style': True,
            'reverse_icons': True,
            'icons_only': True,
            'icons': True}

        # Check for valid input parameters.
        for param_key in options.keys():
            if param_key not in valid_parameter:
                warn("Unknown parameter '%s' in conditional_format()" %
                     param_key)
                return -2

        # 'type' is a required parameter.
        if 'type' not in options:
            warn("Parameter 'type' is required in conditional_format()")
            return -2

        # Valid types: maps the user-facing name to the XML attribute value.
        valid_type = {
            'cell': 'cellIs',
            'date': 'date',
            'time': 'time',
            'average': 'aboveAverage',
            'duplicate': 'duplicateValues',
            'unique': 'uniqueValues',
            'top': 'top10',
            'bottom': 'top10',
            'text': 'text',
            'time_period': 'timePeriod',
            'blanks': 'containsBlanks',
            'no_blanks': 'notContainsBlanks',
            'errors': 'containsErrors',
            'no_errors': 'notContainsErrors',
            '2_color_scale': '2_color_scale',
            '3_color_scale': '3_color_scale',
            'data_bar': 'dataBar',
            'formula': 'expression',
            'icon_set': 'iconSet'}

        # Check for valid types.
        if options['type'] not in valid_type:
            warn("Unknown value '%s' for parameter 'type' "
                 "in conditional_format()" % options['type'])
            return -2
        else:
            # 'top' and 'bottom' share the 'top10' XML type; record the
            # direction so the writer can distinguish them later.
            if options['type'] == 'bottom':
                options['direction'] = 'bottom'
            options['type'] = valid_type[options['type']]

        # Valid criteria types: maps user-facing names/operators to the
        # OOXML operator names.
        criteria_type = {
            'between': 'between',
            'not between': 'notBetween',
            'equal to': 'equal',
            '=': 'equal',
            '==': 'equal',
            'not equal to': 'notEqual',
            '!=': 'notEqual',
            '<>': 'notEqual',
            'greater than': 'greaterThan',
            '>': 'greaterThan',
            'less than': 'lessThan',
            '<': 'lessThan',
            'greater than or equal to': 'greaterThanOrEqual',
            '>=': 'greaterThanOrEqual',
            'less than or equal to': 'lessThanOrEqual',
            '<=': 'lessThanOrEqual',
            'containing': 'containsText',
            'not containing': 'notContains',
            'begins with': 'beginsWith',
            'ends with': 'endsWith',
            'yesterday': 'yesterday',
            'today': 'today',
            'last 7 days': 'last7Days',
            'last week': 'lastWeek',
            'this week': 'thisWeek',
            'next week': 'nextWeek',
            'last month': 'lastMonth',
            'this month': 'thisMonth',
            'next month': 'nextMonth',
            # For legacy, but incorrect, support.
            'continue week': 'nextWeek',
            'continue month': 'nextMonth'}

        # Check for valid criteria types. Unknown criteria are passed
        # through unchanged (e.g. 'tomorrow' for time_period).
        if 'criteria' in options and options['criteria'] in criteria_type:
            options['criteria'] = criteria_type[options['criteria']]

        # Convert date/times value if required.
        if options['type'] == 'date' or options['type'] == 'time':
            options['type'] = 'cellIs'

            if 'value' in options:
                if not supported_datetime(options['value']):
                    warn("Conditional format 'value' must be a "
                         "datetime object.")
                    return -2
                else:
                    date_time = self._convert_date_time(options['value'])
                    # Format date number to the same precision as Excel.
                    options['value'] = "%.16g" % date_time

            if 'minimum' in options:
                if not supported_datetime(options['minimum']):
                    warn("Conditional format 'minimum' must be a "
                         "datetime object.")
                    return -2
                else:
                    date_time = self._convert_date_time(options['minimum'])
                    options['minimum'] = "%.16g" % date_time

            if 'maximum' in options:
                if not supported_datetime(options['maximum']):
                    warn("Conditional format 'maximum' must be a "
                         "datetime object.")
                    return -2
                else:
                    date_time = self._convert_date_time(options['maximum'])
                    options['maximum'] = "%.16g" % date_time

        # Valid icon styles.
        valid_icons = {
            "3_arrows": "3Arrows",                          # 1
            "3_flags": "3Flags",                            # 2
            "3_traffic_lights_rimmed": "3TrafficLights2",   # 3
            "3_symbols_circled": "3Symbols",                # 4
            "4_arrows": "4Arrows",                          # 5
            "4_red_to_black": "4RedToBlack",                # 6
            "4_traffic_lights": "4TrafficLights",           # 7
            "5_arrows_gray": "5ArrowsGray",                 # 8
            "5_quarters": "5Quarters",                      # 9
            "3_arrows_gray": "3ArrowsGray",                 # 10
            "3_traffic_lights": "3TrafficLights",           # 11
            "3_signs": "3Signs",                            # 12
            "3_symbols": "3Symbols2",                       # 13
            "4_arrows_gray": "4ArrowsGray",                 # 14
            "4_ratings": "4Rating",                         # 15
            "5_arrows": "5Arrows",                          # 16
            "5_ratings": "5Rating"}                         # 17

        # Set the icon set properties.
        if options['type'] == 'iconSet':
            # An icon_set must have an icon style.
            if not options.get('icon_style'):
                warn("The 'icon_style' parameter must be specified when "
                     "'type' == 'icon_set' in conditional_format()")
                # NOTE(review): -3 is inconsistent with the documented
                # return codes (0/-1/-2) — confirm whether intended.
                return -3

            # Check for valid icon styles.
            if options['icon_style'] not in valid_icons:
                warn("Unknown icon_style '%s' in conditional_format()" %
                     options['icon_style'])
                return -2
            else:
                options['icon_style'] = valid_icons[options['icon_style']]

            # Set the number of icons for the icon style.
            options['total_icons'] = 3
            if options['icon_style'].startswith('4'):
                options['total_icons'] = 4
            elif options['icon_style'].startswith('5'):
                options['total_icons'] = 5

            options['icons'] = self._set_icon_props(options.get('total_icons'),
                                                    options.get('icons'))

        # Swap last row/col for first row/col as necessary
        if first_row > last_row:
            first_row, last_row = last_row, first_row
        if first_col > last_col:
            first_col, last_col = last_col, first_col

        # Set the formatting range.
        # If the first and last cell are the same write a single cell.
        if first_row == last_row and first_col == last_col:
            cell_range = xl_rowcol_to_cell(first_row, first_col)
            start_cell = cell_range
        else:
            cell_range = xl_range(first_row, first_col, last_row, last_col)
            start_cell = xl_rowcol_to_cell(first_row, first_col)

        # Override with user defined multiple range if provided.
        if 'multi_range' in options:
            cell_range = options['multi_range']
            cell_range = cell_range.replace('$', '')

        # Get the dxf format index.
        if 'format' in options and options['format']:
            options['format'] = options['format']._get_dxf_index()

        # Set the priority based on the order of adding.
        options['priority'] = self.dxf_priority
        self.dxf_priority += 1

        # Check for 2010 style data_bar parameters.
        if (self.use_data_bars_2010 or
                options.get('data_bar_2010') or
                options.get('bar_solid') or
                options.get('bar_border_color') or
                options.get('bar_negative_color') or
                options.get('bar_negative_color_same') or
                options.get('bar_negative_border_color') or
                options.get('bar_negative_border_color_same') or
                options.get('bar_no_border') or
                options.get('bar_axis_position') or
                options.get('bar_axis_color') or
                options.get('bar_direction')):
            options['is_data_bar_2010'] = True

        # Special handling of text criteria: each maps to a worksheet
        # formula anchored on the first cell of the range.
        if options['type'] == 'text':
            if options['criteria'] == 'containsText':
                options['type'] = 'containsText'
                options['formula'] = ('NOT(ISERROR(SEARCH("%s",%s)))'
                                      % (options['value'], start_cell))
            elif options['criteria'] == 'notContains':
                options['type'] = 'notContainsText'
                options['formula'] = ('ISERROR(SEARCH("%s",%s))'
                                      % (options['value'], start_cell))
            elif options['criteria'] == 'beginsWith':
                options['type'] = 'beginsWith'
                options['formula'] = ('LEFT(%s,%d)="%s"'
                                      % (start_cell,
                                         len(options['value']),
                                         options['value']))
            elif options['criteria'] == 'endsWith':
                options['type'] = 'endsWith'
                options['formula'] = ('RIGHT(%s,%d)="%s"'
                                      % (start_cell,
                                         len(options['value']),
                                         options['value']))
            else:
                warn("Invalid text criteria '%s' "
                     "in conditional_format()" % options['criteria'])

        # Special handling of time time_period criteria.
        if options['type'] == 'timePeriod':
            if options['criteria'] == 'yesterday':
                options['formula'] = 'FLOOR(%s,1)=TODAY()-1' % start_cell
            elif options['criteria'] == 'today':
                options['formula'] = 'FLOOR(%s,1)=TODAY()' % start_cell
            elif options['criteria'] == 'tomorrow':
                options['formula'] = 'FLOOR(%s,1)=TODAY()+1' % start_cell
            elif options['criteria'] == 'last7Days':
                options['formula'] = \
                    ('AND(TODAY()-FLOOR(%s,1)<=6,FLOOR(%s,1)<=TODAY())' %
                     (start_cell, start_cell))
            elif options['criteria'] == 'lastWeek':
                options['formula'] = \
                    ('AND(TODAY()-ROUNDDOWN(%s,0)>=(WEEKDAY(TODAY())),'
                     'TODAY()-ROUNDDOWN(%s,0)<(WEEKDAY(TODAY())+7))' %
                     (start_cell, start_cell))
            elif options['criteria'] == 'thisWeek':
                options['formula'] = \
                    ('AND(TODAY()-ROUNDDOWN(%s,0)<=WEEKDAY(TODAY())-1,'
                     'ROUNDDOWN(%s,0)-TODAY()<=7-WEEKDAY(TODAY()))' %
                     (start_cell, start_cell))
            elif options['criteria'] == 'nextWeek':
                options['formula'] = \
                    ('AND(ROUNDDOWN(%s,0)-TODAY()>(7-WEEKDAY(TODAY())),'
                     'ROUNDDOWN(%s,0)-TODAY()<(15-WEEKDAY(TODAY())))' %
                     (start_cell, start_cell))
            elif options['criteria'] == 'lastMonth':
                # NOTE(review): the literal 'YEAR(A1)' below looks like it
                # should be 'YEAR(%s)' with start_cell — confirm against
                # upstream before changing, since it alters output files.
                options['formula'] = \
                    ('AND(MONTH(%s)=MONTH(TODAY())-1,OR(YEAR(%s)=YEAR('
                     'TODAY()),AND(MONTH(%s)=1,YEAR(A1)=YEAR(TODAY())-1)))' %
                     (start_cell, start_cell, start_cell))
            elif options['criteria'] == 'thisMonth':
                options['formula'] = \
                    ('AND(MONTH(%s)=MONTH(TODAY()),YEAR(%s)=YEAR(TODAY()))' %
                     (start_cell, start_cell))
            elif options['criteria'] == 'nextMonth':
                options['formula'] = \
                    ('AND(MONTH(%s)=MONTH(TODAY())+1,OR(YEAR(%s)=YEAR('
                     'TODAY()),AND(MONTH(%s)=12,YEAR(%s)=YEAR(TODAY())+1)))' %
                     (start_cell, start_cell, start_cell, start_cell))
            else:
                warn("Invalid time_period criteria '%s' "
                     "in conditional_format()" % options['criteria'])

        # Special handling of blanks/error types.
        if options['type'] == 'containsBlanks':
            options['formula'] = 'LEN(TRIM(%s))=0' % start_cell

        if options['type'] == 'notContainsBlanks':
            options['formula'] = 'LEN(TRIM(%s))>0' % start_cell

        if options['type'] == 'containsErrors':
            options['formula'] = 'ISERROR(%s)' % start_cell

        if options['type'] == 'notContainsErrors':
            options['formula'] = 'NOT(ISERROR(%s))' % start_cell

        # Special handling for 2 color scale.
        if options['type'] == '2_color_scale':
            options['type'] = 'colorScale'

            # Color scales don't use any additional formatting.
            options['format'] = None

            # Turn off 3 color parameters.
            options['mid_type'] = None
            options['mid_color'] = None

            options.setdefault('min_type', 'min')
            options.setdefault('max_type', 'max')
            options.setdefault('min_value', 0)
            options.setdefault('max_value', 0)
            options.setdefault('min_color', '#FF7128')
            options.setdefault('max_color', '#FFEF9C')

            options['min_color'] = xl_color(options['min_color'])
            options['max_color'] = xl_color(options['max_color'])

        # Special handling for 3 color scale.
        if options['type'] == '3_color_scale':
            options['type'] = 'colorScale'

            # Color scales don't use any additional formatting.
            options['format'] = None

            options.setdefault('min_type', 'min')
            options.setdefault('mid_type', 'percentile')
            options.setdefault('max_type', 'max')
            options.setdefault('min_value', 0)
            options.setdefault('max_value', 0)
            options.setdefault('min_color', '#F8696B')
            options.setdefault('mid_color', '#FFEB84')
            options.setdefault('max_color', '#63BE7B')

            options['min_color'] = xl_color(options['min_color'])
            options['mid_color'] = xl_color(options['mid_color'])
            options['max_color'] = xl_color(options['max_color'])

            # Set a default mid value.
            if 'mid_value' not in options:
                options['mid_value'] = 50

        # Special handling for data bar.
        if options['type'] == 'dataBar':
            # Color scales don't use any additional formatting.
            options['format'] = None

            if not options.get('min_type'):
                options['min_type'] = 'min'
                options['x14_min_type'] = 'autoMin'
            else:
                options['x14_min_type'] = options['min_type']

            if not options.get('max_type'):
                options['max_type'] = 'max'
                options['x14_max_type'] = 'autoMax'
            else:
                options['x14_max_type'] = options['max_type']

            options.setdefault('min_value', 0)
            options.setdefault('max_value', 0)
            options.setdefault('bar_color', '#638EC6')
            options.setdefault('bar_border_color', options['bar_color'])
            options.setdefault('bar_only', False)
            options.setdefault('bar_no_border', False)
            options.setdefault('bar_solid', False)
            options.setdefault('bar_direction', '')
            options.setdefault('bar_negative_color', '#FF0000')
            options.setdefault('bar_negative_border_color', '#FF0000')
            options.setdefault('bar_negative_color_same', False)
            options.setdefault('bar_negative_border_color_same', False)
            options.setdefault('bar_axis_position', '')
            options.setdefault('bar_axis_color', '#000000')

            options['bar_color'] = xl_color(options['bar_color'])
            options['bar_border_color'] = xl_color(options['bar_border_color'])
            options['bar_axis_color'] = xl_color(options['bar_axis_color'])
            options['bar_negative_color'] = \
                xl_color(options['bar_negative_color'])
            options['bar_negative_border_color'] = \
                xl_color(options['bar_negative_border_color'])

        # Adjust for 2010 style data_bar parameters.
        if options.get('is_data_bar_2010'):
            self.excel_version = 2010

            # An auto min/max is encoded as a missing value in 2010 format.
            if options['min_type'] == 'min' and options['min_value'] == 0:
                options['min_value'] = None

            if options['max_type'] == 'max' and options['max_value'] == 0:
                options['max_value'] = None

        options['range'] = cell_range

        # Strip the leading = from formulas.
        try:
            options['min_value'] = options['min_value'].lstrip('=')
        except (KeyError, AttributeError):
            pass
        try:
            options['mid_value'] = options['mid_value'].lstrip('=')
        except (KeyError, AttributeError):
            pass
        try:
            options['max_value'] = options['max_value'].lstrip('=')
        except (KeyError, AttributeError):
            pass

        # Store the conditional format until we close the worksheet.
        if cell_range in self.cond_formats:
            self.cond_formats[cell_range].append(options)
        else:
            self.cond_formats[cell_range] = [options]
@convert_range_args
def add_table(self, first_row, first_col, last_row, last_col,
options=None):
"""
Add an Excel table to a worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
options: Table format options. (Optional)
Returns:
0: Success.
-1: Not supported in constant_memory mode.
-2: Row or column is out of worksheet bounds.
-3: Incorrect parameter or option.
"""
table = {}
col_formats = {}
if options is None:
options = {}
else:
# Copy the user defined options so they aren't modified.
options = options.copy()
if self.constant_memory:
warn("add_table() isn't supported in 'constant_memory' mode")
return -1
# Check that row and col are valid without storing the values.
if self._check_dimensions(first_row, first_col, True, True):
return -2
if self._check_dimensions(last_row, last_col, True, True):
return -2
# Valid input parameters.
valid_parameter = {
'autofilter': True,
'banded_columns': True,
'banded_rows': True,
'columns': True,
'data': True,
'first_column': True,
'header_row': True,
'last_column': True,
'name': True,
'style': True,
'total_row': True,
}
# Check for valid input parameters.
for param_key in options.keys():
if param_key not in valid_parameter:
warn("Unknown parameter '%s' in add_table()" % param_key)
return -3
# Turn on Excel's defaults.
options['banded_rows'] = options.get('banded_rows', True)
options['header_row'] = options.get('header_row', True)
options['autofilter'] = options.get('autofilter', True)
# Set the table options.
table['show_first_col'] = options.get('first_column', False)
table['show_last_col'] = options.get('last_column', False)
table['show_row_stripes'] = options.get('banded_rows', False)
table['show_col_stripes'] = options.get('banded_columns', False)
table['header_row_count'] = options.get('header_row', 0)
table['totals_row_shown'] = options.get('total_row', False)
# Set the table name.
if 'name' in options:
name = options['name']
table['name'] = name
if ' ' in name:
warn("Name '%s' in add_table() cannot contain spaces"
% force_unicode(name))
return -3
# Warn if the name contains invalid chars as defined by Excel.
if (not re.match(r'^[\w\\][\w\\.]*$', name, re.UNICODE)
or re.match(r'^\d', name)):
warn("Invalid Excel characters in add_table(): '%s'"
% force_unicode(name))
return -1
# Warn if the name looks like a cell name.
if re.match(r'^[a-zA-Z][a-zA-Z]?[a-dA-D]?[0-9]+$', name):
warn("Name looks like a cell name in add_table(): '%s'"
% force_unicode(name))
return -1
# Warn if the name looks like a R1C1 cell reference.
if (re.match(r'^[rcRC]$', name)
or re.match(r'^[rcRC]\d+[rcRC]\d+$', name)):
warn("Invalid name '%s' like a RC cell ref in add_table()"
% force_unicode(name))
return -1
# Set the table style.
if 'style' in options:
table['style'] = options['style']
if table['style'] is None:
table['style'] = ''
# Remove whitespace from style name.
table['style'] = table['style'].replace(' ', '')
else:
table['style'] = "TableStyleMedium9"
# Swap last row/col for first row/col as necessary.
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
# Set the data range rows (without the header and footer).
first_data_row = first_row
last_data_row = last_row
if options.get('header_row'):
first_data_row += 1
if options.get('total_row'):
last_data_row -= 1
# Set the table and autofilter ranges.
table['range'] = xl_range(first_row, first_col,
last_row, last_col)
table['a_range'] = xl_range(first_row, first_col,
last_data_row, last_col)
# If the header row if off the default is to turn autofilter off.
if not options['header_row']:
options['autofilter'] = 0
# Set the autofilter range.
if options['autofilter']:
table['autofilter'] = table['a_range']
# Add the table columns.
col_id = 1
table['columns'] = []
seen_names = {}
for col_num in range(first_col, last_col + 1):
# Set up the default column data.
col_data = {
'id': col_id,
'name': 'Column' + str(col_id),
'total_string': '',
'total_function': '',
'total_value': 0,
'formula': '',
'format': None,
'name_format': None,
}
# Overwrite the defaults with any user defined values.
if 'columns' in options:
# Check if there are user defined values for this column.
if col_id <= len(options['columns']):
user_data = options['columns'][col_id - 1]
else:
user_data = None
if user_data:
# Get the column format.
xformat = user_data.get('format', None)
# Map user defined values to internal values.
if user_data.get('header'):
col_data['name'] = user_data['header']
# Excel requires unique case insensitive header names.
header_name = col_data['name']
name = header_name.lower()
if name in seen_names:
warn("Duplicate header name in add_table(): '%s'"
% force_unicode(name))
return -1
else:
seen_names[name] = True
col_data['name_format'] = user_data.get('header_format')
# Handle the column formula.
if 'formula' in user_data and user_data['formula']:
formula = user_data['formula']
# Remove the formula '=' sign if it exists.
if formula.startswith('='):
formula = formula.lstrip('=')
# Covert Excel 2010 "@" ref to 2007 "#This Row".
formula = formula.replace('@', '[#This Row],')
col_data['formula'] = formula
for row in range(first_data_row, last_data_row + 1):
self._write_formula(row, col_num, formula, xformat)
# Handle the function for the total row.
if user_data.get('total_function'):
function = user_data['total_function']
# Massage the function name.
function = function.lower()
function = function.replace('_', '')
function = function.replace(' ', '')
if function == 'countnums':
function = 'countNums'
if function == 'stddev':
function = 'stdDev'
col_data['total_function'] = function
formula = \
self._table_function_to_formula(function,
col_data['name'])
value = user_data.get('total_value', 0)
self._write_formula(last_row, col_num, formula,
xformat, value)
elif user_data.get('total_string'):
# Total label only (not a function).
total_string = user_data['total_string']
col_data['total_string'] = total_string
self._write_string(last_row, col_num, total_string,
user_data.get('format'))
# Get the dxf format index.
if xformat is not None:
col_data['format'] = xformat._get_dxf_index()
# Store the column format for writing the cell data.
# It doesn't matter if it is undefined.
col_formats[col_id - 1] = xformat
# Store the column data.
table['columns'].append(col_data)
# Write the column headers to the worksheet.
if options['header_row']:
self._write_string(first_row, col_num, col_data['name'],
col_data['name_format'])
col_id += 1
# Store the column id that have a formula.
col_formulas = [col['id'] for col in table['columns']
if col['formula'] != '']
# Write the cell data if supplied.
if 'data' in options:
data = options['data']
i = 0 # For indexing the row data.
for row in range(first_data_row, last_data_row + 1):
j = 0 # For indexing the col data.
k = 0 # For indexing the col id.
for col in range(first_col, last_col + 1):
if i < len(data) and j < len(data[i]):
# To avoid writing value if column contains a formula.
if col not in col_formulas:
token = data[i][j]
if k in col_formats:
self._write(row, col, token,
col_formats[k])
else:
self._write(row, col, token, None)
# Increment index of col data if column
# does not contain any formula
j += 1
k += 1
i += 1
# Store the table data.
self.tables.append(table)
return table
    @convert_cell_args
    def add_sparkline(self, row, col, options=None):
        """
        Add sparklines to the worksheet.

        Args:
            row: The cell row (zero indexed).
            col: The cell column (zero indexed).
            options: Sparkline formatting options.

        Returns:
            0: Success.
            -1: Row or column is out of worksheet bounds.
            -2: Incorrect parameter or option.
        """
        # Check that row and col are valid without storing the values.
        if self._check_dimensions(row, col, True, True):
            return -1
        # Default the sparkline location to the cell it was added at; it can
        # be overridden by the 'location' option below.
        sparkline = {'locations': [xl_rowcol_to_cell(row, col)]}
        if options is None:
            options = {}
        # Valid input parameters.
        valid_parameters = {
            'location': True,
            'range': True,
            'type': True,
            'high_point': True,
            'low_point': True,
            'negative_points': True,
            'first_point': True,
            'last_point': True,
            'markers': True,
            'style': True,
            'series_color': True,
            'negative_color': True,
            'markers_color': True,
            'first_color': True,
            'last_color': True,
            'high_color': True,
            'low_color': True,
            'max': True,
            'min': True,
            'axis': True,
            'reverse': True,
            'empty_cells': True,
            'show_hidden': True,
            'plot_hidden': True,
            'date_axis': True,
            'weight': True,
        }
        # Check for valid input parameters.
        for param_key in options.keys():
            if param_key not in valid_parameters:
                warn("Unknown parameter '%s' in add_sparkline()" % param_key)
                return -1
        # 'range' is a required parameter.
        if 'range' not in options:
            warn("Parameter 'range' is required in add_sparkline()")
            return -2
        # Handle the sparkline type.
        spark_type = options.get('type', 'line')
        if spark_type not in ('line', 'column', 'win_loss'):
            warn("Parameter 'type' must be 'line', 'column' "
                 "or 'win_loss' in add_sparkline()")
            return -2
        # Excel stores win/loss sparklines internally as type 'stacked'.
        if spark_type == 'win_loss':
            spark_type = 'stacked'
        sparkline['type'] = spark_type
        # We handle single location/range values or list of values.
        if 'location' in options:
            if type(options['location']) is list:
                sparkline['locations'] = options['location']
            else:
                sparkline['locations'] = [options['location']]
        if type(options['range']) is list:
            sparkline['ranges'] = options['range']
        else:
            sparkline['ranges'] = [options['range']]
        range_count = len(sparkline['ranges'])
        location_count = len(sparkline['locations'])
        # The ranges and locations must match.
        if range_count != location_count:
            warn("Must have the same number of location and range "
                 "parameters in add_sparkline()")
            return -2
        # Store the count.
        sparkline['count'] = len(sparkline['locations'])
        # Get the worksheet name for the range conversion below.
        sheetname = quote_sheetname(self.name)
        # Cleanup the input ranges.
        new_ranges = []
        for spark_range in sparkline['ranges']:
            # Remove the absolute reference $ symbols.
            spark_range = spark_range.replace('$', '')
            # Remove the = from formula.
            spark_range = spark_range.lstrip('=')
            # Convert a simple range into a full Sheet1!A1:D1 range.
            if '!' not in spark_range:
                spark_range = sheetname + "!" + spark_range
            new_ranges.append(spark_range)
        sparkline['ranges'] = new_ranges
        # Cleanup the input locations: locations are always on this sheet so
        # only the $ symbols need to be stripped.
        new_locations = []
        for location in sparkline['locations']:
            location = location.replace('$', '')
            new_locations.append(location)
        sparkline['locations'] = new_locations
        # Map options. Unset options default to None and are skipped when
        # the sparkline XML is written.
        sparkline['high'] = options.get('high_point')
        sparkline['low'] = options.get('low_point')
        sparkline['negative'] = options.get('negative_points')
        sparkline['first'] = options.get('first_point')
        sparkline['last'] = options.get('last_point')
        sparkline['markers'] = options.get('markers')
        sparkline['min'] = options.get('min')
        sparkline['max'] = options.get('max')
        sparkline['axis'] = options.get('axis')
        sparkline['reverse'] = options.get('reverse')
        sparkline['hidden'] = options.get('show_hidden')
        sparkline['weight'] = options.get('weight')
        # Map empty cells options.
        empty = options.get('empty_cells', '')
        if empty == 'zero':
            sparkline['empty'] = 0
        elif empty == 'connect':
            sparkline['empty'] = 'span'
        else:
            sparkline['empty'] = 'gap'
        # Map the date axis range; qualify it with the sheet name if needed.
        date_range = options.get('date_axis')
        if date_range and '!' not in date_range:
            date_range = sheetname + "!" + date_range
        sparkline['date_axis'] = date_range
        # Set the sparkline styles from the built-in style table.
        style_id = options.get('style', 0)
        style = get_sparkline_style(style_id)
        sparkline['series_color'] = style['series']
        sparkline['negative_color'] = style['negative']
        sparkline['markers_color'] = style['markers']
        sparkline['first_color'] = style['first']
        sparkline['last_color'] = style['last']
        sparkline['high_color'] = style['high']
        sparkline['low_color'] = style['low']
        # Override the style colors with user defined colors.
        self._set_spark_color(sparkline, options, 'series_color')
        self._set_spark_color(sparkline, options, 'negative_color')
        self._set_spark_color(sparkline, options, 'markers_color')
        self._set_spark_color(sparkline, options, 'first_color')
        self._set_spark_color(sparkline, options, 'last_color')
        self._set_spark_color(sparkline, options, 'high_color')
        self._set_spark_color(sparkline, options, 'low_color')
        self.sparklines.append(sparkline)
@convert_range_args
def set_selection(self, first_row, first_col, last_row, last_col):
"""
Set the selected cell or cells in a worksheet
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
0: Nothing.
"""
pane = None
# Range selection. Do this before swapping max/min to allow the
# selection direction to be reversed.
active_cell = xl_rowcol_to_cell(first_row, first_col)
# Swap last row/col for first row/col if necessary
if first_row > last_row:
(first_row, last_row) = (last_row, first_row)
if first_col > last_col:
(first_col, last_col) = (last_col, first_col)
# If the first and last cell are the same write a single cell.
if (first_row == last_row) and (first_col == last_col):
sqref = active_cell
else:
sqref = xl_range(first_row, first_col, last_row, last_col)
# Selection isn't set for cell A1.
if sqref == 'A1':
return
self.selections = [[pane, active_cell, sqref]]
def outline_settings(self, visible=1, symbols_below=1, symbols_right=1,
auto_style=0):
"""
Control outline settings.
Args:
visible: Outlines are visible. Optional, defaults to True.
symbols_below: Show row outline symbols below the outline bar.
Optional, defaults to True.
symbols_right: Show column outline symbols to the right of the
outline bar. Optional, defaults to True.
auto_style: Use Automatic style. Optional, defaults to False.
Returns:
0: Nothing.
"""
self.outline_on = visible
self.outline_below = symbols_below
self.outline_right = symbols_right
self.outline_style = auto_style
self.outline_changed = True
@convert_cell_args
def freeze_panes(self, row, col, top_row=None, left_col=None, pane_type=0):
"""
Create worksheet panes and mark them as frozen.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
top_row: Topmost visible row in scrolling region of pane.
left_col: Leftmost visible row in scrolling region of pane.
Returns:
0: Nothing.
"""
if top_row is None:
top_row = row
if left_col is None:
left_col = col
self.panes = [row, col, top_row, left_col, pane_type]
@convert_cell_args
def split_panes(self, x, y, top_row=None, left_col=None):
"""
Create worksheet panes and mark them as split.
Args:
x: The position for the vertical split.
y: The position for the horizontal split.
top_row: Topmost visible row in scrolling region of pane.
left_col: Leftmost visible row in scrolling region of pane.
Returns:
0: Nothing.
"""
# Same as freeze panes with a different pane type.
self.freeze_panes(x, y, top_row, left_col, 2)
def set_zoom(self, zoom=100):
"""
Set the worksheet zoom factor.
Args:
zoom: Scale factor: 10 <= zoom <= 400.
Returns:
Nothing.
"""
# Ensure the zoom scale is in Excel's range.
if zoom < 10 or zoom > 400:
warn("Zoom factor %d outside range: 10 <= zoom <= 400" % zoom)
zoom = 100
self.zoom = int(zoom)
def right_to_left(self):
"""
Display the worksheet right to left for some versions of Excel.
Args:
None.
Returns:
Nothing.
"""
self.is_right_to_left = 1
def hide_zero(self):
"""
Hide zero values in worksheet cells.
Args:
None.
Returns:
Nothing.
"""
self.show_zeros = 0
def set_tab_color(self, color):
"""
Set the color of the worksheet tab.
Args:
color: A #RGB color index.
Returns:
Nothing.
"""
self.tab_color = xl_color(color)
def protect(self, password='', options=None):
"""
Set the password and protection options of the worksheet.
Args:
password: An optional password string.
options: A dictionary of worksheet objects to protect.
Returns:
Nothing.
"""
if password != '':
password = self._encode_password(password)
if not options:
options = {}
# Default values for objects that can be protected.
defaults = {
'sheet': True,
'content': False,
'objects': False,
'scenarios': False,
'format_cells': False,
'format_columns': False,
'format_rows': False,
'insert_columns': False,
'insert_rows': False,
'insert_hyperlinks': False,
'delete_columns': False,
'delete_rows': False,
'select_locked_cells': True,
'sort': False,
'autofilter': False,
'pivot_tables': False,
'select_unlocked_cells': True}
# Overwrite the defaults with user specified values.
for key in (options.keys()):
if key in defaults:
defaults[key] = options[key]
else:
warn("Unknown protection object: '%s'" % key)
# Set the password after the user defined values.
defaults['password'] = password
self.protect_options = defaults
@convert_cell_args
def insert_button(self, row, col, options=None):
"""
Insert a button form object into the worksheet.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
options: Button formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn('Cannot insert button at (%d, %d).' % (row, col))
return -1
if options is None:
options = {}
button = self._button_params(row, col, options)
self.buttons_list.append(button)
self.has_vml = 1
###########################################################################
#
# Public API. Page Setup methods.
#
###########################################################################
def set_landscape(self):
"""
Set the page orientation as landscape.
Args:
None.
Returns:
Nothing.
"""
self.orientation = 0
self.page_setup_changed = True
def set_portrait(self):
"""
Set the page orientation as portrait.
Args:
None.
Returns:
Nothing.
"""
self.orientation = 1
self.page_setup_changed = True
def set_page_view(self):
"""
Set the page view mode.
Args:
None.
Returns:
Nothing.
"""
self.page_view = 1
def set_paper(self, paper_size):
"""
Set the paper type. US Letter = 1, A4 = 9.
Args:
paper_size: Paper index.
Returns:
Nothing.
"""
if paper_size:
self.paper_size = paper_size
self.page_setup_changed = True
def center_horizontally(self):
"""
Center the page horizontally.
Args:
None.
Returns:
Nothing.
"""
self.print_options_changed = True
self.hcenter = 1
def center_vertically(self):
"""
Center the page vertically.
Args:
None.
Returns:
Nothing.
"""
self.print_options_changed = True
self.vcenter = 1
def set_margins(self, left=0.7, right=0.7, top=0.75, bottom=0.75):
"""
Set all the page margins in inches.
Args:
left: Left margin.
right: Right margin.
top: Top margin.
bottom: Bottom margin.
Returns:
Nothing.
"""
self.margin_left = left
self.margin_right = right
self.margin_top = top
self.margin_bottom = bottom
    def set_header(self, header='', options=None, margin=None):
        """
        Set the page header caption and optional margin.

        Args:
            header: Header string.
            margin: Header margin.
            options: Header options, mainly for images.

        Returns:
            Nothing.
        """
        header_orig = header
        # '&[Picture]' is the user-facing image placeholder; Excel stores
        # it internally as '&G'.
        header = header.replace('&[Picture]', '&G')
        if len(header) >= 255:
            warn('Header string must be less than 255 characters')
            return
        if options is not None:
            # For backward compatibility allow options to be the margin.
            if not isinstance(options, dict):
                options = {'margin': options}
        else:
            options = {}
        # Copy the user defined options so they aren't modified.
        options = options.copy()
        # For backward compatibility.
        if margin is not None:
            options['margin'] = margin
        # Reset the list in case the function is called more than once.
        self.header_images = []
        # Each entry is [filename, image_data, position-code] where the
        # code is L/C/R (left/center/right) + H for header.
        if options.get('image_left'):
            self.header_images.append([options.get('image_left'),
                                       options.get('image_data_left'),
                                       'LH'])
        if options.get('image_center'):
            self.header_images.append([options.get('image_center'),
                                       options.get('image_data_center'),
                                       'CH'])
        if options.get('image_right'):
            self.header_images.append([options.get('image_right'),
                                       options.get('image_data_right'),
                                       'RH'])
        # Every '&G' placeholder in the string must have a matching image.
        placeholder_count = header.count('&G')
        image_count = len(self.header_images)
        if placeholder_count != image_count:
            warn("Number of header images (%s) doesn't match placeholder "
                 "count (%s) in string: %s"
                 % (image_count, placeholder_count, header_orig))
            self.header_images = []
            return
        if 'align_with_margins' in options:
            self.header_footer_aligns = options['align_with_margins']
        if 'scale_with_doc' in options:
            self.header_footer_scales = options['scale_with_doc']
        self.header = header
        self.margin_header = options.get('margin', 0.3)
        self.header_footer_changed = True
        # Header/footer images are rendered through the legacy VML layer.
        if image_count:
            self.has_header_vml = True
    def set_footer(self, footer='', options=None, margin=None):
        """
        Set the page footer caption and optional margin.

        Args:
            footer: Footer string.
            margin: Footer margin.
            options: Footer options, mainly for images.

        Returns:
            Nothing.
        """
        footer_orig = footer
        # '&[Picture]' is the user-facing image placeholder; Excel stores
        # it internally as '&G'.
        footer = footer.replace('&[Picture]', '&G')
        if len(footer) >= 255:
            warn('Footer string must be less than 255 characters')
            return
        if options is not None:
            # For backward compatibility allow options to be the margin.
            if not isinstance(options, dict):
                options = {'margin': options}
        else:
            options = {}
        # Copy the user defined options so they aren't modified.
        options = options.copy()
        # For backward compatibility.
        if margin is not None:
            options['margin'] = margin
        # Reset the list in case the function is called more than once.
        self.footer_images = []
        # Each entry is [filename, image_data, position-code] where the
        # code is L/C/R (left/center/right) + F for footer.
        if options.get('image_left'):
            self.footer_images.append([options.get('image_left'),
                                       options.get('image_data_left'),
                                       'LF'])
        if options.get('image_center'):
            self.footer_images.append([options.get('image_center'),
                                       options.get('image_data_center'),
                                       'CF'])
        if options.get('image_right'):
            self.footer_images.append([options.get('image_right'),
                                       options.get('image_data_right'),
                                       'RF'])
        # Every '&G' placeholder in the string must have a matching image.
        placeholder_count = footer.count('&G')
        image_count = len(self.footer_images)
        if placeholder_count != image_count:
            warn("Number of footer images (%s) doesn't match placeholder "
                 "count (%s) in string: %s"
                 % (image_count, placeholder_count, footer_orig))
            self.footer_images = []
            return
        if 'align_with_margins' in options:
            self.header_footer_aligns = options['align_with_margins']
        if 'scale_with_doc' in options:
            self.header_footer_scales = options['scale_with_doc']
        self.footer = footer
        self.margin_footer = options.get('margin', 0.3)
        self.header_footer_changed = True
        # Footer images are rendered through the legacy VML layer.
        if image_count:
            self.has_header_vml = True
def repeat_rows(self, first_row, last_row=None):
"""
Set the rows to repeat at the top of each printed page.
Args:
first_row: Start row for range.
last_row: End row for range.
Returns:
Nothing.
"""
if last_row is None:
last_row = first_row
# Convert rows to 1 based.
first_row += 1
last_row += 1
# Create the row range area like: $1:$2.
area = '$%d:$%d' % (first_row, last_row)
# Build up the print titles area "Sheet1!$1:$2"
sheetname = quote_sheetname(self.name)
self.repeat_row_range = sheetname + '!' + area
@convert_column_args
def repeat_columns(self, first_col, last_col=None):
"""
Set the columns to repeat at the left hand side of each printed page.
Args:
first_col: Start column for range.
last_col: End column for range.
Returns:
Nothing.
"""
if last_col is None:
last_col = first_col
# Convert to A notation.
first_col = xl_col_to_name(first_col, 1)
last_col = xl_col_to_name(last_col, 1)
# Create a column range like $C:$D.
area = first_col + ':' + last_col
# Build up the print area range "=Sheet2!$C:$D"
sheetname = quote_sheetname(self.name)
self.repeat_col_range = sheetname + "!" + area
def hide_gridlines(self, option=1):
"""
Set the option to hide gridlines on the screen and the printed page.
Args:
option: 0 : Don't hide gridlines
1 : Hide printed gridlines only
2 : Hide screen and printed gridlines
Returns:
Nothing.
"""
if option == 0:
self.print_gridlines = 1
self.screen_gridlines = 1
self.print_options_changed = True
elif option == 1:
self.print_gridlines = 0
self.screen_gridlines = 1
else:
self.print_gridlines = 0
self.screen_gridlines = 0
def print_row_col_headers(self):
"""
Set the option to print the row and column headers on the printed page.
Args:
None.
Returns:
Nothing.
"""
self.print_headers = True
self.print_options_changed = True
def hide_row_col_headers(self):
"""
Set the option to hide the row and column headers on the worksheet.
Args:
None.
Returns:
Nothing.
"""
self.row_col_headers = True
@convert_range_args
def print_area(self, first_row, first_col, last_row, last_col):
"""
Set the print area in the current worksheet.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Set the print area in the current worksheet.
# Ignore max print area since it is the same as no area for Excel.
if (first_row == 0 and first_col == 0
and last_row == self.xls_rowmax - 1
and last_col == self.xls_colmax - 1):
return
# Build up the print area range "Sheet1!$A$1:$C$13".
area = self._convert_name_area(first_row, first_col,
last_row, last_col)
self.print_area_range = area
def print_across(self):
"""
Set the order in which pages are printed.
Args:
None.
Returns:
Nothing.
"""
self.page_order = 1
self.page_setup_changed = True
def fit_to_pages(self, width, height):
"""
Fit the printed area to a specific number of pages both vertically and
horizontally.
Args:
width: Number of pages horizontally.
height: Number of pages vertically.
Returns:
Nothing.
"""
self.fit_page = 1
self.fit_width = width
self.fit_height = height
self.page_setup_changed = True
def set_start_page(self, start_page):
"""
Set the start page number when printing.
Args:
start_page: Start page number.
Returns:
Nothing.
"""
self.page_start = start_page
def set_print_scale(self, scale):
"""
Set the scale factor for the printed page.
Args:
scale: Print scale. 10 <= scale <= 400.
Returns:
Nothing.
"""
# Confine the scale to Excel's range.
if scale < 10 or scale > 400:
warn("Print scale '%d' outside range: 10 <= scale <= 400" % scale)
return
# Turn off "fit to page" option when print scale is on.
self.fit_page = 0
self.print_scale = int(scale)
self.page_setup_changed = True
def set_h_pagebreaks(self, breaks):
"""
Set the horizontal page breaks on a worksheet.
Args:
breaks: List of rows where the page breaks should be added.
Returns:
Nothing.
"""
self.hbreaks = breaks
def set_v_pagebreaks(self, breaks):
"""
Set the horizontal page breaks on a worksheet.
Args:
breaks: List of columns where the page breaks should be added.
Returns:
Nothing.
"""
self.vbreaks = breaks
def set_vba_name(self, name=None):
"""
Set the VBA name for the worksheet. By default this is the
same as the sheet name: i.e., Sheet1 etc.
Args:
name: The VBA name for the worksheet.
Returns:
Nothing.
"""
if name is not None:
self.vba_codename = name
else:
self.vba_codename = self.name
###########################################################################
#
# Private API.
#
###########################################################################
    def _initialize(self, init_data):
        # Copy the per-sheet configuration passed down from the parent
        # Workbook when the worksheet is added to it.
        self.name = init_data['name']
        self.index = init_data['index']
        self.str_table = init_data['str_table']
        self.worksheet_meta = init_data['worksheet_meta']
        self.constant_memory = init_data['constant_memory']
        self.tmpdir = init_data['tmpdir']
        self.date_1904 = init_data['date_1904']
        self.strings_to_numbers = init_data['strings_to_numbers']
        self.strings_to_formulas = init_data['strings_to_formulas']
        self.strings_to_urls = init_data['strings_to_urls']
        self.nan_inf_to_errors = init_data['nan_inf_to_errors']
        self.default_date_format = init_data['default_date_format']
        self.default_url_format = init_data['default_url_format']
        self.excel2003_style = init_data['excel2003_style']
        self.remove_timezone = init_data['remove_timezone']
        self.max_url_length = init_data['max_url_length']
        # Excel 2003 style workbooks use different default row heights
        # and page margins.
        if self.excel2003_style:
            self.original_row_height = 12.75
            self.default_row_height = 12.75
            self.default_row_pixels = 17
            self.margin_left = 0.75
            self.margin_right = 0.75
            self.margin_top = 1
            self.margin_bottom = 1
            self.margin_header = 0.5
            self.margin_footer = 0.5
            self.header_footer_aligns = False
        # Open a temp filehandle to store row data in constant_memory mode.
        if self.constant_memory:
            # This is sub-optimal but we need to create a temp file
            # with utf8 encoding in Python < 3.
            (fd, filename) = tempfile.mkstemp(dir=self.tmpdir)
            os.close(fd)
            self.row_data_filename = filename
            self.row_data_fh = codecs.open(filename, 'w+', 'utf-8')
            # Set as the worksheet filehandle until the file is assembled.
            self.fh = self.row_data_fh
    def _assemble_xml_file(self):
        # Assemble and write the XML file.
        # The elements are written in the order they appear below; this
        # order is significant for the generated worksheet XML.
        # Write the XML declaration.
        self._xml_declaration()
        # Write the root worksheet element.
        self._write_worksheet()
        # Write the worksheet properties.
        self._write_sheet_pr()
        # Write the worksheet dimensions.
        self._write_dimension()
        # Write the sheet view properties.
        self._write_sheet_views()
        # Write the sheet format properties.
        self._write_sheet_format_pr()
        # Write the sheet column info.
        self._write_cols()
        # Write the worksheet data such as rows columns and cells.
        # In constant_memory mode the row data has already been streamed
        # to a temp file and is copied back instead.
        if not self.constant_memory:
            self._write_sheet_data()
        else:
            self._write_optimized_sheet_data()
        # Write the sheetProtection element.
        self._write_sheet_protection()
        # Write the phoneticPr element.
        if self.excel2003_style:
            self._write_phonetic_pr()
        # Write the autoFilter element.
        self._write_auto_filter()
        # Write the mergeCells element.
        self._write_merge_cells()
        # Write the conditional formats.
        self._write_conditional_formats()
        # Write the dataValidations element.
        self._write_data_validations()
        # Write the hyperlink element.
        self._write_hyperlinks()
        # Write the printOptions element.
        self._write_print_options()
        # Write the worksheet page_margins.
        self._write_page_margins()
        # Write the worksheet page setup.
        self._write_page_setup()
        # Write the headerFooter element.
        self._write_header_footer()
        # Write the rowBreaks element.
        self._write_row_breaks()
        # Write the colBreaks element.
        self._write_col_breaks()
        # Write the drawing element.
        self._write_drawings()
        # Write the legacyDrawing element.
        self._write_legacy_drawing()
        # Write the legacyDrawingHF element.
        self._write_legacy_drawing_hf()
        # Write the tableParts element.
        self._write_table_parts()
        # Write the extLst elements.
        self._write_ext_list()
        # Close the worksheet tag.
        self._xml_end_tag('worksheet')
        # Close the file.
        self._xml_close()
def _check_dimensions(self, row, col, ignore_row=False, ignore_col=False):
# Check that row and col are valid and store the max and min
# values for use in other methods/elements. The ignore_row /
# ignore_col flags is used to indicate that we wish to perform
# the dimension check without storing the value. The ignore
# flags are use by set_row() and data_validate.
# Check that the row/col are within the worksheet bounds.
if row < 0 or col < 0:
return -1
if row >= self.xls_rowmax or col >= self.xls_colmax:
return -1
# In constant_memory mode we don't change dimensions for rows
# that are already written.
if not ignore_row and not ignore_col and self.constant_memory:
if row < self.previous_row:
return -2
if not ignore_row:
if self.dim_rowmin is None or row < self.dim_rowmin:
self.dim_rowmin = row
if self.dim_rowmax is None or row > self.dim_rowmax:
self.dim_rowmax = row
if not ignore_col:
if self.dim_colmin is None or col < self.dim_colmin:
self.dim_colmin = col
if self.dim_colmax is None or col > self.dim_colmax:
self.dim_colmax = col
return 0
def _convert_date_time(self, dt_obj):
# Convert a datetime object to an Excel serial date and time.
return datetime_to_excel_datetime(dt_obj,
self.date_1904,
self.remove_timezone)
def _convert_name_area(self, row_num_1, col_num_1, row_num_2, col_num_2):
# Convert zero indexed rows and columns to the format required by
# worksheet named ranges, eg, "Sheet1!$A$1:$C$13".
range1 = ''
range2 = ''
area = ''
row_col_only = 0
# Convert to A1 notation.
col_char_1 = xl_col_to_name(col_num_1, 1)
col_char_2 = xl_col_to_name(col_num_2, 1)
row_char_1 = '$' + str(row_num_1 + 1)
row_char_2 = '$' + str(row_num_2 + 1)
# We need to handle special cases that refer to rows or columns only.
if row_num_1 == 0 and row_num_2 == self.xls_rowmax - 1:
range1 = col_char_1
range2 = col_char_2
row_col_only = 1
elif col_num_1 == 0 and col_num_2 == self.xls_colmax - 1:
range1 = row_char_1
range2 = row_char_2
row_col_only = 1
else:
range1 = col_char_1 + row_char_1
range2 = col_char_2 + row_char_2
# A repeated range is only written once (if it isn't a special case).
if range1 == range2 and not row_col_only:
area = range1
else:
area = range1 + ':' + range2
# Build up the print area range "Sheet1!$A$1:$C$13".
sheetname = quote_sheetname(self.name)
area = sheetname + "!" + area
return area
def _sort_pagebreaks(self, breaks):
# This is an internal method used to filter elements of a list of
# pagebreaks used in the _store_hbreak() and _store_vbreak() methods.
# It:
# 1. Removes duplicate entries from the list.
# 2. Sorts the list.
# 3. Removes 0 from the list if present.
if not breaks:
return
breaks_set = set(breaks)
if 0 in breaks_set:
breaks_set.remove(0)
breaks_list = list(breaks_set)
breaks_list.sort()
# The Excel 2007 specification says that the maximum number of page
# breaks is 1026. However, in practice it is actually 1023.
max_num_breaks = 1023
if len(breaks_list) > max_num_breaks:
breaks_list = breaks_list[:max_num_breaks]
return breaks_list
def _extract_filter_tokens(self, expression):
# Extract the tokens from the filter expression. The tokens are mainly
# non-whitespace groups. The only tricky part is to extract string
# tokens that contain whitespace and/or quoted double quotes (Excel's
# escaped quotes).
#
# Examples: 'x < 2000'
# 'x > 2000 and x < 5000'
# 'x = "foo"'
# 'x = "foo bar"'
# 'x = "foo "" bar"'
#
if not expression:
return []
token_re = re.compile(r'"(?:[^"]|"")*"|\S+')
tokens = token_re.findall(expression)
new_tokens = []
# Remove single leading and trailing quotes and un-escape other quotes.
for token in tokens:
if token.startswith('"'):
token = token[1:]
if token.endswith('"'):
token = token[:-1]
token = token.replace('""', '"')
new_tokens.append(token)
return new_tokens
def _parse_filter_expression(self, expression, tokens):
# Converts the tokens of a possibly conditional expression into 1 or 2
# sub expressions for further parsing.
#
# Examples:
# ('x', '==', 2000) -> exp1
# ('x', '>', 2000, 'and', 'x', '<', 5000) -> exp1 and exp2
if len(tokens) == 7:
# The number of tokens will be either 3 (for 1 expression)
# or 7 (for 2 expressions).
conditional = tokens[3]
if re.match('(and|&&)', conditional):
conditional = 0
elif re.match(r'(or|\|\|)', conditional):
conditional = 1
else:
warn("Token '%s' is not a valid conditional "
"in filter expression '%s'" % (conditional, expression))
expression_1 = self._parse_filter_tokens(expression, tokens[0:3])
expression_2 = self._parse_filter_tokens(expression, tokens[4:7])
return expression_1 + [conditional] + expression_2
else:
return self._parse_filter_tokens(expression, tokens)
def _parse_filter_tokens(self, expression, tokens):
# Parse the 3 tokens of a filter expression and return the operator
# and token. The use of numbers instead of operators is a legacy of
# Spreadsheet::WriteExcel.
operators = {
'==': 2,
'=': 2,
'=~': 2,
'eq': 2,
'!=': 5,
'!~': 5,
'ne': 5,
'<>': 5,
'<': 1,
'<=': 3,
'>': 4,
'>=': 6,
}
operator = operators.get(tokens[1], None)
token = tokens[2]
# Special handling of "Top" filter expressions.
if re.match('top|bottom', tokens[0].lower()):
value = int(tokens[1])
if value < 1 or value > 500:
warn("The value '%d' in expression '%s' "
"must be in the range 1 to 500" % (value, expression))
token = token.lower()
if token != 'items' and token != '%':
warn("The type '%s' in expression '%s' "
"must be either 'items' or '%'" % (token, expression))
if tokens[0].lower() == 'top':
operator = 30
else:
operator = 32
if tokens[2] == '%':
operator += 1
token = str(value)
if not operator and tokens[0]:
warn("Token '%s' is not a valid operator "
"in filter expression '%s'" % (token[0], expression))
# Special handling for Blanks/NonBlanks.
if re.match('blanks|nonblanks', token.lower()):
# Only allow Equals or NotEqual in this context.
if operator != 2 and operator != 5:
warn("The operator '%s' in expression '%s' "
"is not valid in relation to Blanks/NonBlanks'"
% (tokens[1], expression))
token = token.lower()
# The operator should always be 2 (=) to flag a "simple" equality
# in the binary record. Therefore we convert <> to =.
if token == 'blanks':
if operator == 5:
token = ' '
else:
if operator == 5:
operator = 2
token = 'blanks'
else:
operator = 5
token = ' '
# if the string token contains an Excel match character then change the
# operator type to indicate a non "simple" equality.
if operator == 2 and re.search('[*?]', token):
operator = 22
return [operator, token]
def _encode_password(self, plaintext):
# Encode the worksheet protection "password" as a simple hash.
# Based on the algorithm by Daniel Rentz of OpenOffice.
i = 0
count = len(plaintext)
digits = []
for char in plaintext:
i += 1
char = ord(char) << i
low_15 = char & 0x7fff
high_15 = char & 0x7fff << 15
high_15 >>= 15
char = low_15 | high_15
digits.append(char)
password_hash = 0x0000
for digit in digits:
password_hash ^= digit
password_hash ^= count
password_hash ^= 0xCE4B
return "%X" % password_hash
    def _prepare_image(self, index, image_id, drawing_id, width, height,
                       name, image_type, x_dpi, y_dpi):
        # Set up the drawing object and relationship links for an image
        # that was stored by insert_image(). Called during file assembly.
        drawing_type = 2
        (row, col, _, x_offset, y_offset,
         x_scale, y_scale, url, tip, anchor, _) = self.images[index]
        # Apply the user scaling factors.
        width *= x_scale
        height *= y_scale
        # Scale by non 96dpi resolutions.
        width *= 96.0 / x_dpi
        height *= 96.0 / y_dpi
        dimensions = self._position_object_emus(col, row, x_offset, y_offset,
                                                width, height, anchor)
        # Convert from pixels to emus.
        width = int(0.5 + (width * 9525))
        height = int(0.5 + (height * 9525))
        # Create a Drawing obj to use with worksheet unless one already exists.
        if not self.drawing:
            drawing = Drawing()
            drawing.embedded = 1
            self.drawing = drawing
            self.external_drawing_links.append(['/drawing',
                                                '../drawings/drawing'
                                                + str(drawing_id)
                                                + '.xml', None])
        else:
            drawing = self.drawing
        drawing_object = drawing._add_drawing_object()
        drawing_object['type'] = drawing_type
        drawing_object['dimensions'] = dimensions
        drawing_object['width'] = width
        drawing_object['height'] = height
        drawing_object['description'] = name
        drawing_object['shape'] = None
        drawing_object['anchor'] = anchor
        drawing_object['rel_index'] = 0
        drawing_object['url_rel_index'] = 0
        drawing_object['tip'] = tip
        # Images can have an optional hyperlink attached to them.
        if url:
            target = None
            rel_type = '/hyperlink'
            target_mode = 'External'
            if re.match('(ftp|http)s?://', url):
                target = self._escape_url(url)
            if re.match('^mailto:', url):
                target = self._escape_url(url)
            if re.match('external:', url):
                target = url.replace('external:', 'file:///')
                target = self._escape_url(target)
                # Additional escape not required in worksheet hyperlinks.
                target = target.replace('#', '%23')
            if re.match('internal:', url):
                target = url.replace('internal:', '#')
                target_mode = None
            # URLs over Excel's length limit are dropped with a warning
            # rather than producing a corrupt file.
            if target is not None:
                if len(target) > self.max_url_length:
                    warn("Ignoring URL '%s' with link and/or anchor > %d "
                         "characters since it exceeds Excel's limit for URLS" %
                         (force_unicode(url), self.max_url_length))
                else:
                    self.drawing_links.append([rel_type, target, target_mode])
                    drawing_object['url_rel_index'] = \
                        self._get_drawing_rel_index()
        drawing_object['rel_index'] = self._get_drawing_rel_index()
        self.drawing_links.append(['/image',
                                   '../media/image'
                                   + str(image_id) + '.'
                                   + image_type])
    def _prepare_shape(self, index, drawing_id):
        # Set up shapes/drawings.
        #
        # Convert the shape stored at self.shapes[index] into a drawing
        # object in the worksheet's Drawing container, creating that
        # container (and its external rels link) on first use.
        #
        # Args:
        #     index:      Index of the shape in self.shapes.
        #     drawing_id: Integer id of the drawing XML part for this sheet.
        drawing_type = 3  # Drawing object type used by the Drawing writer.
        (row, col, x_offset, y_offset,
         x_scale, y_scale, text, anchor, options) = self.shapes[index]
        # Default size is 3 columns wide by 6 rows high, before scaling.
        width = options.get('width', self.default_col_pixels * 3)
        height = options.get('height', self.default_row_pixels * 6)
        width *= x_scale
        height *= y_scale
        dimensions = self._position_object_emus(col, row, x_offset, y_offset,
                                                width, height, anchor)
        # Convert from pixels to emus.
        width = int(0.5 + (width * 9525))
        height = int(0.5 + (height * 9525))
        # Create a Drawing obj to use with worksheet unless one already exists.
        if not self.drawing:
            drawing = Drawing()
            drawing.embedded = 1
            self.drawing = drawing
            self.external_drawing_links.append(['/drawing',
                                                '../drawings/drawing'
                                                + str(drawing_id)
                                                + '.xml', None])
        else:
            drawing = self.drawing
        # Shapes are written as a rect/TextBox with the shape text inside.
        shape = Shape('rect', 'TextBox', options)
        shape.text = text
        drawing_object = drawing._add_drawing_object()
        drawing_object['type'] = drawing_type
        drawing_object['dimensions'] = dimensions
        drawing_object['width'] = width
        drawing_object['height'] = height
        drawing_object['description'] = None
        drawing_object['shape'] = shape
        drawing_object['anchor'] = anchor
        drawing_object['rel_index'] = 0
        drawing_object['url_rel_index'] = 0
        drawing_object['tip'] = options.get('tip')
        # Handle an optional hyperlink on the shape. Supported forms are
        # ftp/http(s)://, mailto:, external: (file link) and internal:
        # (in-workbook link).
        url = options.get('url', None)
        if url:
            target = None
            rel_type = '/hyperlink'
            target_mode = 'External'
            if re.match('(ftp|http)s?://', url):
                target = self._escape_url(url)
            if re.match('^mailto:', url):
                target = self._escape_url(url)
            if re.match('external:', url):
                target = url.replace('external:', 'file:///')
                target = self._escape_url(target)
                # Additional escape not required in worksheet hyperlinks.
                target = target.replace('#', '%23')
            if re.match('internal:', url):
                target = url.replace('internal:', '#')
                # Internal links have no target mode in the rels file.
                target_mode = None
            if target is not None:
                # URLs over Excel's length limit are dropped with a warning.
                if len(target) > self.max_url_length:
                    warn("Ignoring URL '%s' with link and/or anchor > %d "
                         "characters since it exceeds Excel's limit for URLS" %
                         (force_unicode(url), self.max_url_length))
                else:
                    self.drawing_links.append([rel_type, target, target_mode])
                    drawing_object['url_rel_index'] = \
                        self._get_drawing_rel_index()
def _prepare_header_image(self, image_id, width, height, name, image_type,
position, x_dpi, y_dpi):
# Set up an image without a drawing object for header/footer images.
# Strip the extension from the filename.
name = re.sub(r'\..*$', '', name)
self.header_images_list.append([width, height, name, position,
x_dpi, y_dpi])
self.vml_drawing_links.append(['/image',
'../media/image'
+ str(image_id) + '.'
+ image_type])
    def _prepare_chart(self, index, chart_id, drawing_id):
        # Set up chart/drawings.
        #
        # Convert the chart stored at self.charts[index] into a drawing
        # object, creating the worksheet Drawing container on first use
        # and recording the rels links for the drawing and chart parts.
        #
        # Args:
        #     index:      Index of the chart in self.charts.
        #     chart_id:   Integer id of the chart XML part.
        #     drawing_id: Integer id of the drawing XML part.
        drawing_type = 1  # Drawing object type used by the Drawing writer.
        (row, col, chart, x_offset, y_offset, x_scale, y_scale, anchor) = \
            self.charts[index]
        # Chart ids are 0-based in the chart object itself.
        chart.id = chart_id - 1
        # Use user specified dimensions, if any.
        width = int(0.5 + (chart.width * x_scale))
        height = int(0.5 + (chart.height * y_scale))
        dimensions = self._position_object_emus(col, row, x_offset, y_offset,
                                                width, height, anchor)
        # Set the chart name for the embedded object if it has been specified.
        name = chart.chart_name
        # Create a Drawing obj to use with worksheet unless one already exists.
        if not self.drawing:
            drawing = Drawing()
            drawing.embedded = 1
            self.drawing = drawing
            self.external_drawing_links.append(['/drawing',
                                                '../drawings/drawing'
                                                + str(drawing_id)
                                                + '.xml'])
        else:
            drawing = self.drawing
        drawing_object = drawing._add_drawing_object()
        drawing_object['type'] = drawing_type
        drawing_object['dimensions'] = dimensions
        drawing_object['width'] = width
        drawing_object['height'] = height
        drawing_object['description'] = name
        drawing_object['shape'] = None
        drawing_object['anchor'] = anchor
        drawing_object['rel_index'] = self._get_drawing_rel_index()
        drawing_object['url_rel_index'] = 0
        drawing_object['tip'] = None
        # Link from the drawing rels file to the chart part.
        self.drawing_links.append(['/chart',
                                   '../charts/chart'
                                   + str(chart_id)
                                   + '.xml'])
def _position_object_emus(self, col_start, row_start, x1, y1,
width, height, anchor):
# Calculate the vertices that define the position of a graphical
# object within the worksheet in EMUs.
#
# The vertices are expressed as English Metric Units (EMUs). There are
# 12,700 EMUs per point. Therefore, 12,700 * 3 /4 = 9,525 EMUs per
# pixel
(col_start, row_start, x1, y1,
col_end, row_end, x2, y2, x_abs, y_abs) = \
self._position_object_pixels(col_start, row_start, x1, y1,
width, height, anchor)
# Convert the pixel values to EMUs. See above.
x1 = int(0.5 + 9525 * x1)
y1 = int(0.5 + 9525 * y1)
x2 = int(0.5 + 9525 * x2)
y2 = int(0.5 + 9525 * y2)
x_abs = int(0.5 + 9525 * x_abs)
y_abs = int(0.5 + 9525 * y_abs)
return (col_start, row_start, x1, y1, col_end, row_end, x2, y2,
x_abs, y_abs)
# Calculate the vertices that define the position of a graphical object
# within the worksheet in pixels.
#
# +------------+------------+
# | A | B |
# +-----+------------+------------+
# | |(x1,y1) | |
# | 1 |(A1)._______|______ |
# | | | | |
# | | | | |
# +-----+----| OBJECT |-----+
# | | | | |
# | 2 | |______________. |
# | | | (B2)|
# | | | (x2,y2)|
# +---- +------------+------------+
#
# Example of an object that covers some of the area from cell A1 to B2.
#
# Based on the width and height of the object we need to calculate 8 vars:
#
# col_start, row_start, col_end, row_end, x1, y1, x2, y2.
#
# We also calculate the absolute x and y position of the top left vertex of
# the object. This is required for images.
#
# The width and height of the cells that the object occupies can be
# variable and have to be taken into account.
#
# The values of col_start and row_start are passed in from the calling
# function. The values of col_end and row_end are calculated by
# subtracting the width and height of the object from the width and
# height of the underlying cells.
#
    def _position_object_pixels(self, col_start, row_start, x1, y1,
                                width, height, anchor):
        # Calculate the pixel geometry of an object anchored in the sheet.
        # Returns a 10 element list (see the comment block above this
        # method for a diagram):
        #
        # col_start # Col containing upper left corner of object.
        # x1        # Distance to left side of object.
        #
        # row_start # Row containing top left corner of object.
        # y1        # Distance to top of object.
        #
        # col_end   # Col containing lower right corner of object.
        # x2        # Distance to right side of object.
        #
        # row_end   # Row containing bottom right corner of object.
        # y2        # Distance to bottom of object.
        #
        # width     # Width of object frame.
        # height    # Height of object frame.
        #
        # x_abs     # Absolute distance to left side of object.
        # y_abs     # Absolute distance to top side of object.
        x_abs = 0
        y_abs = 0
        # Adjust start column for negative offsets.
        while x1 < 0 and col_start > 0:
            x1 += self._size_col(col_start - 1)
            col_start -= 1
        # Adjust start row for negative offsets.
        while y1 < 0 and row_start > 0:
            y1 += self._size_row(row_start - 1)
            row_start -= 1
        # Ensure that the image isn't shifted off the page at top left.
        if x1 < 0:
            x1 = 0
        if y1 < 0:
            y1 = 0
        # Calculate the absolute x offset of the top-left vertex.
        if self.col_size_changed:
            for col_id in range(col_start):
                x_abs += self._size_col(col_id)
        else:
            # Optimization for when the column widths haven't changed.
            x_abs += self.default_col_pixels * col_start
        x_abs += x1
        # Calculate the absolute y offset of the top-left vertex.
        if self.row_size_changed:
            for row_id in range(row_start):
                y_abs += self._size_row(row_id)
        else:
            # Optimization for when the row heights haven't changed.
            y_abs += self.default_row_pixels * row_start
        y_abs += y1
        # Adjust start column for offsets that are greater than the col width.
        # (Zero-width, i.e. hidden, columns are left as-is to avoid an
        # infinite loop.)
        if self._size_col(col_start) > 0:
            while x1 >= self._size_col(col_start):
                x1 -= self._size_col(col_start)
                col_start += 1
        # Adjust start row for offsets that are greater than the row height.
        if self._size_row(row_start) > 0:
            while y1 >= self._size_row(row_start):
                y1 -= self._size_row(row_start)
                row_start += 1
        # Initialize end cell to the same as the start cell.
        col_end = col_start
        row_end = row_start
        # Don't offset the image in the cell if the row/col is hidden.
        if self._size_col(col_start) > 0:
            width = width + x1
        if self._size_row(row_start) > 0:
            height = height + y1
        # Subtract the underlying cell widths to find end cell of the object.
        while width >= self._size_col(col_end, anchor):
            width -= self._size_col(col_end, anchor)
            col_end += 1
        # Subtract the underlying cell heights to find end cell of the object.
        while height >= self._size_row(row_end, anchor):
            height -= self._size_row(row_end, anchor)
            row_end += 1
        # The end vertices are whatever is left from the width and height.
        x2 = width
        y2 = height
        return ([col_start, row_start, x1, y1, col_end, row_end, x2, y2,
                 x_abs, y_abs])
def _size_col(self, col, anchor=0):
# Convert the width of a cell from user's units to pixels. Excel
# rounds the column width to the nearest pixel. If the width hasn't
# been set by the user we use the default value. A hidden column is
# treated as having a width of zero unless it has the special
# "object_position" of 4 (size with cells).
max_digit_width = 7 # For Calabri 11.
padding = 5
pixels = 0
# Look up the cell value to see if it has been changed.
if col in self.col_sizes:
width = self.col_sizes[col][0]
hidden = self.col_sizes[col][1]
# Convert to pixels.
if hidden and anchor != 4:
pixels = 0
elif width < 1:
pixels = int(width * (max_digit_width + padding) + 0.5)
else:
pixels = int(width * max_digit_width + 0.5) + padding
else:
pixels = self.default_col_pixels
return pixels
def _size_row(self, row, anchor=0):
# Convert the height of a cell from user's units to pixels. If the
# height hasn't been set by the user we use the default value. A
# hidden row is treated as having a height of zero unless it has the
# special "object_position" of 4 (size with cells).
pixels = 0
# Look up the cell value to see if it has been changed
if row in self.row_sizes:
height = self.row_sizes[row][0]
hidden = self.row_sizes[row][1]
if hidden and anchor != 4:
pixels = 0
else:
pixels = int(4.0 / 3.0 * height)
else:
pixels = int(4.0 / 3.0 * self.default_row_height)
return pixels
    def _comment_params(self, row, col, string, options):
        # This method handles the additional optional parameters to
        # write_comment() as well as calculating the comment object
        # position and vertices.
        #
        # Returns a list: [row, col, string, author, visible, color,
        # font_name, font_size, font_family, vertices], where vertices is
        # the pixel geometry from _position_object_pixels() plus the
        # comment box width and height appended for the VML writer.
        default_width = 128
        default_height = 74
        anchor = 0
        params = {
            'author': None,
            'color': '#ffffe1',
            'start_cell': None,
            'start_col': None,
            'start_row': None,
            'visible': None,
            'width': default_width,
            'height': default_height,
            'x_offset': None,
            'x_scale': 1,
            'y_offset': None,
            'y_scale': 1,
            'font_name': 'Tahoma',
            'font_size': 8,
            'font_family': 2,
        }
        # Overwrite the defaults with any user supplied values. Incorrect or
        # misspelled parameters are silently ignored.
        for key in options.keys():
            params[key] = options[key]
        # Ensure that a width and height have been set.
        if not params['width']:
            params['width'] = default_width
        if not params['height']:
            params['height'] = default_height
        # Set the comment background color.
        params['color'] = xl_color(params['color']).lower()
        # Convert from Excel XML style color to XML html style color.
        # (Replaces the leading "ff" alpha channel with "#".)
        params['color'] = params['color'].replace('ff', '#', 1)
        # Convert a cell reference to a row and column.
        if params['start_cell'] is not None:
            (start_row, start_col) = xl_cell_to_rowcol(params['start_cell'])
            params['start_row'] = start_row
            params['start_col'] = start_col
        # Set the default start cell and offsets for the comment. These are
        # generally fixed in relation to the parent cell. However there are
        # some edge cases for cells at the, er, edges.
        row_max = self.xls_rowmax
        col_max = self.xls_colmax
        # Default anchor row: one above the cell, clamped near the bottom
        # of the sheet so the box stays on the worksheet.
        if params['start_row'] is None:
            if row == 0:
                params['start_row'] = 0
            elif row == row_max - 3:
                params['start_row'] = row_max - 7
            elif row == row_max - 2:
                params['start_row'] = row_max - 6
            elif row == row_max - 1:
                params['start_row'] = row_max - 5
            else:
                params['start_row'] = row - 1
        if params['y_offset'] is None:
            if row == 0:
                params['y_offset'] = 2
            elif row == row_max - 3:
                params['y_offset'] = 16
            elif row == row_max - 2:
                params['y_offset'] = 16
            elif row == row_max - 1:
                params['y_offset'] = 14
            else:
                params['y_offset'] = 10
        # Default anchor column: one to the right of the cell, clamped
        # near the right edge of the sheet.
        if params['start_col'] is None:
            if col == col_max - 3:
                params['start_col'] = col_max - 6
            elif col == col_max - 2:
                params['start_col'] = col_max - 5
            elif col == col_max - 1:
                params['start_col'] = col_max - 4
            else:
                params['start_col'] = col + 1
        if params['x_offset'] is None:
            if col == col_max - 3:
                params['x_offset'] = 49
            elif col == col_max - 2:
                params['x_offset'] = 49
            elif col == col_max - 1:
                params['x_offset'] = 49
            else:
                params['x_offset'] = 15
        # Scale the size of the comment box if required.
        if params['x_scale']:
            params['width'] = params['width'] * params['x_scale']
        if params['y_scale']:
            params['height'] = params['height'] * params['y_scale']
        # Round the dimensions to the nearest pixel.
        params['width'] = int(0.5 + params['width'])
        params['height'] = int(0.5 + params['height'])
        # Calculate the positions of the comment object.
        vertices = self._position_object_pixels(
            params['start_col'], params['start_row'], params['x_offset'],
            params['y_offset'], params['width'], params['height'], anchor)
        # Add the width and height for VML.
        vertices.append(params['width'])
        vertices.append(params['height'])
        return ([row, col, string, params['author'],
                 params['visible'], params['color'],
                 params['font_name'], params['font_size'],
                 params['font_family']] + [vertices])
    def _button_params(self, row, col, options):
        # This method handles the parameters passed to insert_button() as well
        # as calculating the comment object position and vertices.
        #
        # Returns a button dict with 'row', 'col', 'font' (caption),
        # 'macro' and 'vertices' (pixel geometry plus width/height for
        # the VML writer).
        default_height = self.default_row_pixels
        default_width = self.default_col_pixels
        anchor = 0
        # Buttons are numbered sequentially per worksheet, 1-based.
        button_number = 1 + len(self.buttons_list)
        button = {'row': row, 'col': col, 'font': {}}
        params = {}
        # Overwrite the defaults with any user supplied values. Incorrect or
        # misspelled parameters are silently ignored.
        for key in options.keys():
            params[key] = options[key]
        # Set the button caption.
        caption = params.get('caption')
        # Set a default caption if none was specified by user.
        if caption is None:
            caption = 'Button %d' % button_number
        button['font']['caption'] = caption
        # Set the macro name. "[0]!" is the workbook-level macro scope.
        if params.get('macro'):
            button['macro'] = '[0]!' + params['macro']
        else:
            button['macro'] = '[0]!Button%d_Click' % button_number
        # Ensure that a width and height have been set.
        params['width'] = params.get('width', default_width)
        params['height'] = params.get('height', default_height)
        # Set the x/y offsets.
        params['x_offset'] = params.get('x_offset', 0)
        params['y_offset'] = params.get('y_offset', 0)
        # Scale the size of the button if required.
        params['width'] = params['width'] * params.get('x_scale', 1)
        params['height'] = params['height'] * params.get('y_scale', 1)
        # Round the dimensions to the nearest pixel.
        params['width'] = int(0.5 + params['width'])
        params['height'] = int(0.5 + params['height'])
        # Buttons are anchored in their own cell, unlike comments.
        params['start_row'] = row
        params['start_col'] = col
        # Calculate the positions of the button object.
        vertices = self._position_object_pixels(
            params['start_col'], params['start_row'], params['x_offset'],
            params['y_offset'], params['width'], params['height'], anchor)
        # Add the width and height for VML.
        vertices.append(params['width'])
        vertices.append(params['height'])
        button['vertices'] = vertices
        return button
    def _prepare_vml_objects(self, vml_data_id, vml_shape_id, vml_drawing_id,
                             comment_id):
        # Expand the stored comment options into full comment parameter
        # lists, set the external rels links for the vmlDrawing and
        # comments parts, and record the VML data/shape ids.
        # Returns the number of comments processed.
        comments = []
        # Sort the comments into row/column order for easier comparison
        # testing and set the external links for comments and buttons.
        row_nums = sorted(self.comments.keys())
        for row in row_nums:
            col_nums = sorted(self.comments[row].keys())
            for col in col_nums:
                user_options = self.comments[row][col]
                params = self._comment_params(*user_options)
                self.comments[row][col] = params
                # Set comment visibility if required and not user defined.
                # (Index 4 of the params list is the 'visible' flag.)
                if self.comments_visible:
                    if self.comments[row][col][4] is None:
                        self.comments[row][col][4] = 1
                # Set comment author if not already user defined.
                # (Index 3 of the params list is the 'author' field.)
                if self.comments[row][col][3] is None:
                    self.comments[row][col][3] = self.comments_author
                comments.append(self.comments[row][col])
        self.external_vml_links.append(['/vmlDrawing',
                                        '../drawings/vmlDrawing'
                                        + str(vml_drawing_id)
                                        + '.vml'])
        if self.has_comments:
            self.comments_list = comments
            self.external_comment_links.append(['/comments',
                                                '../comments'
                                                + str(comment_id)
                                                + '.xml'])
        count = len(comments)
        start_data_id = vml_data_id
        # The VML o:idmap data id contains a comma separated range when there
        # is more than one 1024 block of comments, like this: data="1,2".
        for i in range(int(count / 1024)):
            vml_data_id = '%s,%d' % (vml_data_id, start_data_id + i + 1)
        self.vml_data_id = vml_data_id
        self.vml_shape_id = vml_shape_id
        return count
def _prepare_header_vml_objects(self, vml_header_id, vml_drawing_id):
# Set up external linkage for VML header/footer images.
self.vml_header_id = vml_header_id
self.external_vml_links.append(['/vmlDrawing',
'../drawings/vmlDrawing'
+ str(vml_drawing_id) + '.vml'])
def _prepare_tables(self, table_id, seen):
# Set the table ids for the worksheet tables.
for table in self.tables:
table['id'] = table_id
if table.get('name') is None:
# Set a default name.
table['name'] = 'Table' + str(table_id)
# Check for duplicate table names.
name = table['name'].lower()
if name in seen:
raise DuplicateTableName(
"Duplicate name '%s' used in worksheet.add_table()." %
table['name'])
else:
seen[name] = True
# Store the link used for the rels file.
self.external_table_links.append(['/table',
'../tables/table'
+ str(table_id)
+ '.xml'])
table_id += 1
def _table_function_to_formula(self, function, col_name):
# Convert a table total function to a worksheet formula.
formula = ''
# Escape special characters, as required by Excel.
col_name = re.sub(r"'", "''", col_name)
col_name = re.sub(r"#", "'#", col_name)
col_name = re.sub(r"]", "']", col_name)
col_name = re.sub(r"\[", "'[", col_name)
subtotals = {
'average': 101,
'countNums': 102,
'count': 103,
'max': 104,
'min': 105,
'stdDev': 107,
'sum': 109,
'var': 110,
}
if function in subtotals:
func_num = subtotals[function]
formula = "SUBTOTAL(%s,[%s])" % (func_num, col_name)
else:
warn("Unsupported function '%s' in add_table()" % function)
return formula
def _set_spark_color(self, sparkline, options, user_color):
# Set the sparkline color.
if user_color not in options:
return
sparkline[user_color] = {'rgb': xl_color(options[user_color])}
    def _get_range_data(self, row_start, col_start, row_end, col_end):
        # Returns a range of data from the worksheet _table to be used in
        # chart cached data. Strings are returned as SST ids and decoded
        # in the workbook. Return None for data that doesn't exist since
        # Excel can chart series with data missing.
        #
        # Not available in constant_memory mode, where the cell table is
        # not kept in memory (returns an empty tuple).
        if self.constant_memory:
            return ()
        data = []
        # Iterate through the table data.
        for row_num in range(row_start, row_end + 1):
            # Store None if row doesn't exist.
            # NOTE(review): a single None is appended for the whole
            # missing row, not one per column in the range — confirm this
            # is what the chart cache consumer expects.
            if row_num not in self.table:
                data.append(None)
                continue
            for col_num in range(col_start, col_end + 1):
                if col_num in self.table[row_num]:
                    cell = self.table[row_num][col_num]
                    # Dispatch on the cell's class name (Number, String,
                    # Formula, ArrayFormula, Blank).
                    type_cell_name = type(cell).__name__
                    if type_cell_name == 'Number':
                        # Return a number with Excel's precision.
                        data.append("%.16g" % cell.number)
                    elif type_cell_name == 'String':
                        # Return a string from it's shared string index.
                        index = cell.string
                        string = self.str_table._get_shared_string(index)
                        data.append(string)
                    elif (type_cell_name == 'Formula'
                            or type_cell_name == 'ArrayFormula'):
                        # Return the formula value.
                        value = cell.value
                        if value is None:
                            value = 0
                        data.append(value)
                    elif type_cell_name == 'Blank':
                        # Return a empty cell.
                        data.append('')
                else:
                    # Store None if column doesn't exist.
                    data.append(None)
        return data
def _csv_join(self, *items):
# Create a csv string for use with data validation formulas and lists.
# Convert non string types to string.
items = [str(item) if not isinstance(item, str_types) else item
for item in items]
return ','.join(items)
def _escape_url(self, url):
# Don't escape URL if it looks already escaped.
if re.search('%[0-9a-fA-F]{2}', url):
return url
# Can't use url.quote() here because it doesn't match Excel.
url = url.replace('%', '%25')
url = url.replace('"', '%22')
url = url.replace(' ', '%20')
url = url.replace('<', '%3c')
url = url.replace('>', '%3e')
url = url.replace('[', '%5b')
url = url.replace(']', '%5d')
url = url.replace('^', '%5e')
url = url.replace('`', '%60')
url = url.replace('{', '%7b')
url = url.replace('}', '%7d')
return url
def _get_drawing_rel_index(self, target=None):
# Get the index used to address a drawing rel link.
if target is None:
self.drawing_rels_id += 1
return self.drawing_rels_id
elif self.drawing_rels.get(target):
return self.drawing_rels[target]
else:
self.drawing_rels_id += 1
self.drawing_rels[target] = self.drawing_rels_id
return self.drawing_rels_id
###########################################################################
#
# The following font methods are, more or less, duplicated from the
# Styles class. Not the cleanest version of reuse but works for now.
#
###########################################################################
    def _write_font(self, xf_format):
        # Write the <font> element (as an <rPr> rich-text run) to the
        # rich string XML writer. The element order follows the schema:
        # style flags, underline, vertAlign, size, color, name, family,
        # scheme.
        xml_writer = self.rstring
        xml_writer._xml_start_tag('rPr')
        # Handle the main font properties.
        if xf_format.bold:
            xml_writer._xml_empty_tag('b')
        if xf_format.italic:
            xml_writer._xml_empty_tag('i')
        if xf_format.font_strikeout:
            xml_writer._xml_empty_tag('strike')
        if xf_format.font_outline:
            xml_writer._xml_empty_tag('outline')
        if xf_format.font_shadow:
            xml_writer._xml_empty_tag('shadow')
        # Handle the underline variants.
        if xf_format.underline:
            self._write_underline(xf_format.underline)
        # Handle super/subscript.
        if xf_format.font_script == 1:
            self._write_vert_align('superscript')
        if xf_format.font_script == 2:
            self._write_vert_align('subscript')
        # Write the font size
        xml_writer._xml_empty_tag('sz', [('val', xf_format.font_size)])
        # Handle colors, in order of precedence: theme, indexed, explicit
        # rgb, then the default theme color.
        if xf_format.theme == -1:
            # Ignore for excel2003_style.
            pass
        elif xf_format.theme:
            self._write_color('theme', xf_format.theme)
        elif xf_format.color_indexed:
            self._write_color('indexed', xf_format.color_indexed)
        elif xf_format.font_color:
            color = self._get_palette_color(xf_format.font_color)
            self._write_rstring_color('rgb', color)
        else:
            self._write_rstring_color('theme', 1)
        # Write some other font properties related to font families.
        xml_writer._xml_empty_tag('rFont', [('val', xf_format.font_name)])
        xml_writer._xml_empty_tag('family', [('val', xf_format.font_family)])
        # Calibri (the default font) gets a scheme element, except for
        # hyperlink formats.
        if xf_format.font_name == 'Calibri' and not xf_format.hyperlink:
            xml_writer._xml_empty_tag('scheme',
                                      [('val', xf_format.font_scheme)])
        xml_writer._xml_end_tag('rPr')
def _write_underline(self, underline):
# Write the underline font element.
attributes = []
# Handle the underline variants.
if underline == 2:
attributes = [('val', 'double')]
elif underline == 33:
attributes = [('val', 'singleAccounting')]
elif underline == 34:
attributes = [('val', 'doubleAccounting')]
self.rstring._xml_empty_tag('u', attributes)
def _write_vert_align(self, val):
# Write the <vertAlign> font sub-element.
attributes = [('val', val)]
self.rstring._xml_empty_tag('vertAlign', attributes)
def _write_rstring_color(self, name, value):
# Write the <color> element.
attributes = [(name, value)]
self.rstring._xml_empty_tag('color', attributes)
def _get_palette_color(self, color):
# Convert the RGB color.
if color[0] == '#':
color = color[1:]
return "FF" + color.upper()
def _opt_close(self):
# Close the row data filehandle in constant_memory mode.
if not self.row_data_fh_closed:
self.row_data_fh.close()
self.row_data_fh_closed = True
def _opt_reopen(self):
# Reopen the row data filehandle in constant_memory mode.
if self.row_data_fh_closed:
filename = self.row_data_filename
self.row_data_fh = codecs.open(filename, 'a+', 'utf-8')
self.row_data_fh_closed = False
self.fh = self.row_data_fh
def _set_icon_props(self, total_icons, user_props=None):
# Set the sub-properties for icons.
props = []
# Set the defaults.
for _ in range(total_icons):
props.append({'criteria': False,
'value': 0,
'type': 'percent'})
# Set the default icon values based on the number of icons.
if total_icons == 3:
props[0]['value'] = 67
props[1]['value'] = 33
if total_icons == 4:
props[0]['value'] = 75
props[1]['value'] = 50
props[2]['value'] = 25
if total_icons == 5:
props[0]['value'] = 80
props[1]['value'] = 60
props[2]['value'] = 40
props[3]['value'] = 20
# Overwrite default properties with user defined properties.
if user_props:
# Ensure we don't set user properties for lowest icon.
max_data = len(user_props)
if max_data >= total_icons:
max_data = total_icons - 1
for i in range(max_data):
# Set the user defined 'value' property.
if user_props[i].get('value') is not None:
props[i]['value'] = user_props[i]['value']
# Remove the formula '=' sign if it exists.
tmp = props[i]['value']
if isinstance(tmp, str_types) and tmp.startswith('='):
props[i]['value'] = tmp.lstrip('=')
# Set the user defined 'type' property.
if user_props[i].get('type'):
valid_types = ('percent',
'percentile',
'number',
'formula')
if user_props[i]['type'] not in valid_types:
warn("Unknown icon property type '%s' for sub-"
"property 'type' in conditional_format()" %
user_props[i]['type'])
else:
props[i]['type'] = user_props[i]['type']
if props[i]['type'] == 'number':
props[i]['type'] = 'num'
# Set the user defined 'criteria' property.
criteria = user_props[i].get('criteria')
if criteria and criteria == '>':
props[i]['criteria'] = True
return props
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_worksheet(self):
# Write the <worksheet> element. This is the root element.
schema = 'http://schemas.openxmlformats.org/'
xmlns = schema + 'spreadsheetml/2006/main'
xmlns_r = schema + 'officeDocument/2006/relationships'
xmlns_mc = schema + 'markup-compatibility/2006'
ms_schema = 'http://schemas.microsoft.com/'
xmlns_x14ac = ms_schema + 'office/spreadsheetml/2009/9/ac'
attributes = [
('xmlns', xmlns),
('xmlns:r', xmlns_r)]
# Add some extra attributes for Excel 2010. Mainly for sparklines.
if self.excel_version == 2010:
attributes.append(('xmlns:mc', xmlns_mc))
attributes.append(('xmlns:x14ac', xmlns_x14ac))
attributes.append(('mc:Ignorable', 'x14ac'))
self._xml_start_tag('worksheet', attributes)
    def _write_dimension(self):
        # Write the <dimension> element. This specifies the range of
        # cells in the worksheet. As a special case, empty
        # spreadsheets use 'A1' as a range.
        if self.dim_rowmin is None and self.dim_colmin is None:
            # If the min dimensions are not defined then no dimensions
            # have been set and we use the default 'A1'.
            ref = 'A1'
        elif self.dim_rowmin is None and self.dim_colmin is not None:
            # If the row dimensions aren't set but the column
            # dimensions are set then they have been changed via
            # set_column(). Row 0 is used as a placeholder.
            if self.dim_colmin == self.dim_colmax:
                # The dimensions are a single cell and not a range.
                ref = xl_rowcol_to_cell(0, self.dim_colmin)
            else:
                # The dimensions are a cell range.
                cell_1 = xl_rowcol_to_cell(0, self.dim_colmin)
                cell_2 = xl_rowcol_to_cell(0, self.dim_colmax)
                ref = cell_1 + ':' + cell_2
        elif (self.dim_rowmin == self.dim_rowmax and
                self.dim_colmin == self.dim_colmax):
            # The dimensions are a single cell and not a range.
            ref = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
        else:
            # The dimensions are a cell range.
            cell_1 = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
            cell_2 = xl_rowcol_to_cell(self.dim_rowmax, self.dim_colmax)
            ref = cell_1 + ':' + cell_2
        self._xml_empty_tag('dimension', [('ref', ref)])
def _write_sheet_views(self):
# Write the <sheetViews> element.
self._xml_start_tag('sheetViews')
# Write the sheetView element.
self._write_sheet_view()
self._xml_end_tag('sheetViews')
    def _write_sheet_view(self):
        # Write the <sheetView> element. The attribute order below is
        # significant: it matches the order Excel itself writes.
        attributes = []
        # Hide screen gridlines if required.
        if not self.screen_gridlines:
            attributes.append(('showGridLines', 0))
        # Hide screen row/column headers.
        if self.row_col_headers:
            attributes.append(('showRowColHeaders', 0))
        # Hide zeroes in cells.
        if not self.show_zeros:
            attributes.append(('showZeros', 0))
        # Display worksheet right to left for Hebrew, Arabic and others.
        if self.is_right_to_left:
            attributes.append(('rightToLeft', 1))
        # Show that the sheet tab is selected.
        if self.selected:
            attributes.append(('tabSelected', 1))
        # Turn outlines off. Also required in the outlinePr element.
        if not self.outline_on:
            attributes.append(("showOutlineSymbols", 0))
        # Set the page view/layout mode if required.
        if self.page_view:
            attributes.append(('view', 'pageLayout'))
        # Set the zoom level (only written when not the 100% default, and
        # zoomScale is omitted in page layout view).
        if self.zoom != 100:
            if not self.page_view:
                attributes.append(('zoomScale', self.zoom))
            if self.zoom_scale_normal:
                attributes.append(('zoomScaleNormal', self.zoom))
        attributes.append(('workbookViewId', 0))
        # Panes and selections are written as child elements, so the tag
        # is only non-empty when either is present.
        if self.panes or len(self.selections):
            self._xml_start_tag('sheetView', attributes)
            self._write_panes()
            self._write_selections()
            self._xml_end_tag('sheetView')
        else:
            self._xml_empty_tag('sheetView', attributes)
def _write_sheet_format_pr(self):
# Write the <sheetFormatPr> element.
default_row_height = self.default_row_height
row_level = self.outline_row_level
col_level = self.outline_col_level
attributes = [('defaultRowHeight', default_row_height)]
if self.default_row_height != self.original_row_height:
attributes.append(('customHeight', 1))
if self.default_row_zeroed:
attributes.append(('zeroHeight', 1))
if row_level:
attributes.append(('outlineLevelRow', row_level))
if col_level:
attributes.append(('outlineLevelCol', col_level))
if self.excel_version == 2010:
attributes.append(('x14ac:dyDescent', '0.25'))
self._xml_empty_tag('sheetFormatPr', attributes)
def _write_cols(self):
# Write the <cols> element and <col> sub elements.
# Exit unless some column have been formatted.
if not self.colinfo:
return
self._xml_start_tag('cols')
for col in sorted(self.colinfo.keys()):
self._write_col_info(self.colinfo[col])
self._xml_end_tag('cols')
    def _write_col_info(self, col_info):
        # Write the <col> element for one formatted column range.
        #
        # col_info is a 7 element tuple: (col_min, col_max, width,
        # cell_format, hidden, level, collapsed).
        (col_min, col_max, width, cell_format,
         hidden, level, collapsed) = col_info
        custom_width = 1
        xf_index = 0
        # Get the cell_format index.
        if cell_format:
            xf_index = cell_format._get_xf_index()
        # Set the Excel default column width.
        if width is None:
            if not hidden:
                width = 8.43
                custom_width = 0
            else:
                width = 0
        elif width == 8.43:
            # Width is defined but same as default.
            custom_width = 0
        # Convert column width from user units to character width, using
        # Excel's two-step rounding (pixels, then 1/256ths of a char).
        if width > 0:
            # For Calibri 11.
            max_digit_width = 7
            padding = 5
            if width < 1:
                width = int((int(width * (max_digit_width + padding) + 0.5))
                            / float(max_digit_width) * 256.0) / 256.0
            else:
                width = int((int(width * max_digit_width + 0.5) + padding)
                            / float(max_digit_width) * 256.0) / 256.0
        # Columns are 1-based in the XML.
        attributes = [
            ('min', col_min + 1),
            ('max', col_max + 1),
            ('width', "%.16g" % width)]
        if xf_index:
            attributes.append(('style', xf_index))
        if hidden:
            attributes.append(('hidden', '1'))
        if custom_width:
            attributes.append(('customWidth', '1'))
        if level:
            attributes.append(('outlineLevel', level))
        if collapsed:
            attributes.append(('collapsed', '1'))
        self._xml_empty_tag('col', attributes)
def _write_sheet_data(self):
# Write the <sheetData> element.
if self.dim_rowmin is None:
# If the dimensions aren't defined there is no data to write.
self._xml_empty_tag('sheetData')
else:
self._xml_start_tag('sheetData')
self._write_rows()
self._xml_end_tag('sheetData')
    def _write_optimized_sheet_data(self):
        # Write the <sheetData> element when constant_memory is on. In this
        # case we read the data stored in the temp file and rewrite it to the
        # XML sheet file. The temp file is deleted afterwards.
        if self.dim_rowmin is None:
            # If the dimensions aren't defined then there is no data to write.
            self._xml_empty_tag('sheetData')
        else:
            self._xml_start_tag('sheetData')
            # Rewind the filehandle that was used for temp row data and
            # copy it across in fixed-size chunks.
            buff_size = 65536
            self.row_data_fh.seek(0)
            data = self.row_data_fh.read(buff_size)
            while data:
                self.fh.write(data)
                data = self.row_data_fh.read(buff_size)
            self.row_data_fh.close()
            os.unlink(self.row_data_filename)
            self._xml_end_tag('sheetData')
def _write_page_margins(self):
    """Write the <pageMargins> element with all six margin values."""
    margins = (
        ('left', self.margin_left),
        ('right', self.margin_right),
        ('top', self.margin_top),
        ('bottom', self.margin_bottom),
        ('header', self.margin_header),
        ('footer', self.margin_footer),
    )
    self._xml_empty_tag('pageMargins', list(margins))
def _write_page_setup(self):
    """Write the <pageSetup> element.

    The following is an example taken from Excel:

    <pageSetup
        paperSize="9"
        scale="110"
        fitToWidth="2"
        fitToHeight="2"
        pageOrder="overThenDown"
        orientation="portrait"
        blackAndWhite="1"
        draft="1"
        horizontalDpi="200"
        verticalDpi="200"
        r:id="rId1"
    />
    """
    attributes = []
    # Skip this element if no page setup has changed.
    if not self.page_setup_changed:
        return
    # Set paper size.
    if self.paper_size:
        attributes.append(('paperSize', self.paper_size))
    # Set the print_scale.
    if self.print_scale != 100:
        attributes.append(('scale', self.print_scale))
    # Set the "Fit to page" properties (only non-default values).
    if self.fit_page and self.fit_width != 1:
        attributes.append(('fitToWidth', self.fit_width))
    if self.fit_page and self.fit_height != 1:
        attributes.append(('fitToHeight', self.fit_height))
    # Set the page print direction.
    if self.page_order:
        attributes.append(('pageOrder', "overThenDown"))
    # Set the explicit start page number for printing (> 1 only).
    if self.page_start > 1:
        attributes.append(('firstPageNumber', self.page_start))
    # Set page orientation.
    if self.orientation:
        attributes.append(('orientation', 'portrait'))
    else:
        attributes.append(('orientation', 'landscape'))
    # Flag that a user start page is in use. Note: this check (!= 0)
    # is deliberately separate from the firstPageNumber check (> 1)
    # above to preserve the attribute order.
    if self.page_start != 0:
        attributes.append(('useFirstPageNumber', '1'))
    # Set the DPI. Mainly only for testing. The attribute order
    # differs between chartsheets and worksheets.
    if self.is_chartsheet:
        if self.horizontal_dpi:
            attributes.append(('horizontalDpi', self.horizontal_dpi))
        if self.vertical_dpi:
            attributes.append(('verticalDpi', self.vertical_dpi))
    else:
        if self.vertical_dpi:
            attributes.append(('verticalDpi', self.vertical_dpi))
        if self.horizontal_dpi:
            attributes.append(('horizontalDpi', self.horizontal_dpi))
    self._xml_empty_tag('pageSetup', attributes)
def _write_print_options(self):
    """Write the <printOptions> element."""
    if not self.print_options_changed:
        return
    attributes = []
    # Each worksheet flag maps to one printOptions attribute; the
    # order here matches the order Excel writes them in.
    option_map = (
        (self.hcenter, 'horizontalCentered'),
        (self.vcenter, 'verticalCentered'),
        (self.print_headers, 'headings'),
        (self.print_gridlines, 'gridLines'),
    )
    for enabled, attr_name in option_map:
        if enabled:
            attributes.append((attr_name, 1))
    self._xml_empty_tag('printOptions', attributes)
def _write_header_footer(self):
    """Write the <headerFooter> element and its odd header/footer."""
    attributes = []
    if not self.header_footer_scales:
        attributes.append(('scaleWithDoc', 0))
    if not self.header_footer_aligns:
        attributes.append(('alignWithMargins', 0))
    if not self.header_footer_changed:
        # Only the Excel 2003 compatibility mode emits the element
        # when nothing was changed.
        if self.excel2003_style:
            self._xml_empty_tag('headerFooter', attributes)
        return
    self._xml_start_tag('headerFooter', attributes)
    if self.header:
        self._write_odd_header()
    if self.footer:
        self._write_odd_footer()
    self._xml_end_tag('headerFooter')
def _write_odd_header(self):
    # Write the <oddHeader> element with the header string.
    self._xml_data_element('oddHeader', self.header)
def _write_odd_footer(self):
    # Write the <oddFooter> element with the footer string.
    self._xml_data_element('oddFooter', self.footer)
def _write_rows(self):
    """Write out the worksheet data as a series of rows and cells.

    Only rows that have cell data, row-level formatting and/or
    comments are written; all other rows are omitted.
    """
    self._calculate_spans()
    for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
        if (row_num in self.set_rows or row_num in self.comments
                or self.table[row_num]):
            # Only process rows with formatting, cell data and/or comments.
            span_index = int(row_num / 16)
            if span_index in self.row_spans:
                span = self.row_spans[span_index]
            else:
                span = None
            if self.table[row_num]:
                # Write the cells if the row contains data.
                if row_num not in self.set_rows:
                    self._write_row(row_num, span)
                else:
                    self._write_row(row_num, span, self.set_rows[row_num])
                for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                    if col_num in self.table[row_num]:
                        col_ref = self.table[row_num][col_num]
                        self._write_cell(row_num, col_num, col_ref)
                self._xml_end_tag('row')
            elif row_num in self.comments:
                # Row with comments in cells. A comment-only row may
                # have no entry in set_rows, so use get() to avoid a
                # KeyError (the plain index crashed on such rows).
                self._write_empty_row(row_num, span,
                                      self.set_rows.get(row_num))
            else:
                # Blank row with attributes only. Here the row must be
                # in set_rows (it isn't in comments and has no data).
                self._write_empty_row(row_num, span,
                                      self.set_rows[row_num])
def _write_single_row(self, current_row_num=0):
    """Write the worksheet data as a single row with cells.

    Used when constant_memory is on: a single row is written and the
    data table is reset so that only one row of data is kept in
    memory at any one time. Span data is not written in this mode
    since it is optional.
    """
    # The row actually written is the previously buffered one.
    row_num = self.previous_row
    self.previous_row = current_row_num
    if (row_num in self.set_rows or row_num in self.comments
            or self.table[row_num]):
        # Only process rows with formatting, cell data and/or comments.
        # No span data in optimized mode.
        span = None
        if self.table[row_num]:
            # Write the cells if the row contains data.
            if row_num not in self.set_rows:
                self._write_row(row_num, span)
            else:
                self._write_row(row_num, span, self.set_rows[row_num])
            for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                if col_num in self.table[row_num]:
                    col_ref = self.table[row_num][col_num]
                    self._write_cell(row_num, col_num, col_ref)
            self._xml_end_tag('row')
        else:
            # Row attributes or comments only. A comment-only row has
            # no entry in set_rows, so use get() to avoid a KeyError
            # (the plain index crashed on such rows).
            self._write_empty_row(row_num, span, self.set_rows.get(row_num))
    # Reset table.
    self.table.clear()
def _calculate_spans(self):
    """Calculate the "spans" attribute of the <row> tag.

    This is an XLSX optimization and isn't strictly required.
    However, it makes comparing files easier. The span is the same
    for each block of 16 rows and covers both cell data and comments.
    """
    spans = {}
    span_min = None
    span_max = None
    for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
        if row_num in self.table:
            # Calculate spans for cell data.
            for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                if col_num in self.table[row_num]:
                    if span_min is None:
                        span_min = col_num
                        span_max = col_num
                    else:
                        if col_num < span_min:
                            span_min = col_num
                        if col_num > span_max:
                            span_max = col_num
        if row_num in self.comments:
            # Calculate spans for comments.
            for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                if (row_num in self.comments
                        and col_num in self.comments[row_num]):
                    if span_min is None:
                        span_min = col_num
                        span_max = col_num
                    else:
                        if col_num < span_min:
                            span_min = col_num
                        if col_num > span_max:
                            span_max = col_num
        # At the end of each 16-row block (or the last row) record the
        # 1-based span string and reset for the next block. Note that
        # only span_min is reset; span_max is reinitialized together
        # with span_min on the next non-empty cell.
        if ((row_num + 1) % 16 == 0) or row_num == self.dim_rowmax:
            span_index = int(row_num / 16)
            if span_min is not None:
                span_min += 1
                span_max += 1
                spans[span_index] = "%s:%s" % (span_min, span_max)
                span_min = None
    self.row_spans = spans
def _write_row(self, row, spans, properties=None, empty_row=False):
    """Write the <row> element, self-closing when empty_row is True.

    properties, when given, is the (height, cell_format, hidden,
    level, collapsed) tuple stored for the row.
    """
    xf_index = 0
    if properties:
        height, cell_format, hidden, level, collapsed = properties
    else:
        height, cell_format, hidden, level, collapsed = None, None, 0, 0, 0
    if height is None:
        height = self.default_row_height
    # Row numbers in the XML are 1-based.
    attributes = [('r', row + 1)]
    # Get the cell_format index.
    if cell_format:
        xf_index = cell_format._get_xf_index()
    # Add row attributes where applicable.
    if spans:
        attributes.append(('spans', spans))
    if xf_index:
        attributes.append(('s', xf_index))
    if cell_format:
        attributes.append(('customFormat', 1))
    # A non-default height emits both 'ht' here and 'customHeight'
    # below; the duplicated check preserves the attribute order.
    if height != self.original_row_height:
        attributes.append(('ht', height))
    if hidden:
        attributes.append(('hidden', 1))
    if height != self.original_row_height:
        attributes.append(('customHeight', 1))
    if level:
        attributes.append(('outlineLevel', level))
    if collapsed:
        attributes.append(('collapsed', 1))
    if self.excel_version == 2010:
        attributes.append(('x14ac:dyDescent', '0.25'))
    if empty_row:
        self._xml_empty_tag_unencoded('row', attributes)
    else:
        self._xml_start_tag_unencoded('row', attributes)
def _write_empty_row(self, row, spans, properties=None):
    # Write an empty <row> element, i.e. attributes only, no cells.
    self._write_row(row, spans, properties, empty_row=True)
def _write_cell(self, row, col, cell):
    """Write the <c> cell element for one cell.

    Note. This is the innermost loop so efficiency is important.
    The cell type is dispatched on the class name of the cell
    namedtuple: Number, String, Formula, ArrayFormula, Blank or
    Boolean.
    """
    cell_range = xl_rowcol_to_cell_fast(row, col)
    attributes = [('r', cell_range)]
    # Format precedence: cell format, then row format, then column.
    if cell.format:
        # Add the cell format index.
        xf_index = cell.format._get_xf_index()
        attributes.append(('s', xf_index))
    elif row in self.set_rows and self.set_rows[row][1]:
        # Add the row format.
        row_xf = self.set_rows[row][1]
        attributes.append(('s', row_xf._get_xf_index()))
    elif col in self.col_formats:
        # Add the column format.
        col_xf = self.col_formats[col]
        attributes.append(('s', col_xf._get_xf_index()))
    type_cell_name = type(cell).__name__
    # Write the various cell types.
    if type_cell_name == 'Number':
        # Write a number.
        self._xml_number_element(cell.number, attributes)
    elif type_cell_name == 'String':
        # Write a string.
        string = cell.string
        if not self.constant_memory:
            # Write a shared string.
            self._xml_string_element(string, attributes)
        else:
            # Write an optimized in-line string.
            # Escape control characters. See SharedString.pm for details.
            string = re.sub('(_x[0-9a-fA-F]{4}_)', r'_x005F\1', string)
            string = re.sub(r'([\x00-\x08\x0B-\x1F])',
                            lambda match: "_x%04X_" %
                            ord(match.group(1)), string)
            # Escape non characters.
            if sys.version_info[0] == 2:
                non_char1 = unichr(0xFFFE)
                non_char2 = unichr(0xFFFF)
            else:
                non_char1 = "\uFFFE"
                non_char2 = "\uFFFF"
            string = re.sub(non_char1, '_xFFFE_', string)
            string = re.sub(non_char2, '_xFFFF_', string)
            # Write any rich strings without further tags.
            if re.search('^<r>', string) and re.search('</r>$', string):
                self._xml_rich_inline_string(string, attributes)
            else:
                # Add attribute to preserve leading or trailing whitespace.
                preserve = 0
                if re.search(r'^\s', string) or re.search(r'\s$', string):
                    preserve = 1
                self._xml_inline_string(string, preserve, attributes)
    elif type_cell_name == 'Formula':
        # Write a formula. First check the formula value type.
        value = cell.value
        if type(cell.value) == bool:
            # Boolean results are written as 1/0 with a 'b' type.
            attributes.append(('t', 'b'))
            if cell.value:
                value = 1
            else:
                value = 0
        elif isinstance(cell.value, str_types):
            error_codes = ('#DIV/0!', '#N/A', '#NAME?', '#NULL!',
                           '#NUM!', '#REF!', '#VALUE!')
            if cell.value in error_codes:
                attributes.append(('t', 'e'))
            else:
                attributes.append(('t', 'str'))
        self._xml_formula_element(cell.formula, value, attributes)
    elif type_cell_name == 'ArrayFormula':
        # Write a array formula.
        # First check if the formula value is a string.
        try:
            float(cell.value)
        except ValueError:
            attributes.append(('t', 'str'))
        # Write an array formula.
        self._xml_start_tag('c', attributes)
        self._write_cell_array_formula(cell.formula, cell.range)
        self._write_cell_value(cell.value)
        self._xml_end_tag('c')
    elif type_cell_name == 'Blank':
        # Write a empty cell.
        self._xml_empty_tag('c', attributes)
    elif type_cell_name == 'Boolean':
        # Write a boolean cell.
        attributes.append(('t', 'b'))
        self._xml_start_tag('c', attributes)
        self._write_cell_value(cell.boolean)
        self._xml_end_tag('c')
def _write_cell_value(self, value):
    """Write the cell value <v> element (empty string for None)."""
    self._xml_data_element('v', '' if value is None else value)
def _write_cell_array_formula(self, formula, cell_range):
    """Write the cell array formula <f> element."""
    self._xml_data_element('f', formula,
                           [('t', 'array'), ('ref', cell_range)])
def _write_sheet_pr(self):
    """Write the <sheetPr> element for sheet-level properties."""
    # Skip the element entirely when no relevant property is set.
    if not (self.fit_page or self.filter_on or self.tab_color
            or self.outline_changed or self.vba_codename):
        return
    attributes = []
    if self.vba_codename:
        attributes.append(('codeName', self.vba_codename))
    if self.filter_on:
        attributes.append(('filterMode', 1))
    # Child elements are only required for these three properties.
    if self.fit_page or self.tab_color or self.outline_changed:
        self._xml_start_tag('sheetPr', attributes)
        self._write_tab_color()
        self._write_outline_pr()
        self._write_page_set_up_pr()
        self._xml_end_tag('sheetPr')
    else:
        self._xml_empty_tag('sheetPr', attributes)
def _write_page_set_up_pr(self):
    # Write the <pageSetUpPr> element; only needed for fit-to-page.
    if self.fit_page:
        self._xml_empty_tag('pageSetUpPr', [('fitToPage', 1)])
def _write_tab_color(self):
    # Write the <tabColor> element if a tab color has been set.
    if self.tab_color:
        self._xml_empty_tag('tabColor', [('rgb', self.tab_color)])
def _write_outline_pr(self):
    """Write the <outlinePr> element for outline/grouping settings."""
    if not self.outline_changed:
        return
    attributes = []
    if self.outline_style:
        attributes.append(("applyStyles", 1))
    # The remaining attributes are written only when the setting is
    # turned *off*, since on is the default.
    off_flags = ((self.outline_below, "summaryBelow"),
                 (self.outline_right, "summaryRight"),
                 (self.outline_on, "showOutlineSymbols"))
    for is_on, attr_name in off_flags:
        if not is_on:
            attributes.append((attr_name, 0))
    self._xml_empty_tag('outlinePr', attributes)
def _write_row_breaks(self):
    """Write the <rowBreaks> element for manual horizontal breaks."""
    page_breaks = self._sort_pagebreaks(self.hbreaks)
    if not page_breaks:
        return
    count = len(page_breaks)
    self._xml_start_tag('rowBreaks',
                        [('count', count), ('manualBreakCount', count)])
    for break_row in page_breaks:
        self._write_brk(break_row, 16383)
    self._xml_end_tag('rowBreaks')
def _write_col_breaks(self):
    """Write the <colBreaks> element for manual vertical breaks."""
    page_breaks = self._sort_pagebreaks(self.vbreaks)
    if not page_breaks:
        return
    count = len(page_breaks)
    self._xml_start_tag('colBreaks',
                        [('count', count), ('manualBreakCount', count)])
    for break_col in page_breaks:
        self._write_brk(break_col, 1048575)
    self._xml_end_tag('colBreaks')
def _write_brk(self, brk_id, brk_max):
    # Write a single manual <brk> page-break element.
    self._xml_empty_tag('brk',
                        [('id', brk_id), ('max', brk_max), ('man', 1)])
def _write_merge_cells(self):
    """Write the <mergeCells> element with one child per range."""
    if not self.merge:
        return
    self._xml_start_tag('mergeCells', [('count', len(self.merge))])
    for merged_range in self.merge:
        # Write the mergeCell element.
        self._write_merge_cell(merged_range)
    self._xml_end_tag('mergeCells')
def _write_merge_cell(self, merged_range):
    """Write the <mergeCell> element for one merged range."""
    (row_min, col_min, row_max, col_max) = merged_range
    # Convert the merge dimensions to an A1:B2 style cell range.
    ref = '%s:%s' % (xl_rowcol_to_cell(row_min, col_min),
                     xl_rowcol_to_cell(row_max, col_max))
    self._xml_empty_tag('mergeCell', [('ref', ref)])
def _write_hyperlinks(self):
    """Process stored hyperlinks in row/col order and write the
    <hyperlinks> element.

    The attributes are different for internal and external links:
    external links (link_type 1) are referenced via a relationship
    id and also registered for the packager.
    """
    hlink_refs = []
    display = None
    # Sort the hyperlinks into row order.
    row_nums = sorted(self.hyperlinks.keys())
    # Exit if there are no hyperlinks to process.
    if not row_nums:
        return
    # Iterate over the rows.
    for row_num in row_nums:
        # Sort the hyperlinks into column order.
        col_nums = sorted(self.hyperlinks[row_num].keys())
        # Iterate over the columns.
        for col_num in col_nums:
            # Get the link data for this cell.
            link = self.hyperlinks[row_num][col_num]
            link_type = link['link_type']
            # If the cell isn't a string then we have to add the url as
            # the string to display.
            # NOTE(review): this indexes self.table[row_num][col_num]
            # directly, assuming every hyperlink cell also has a table
            # entry for that column -- verify against the url writing
            # code before relying on it for comment-only cells.
            if (self.table
                    and self.table[row_num]
                    and self.table[row_num][col_num]):
                cell = self.table[row_num][col_num]
                if type(cell).__name__ != 'String':
                    display = link['url']
            if link_type == 1:
                # External link with rel file relationship.
                self.rel_count += 1
                hlink_refs.append([link_type,
                                   row_num,
                                   col_num,
                                   self.rel_count,
                                   link['str'],
                                   display,
                                   link['tip']])
                # Links for use by the packager.
                self.external_hyper_links.append(['/hyperlink',
                                                  link['url'], 'External'])
            else:
                # Internal link with rel file relationship.
                hlink_refs.append([link_type,
                                   row_num,
                                   col_num,
                                   link['url'],
                                   link['str'],
                                   link['tip']])
    # Write the hyperlink elements.
    self._xml_start_tag('hyperlinks')
    for args in hlink_refs:
        link_type = args.pop(0)
        if link_type == 1:
            self._write_hyperlink_external(*args)
        elif link_type == 2:
            self._write_hyperlink_internal(*args)
    self._xml_end_tag('hyperlinks')
def _write_hyperlink_external(self, row, col, id_num, location=None,
                              display=None, tooltip=None):
    """Write the <hyperlink> element for an external link.

    External links are referenced indirectly via a relationship id.
    """
    attributes = [
        ('ref', xl_rowcol_to_cell(row, col)),
        ('r:id', 'rId' + str(id_num)),
    ]
    # Optional attributes are only written when supplied.
    optional = (('location', location),
                ('display', display),
                ('tooltip', tooltip))
    for attr_name, attr_value in optional:
        if attr_value is not None:
            attributes.append((attr_name, attr_value))
    self._xml_empty_tag('hyperlink', attributes)
def _write_hyperlink_internal(self, row, col, location=None, display=None,
                              tooltip=None):
    """Write the <hyperlink> element for internal links."""
    cell_ref = xl_rowcol_to_cell(row, col)
    attributes = [('ref', cell_ref), ('location', location)]
    if tooltip is not None:
        attributes.append(('tooltip', tooltip))
    # The display attribute is appended unconditionally for internal
    # links, after the optional tooltip.
    attributes.append(('display', display))
    self._xml_empty_tag('hyperlink', attributes)
def _write_auto_filter(self):
    """Write the <autoFilter> element."""
    if not self.autofilter_ref:
        return
    attributes = [('ref', self.autofilter_ref)]
    if not self.filter_on:
        # Autofilter range defined without active filters.
        self._xml_empty_tag('autoFilter', attributes)
        return
    # Autofilter range with active filter criteria.
    self._xml_start_tag('autoFilter', attributes)
    self._write_autofilters()
    self._xml_end_tag('autoFilter')
def _write_autofilters(self):
    """Iterate the autofilter range columns and write each filter."""
    (first_col, last_col) = self.filter_range
    for col in range(first_col, last_col + 1):
        # Skip columns without an active filter.
        if col not in self.filter_cols:
            continue
        # Column ids in the XML are relative to the first column of
        # the autofilter range.
        self._write_filter_column(col - first_col,
                                  self.filter_type[col],
                                  self.filter_cols[col])
def _write_filter_column(self, col_id, filter_type, filters):
    """Write the <filterColumn> element and its filter child."""
    self._xml_start_tag('filterColumn', [('colId', col_id)])
    if filter_type == 1:
        # Type 1 is the new XLSX style list filter.
        self._write_filters(filters)
    else:
        # Type 0 is the classic "custom" filter.
        self._write_custom_filters(filters)
    self._xml_end_tag('filterColumn')
def _write_filters(self, filters):
    """Write the <filters> element."""
    non_blanks = [value for value in filters
                  if str(value).lower() != 'blanks']
    attributes = []
    if len(filters) != len(non_blanks):
        # At least one 'blanks' entry was present.
        attributes = [('blank', 1)]
    if len(filters) == 1 and not non_blanks:
        # Special case for blank cells only.
        self._xml_empty_tag('filters', attributes)
    else:
        # General case.
        self._xml_start_tag('filters', attributes)
        for value in sorted(non_blanks):
            self._write_filter(value)
        self._xml_end_tag('filters')
def _write_filter(self, val):
    # Write a single <filter> value element.
    self._xml_empty_tag('filter', [('val', val)])
def _write_custom_filters(self, tokens):
    """Write the <customFilters> element."""
    if len(tokens) == 2:
        # A single filter expression: (operator, value).
        self._xml_start_tag('customFilters')
        self._write_custom_filter(*tokens)
        self._xml_end_tag('customFilters')
        return
    # Two filter expressions joined by "and" (token 0) or "or".
    and_value = 1 if tokens[2] == 0 else 0
    self._xml_start_tag('customFilters', [('and', and_value)])
    self._write_custom_filter(tokens[0], tokens[1])
    self._write_custom_filter(tokens[3], tokens[4])
    self._xml_end_tag('customFilters')
def _write_custom_filter(self, operator, val):
    """Write the <customFilter> element.

    The numeric operator token is mapped to its descriptive XML
    string. Unknown tokens now trigger the warning instead of a
    KeyError: the original dict-index lookup raised before the
    warn() branch could ever run.
    """
    attributes = []
    operators = {
        1: 'lessThan',
        2: 'equal',
        3: 'lessThanOrEqual',
        4: 'greaterThan',
        5: 'notEqual',
        6: 'greaterThanOrEqual',
        22: 'equal',
    }
    # Convert the operator from a number to a descriptive string.
    mapped = operators.get(operator)
    if mapped is not None:
        operator = mapped
    else:
        warn("Unknown operator = %s" % operator)
    # The 'equal' operator is the default attribute and isn't stored.
    if operator != 'equal':
        attributes.append(('operator', operator))
    attributes.append(('val', val))
    self._xml_empty_tag('customFilter', attributes)
def _write_sheet_protection(self):
    """Write the <sheetProtection> element.

    Note the mixed polarity: some user options are written directly
    as 1, some inverted options ('objects', 'scenarios', 'select_*')
    are written as 1 when disabled, and the "allow" options are
    written as 0 when the user enables them, because the XML default
    for those attributes is protected.
    """
    attributes = []
    if not self.protect_options:
        return
    options = self.protect_options
    if options['password']:
        attributes.append(('password', options['password']))
    if options['sheet']:
        attributes.append(('sheet', 1))
    if options['content']:
        attributes.append(('content', 1))
    if not options['objects']:
        attributes.append(('objects', 1))
    if not options['scenarios']:
        attributes.append(('scenarios', 1))
    if options['format_cells']:
        attributes.append(('formatCells', 0))
    if options['format_columns']:
        attributes.append(('formatColumns', 0))
    if options['format_rows']:
        attributes.append(('formatRows', 0))
    if options['insert_columns']:
        attributes.append(('insertColumns', 0))
    if options['insert_rows']:
        attributes.append(('insertRows', 0))
    if options['insert_hyperlinks']:
        attributes.append(('insertHyperlinks', 0))
    if options['delete_columns']:
        attributes.append(('deleteColumns', 0))
    if options['delete_rows']:
        attributes.append(('deleteRows', 0))
    if not options['select_locked_cells']:
        attributes.append(('selectLockedCells', 1))
    if options['sort']:
        attributes.append(('sort', 0))
    if options['autofilter']:
        attributes.append(('autoFilter', 0))
    if options['pivot_tables']:
        attributes.append(('pivotTables', 0))
    if not options['select_unlocked_cells']:
        attributes.append(('selectUnlockedCells', 1))
    self._xml_empty_tag('sheetProtection', attributes)
def _write_drawings(self):
    # Write the <drawing> element if the sheet has a drawing object.
    if self.drawing:
        self.rel_count += 1
        self._write_drawing(self.rel_count)
def _write_drawing(self, drawing_id):
    # Write the <drawing> element with its relationship id.
    self._xml_empty_tag('drawing',
                        [('r:id', 'rId' + str(drawing_id))])
def _write_legacy_drawing(self):
    """Write the <legacyDrawing> element when the sheet has VML."""
    if not self.has_vml:
        return
    # Increment the relationship id for any drawings or comments.
    self.rel_count += 1
    self._xml_empty_tag('legacyDrawing',
                        [('r:id', 'rId' + str(self.rel_count))])
def _write_legacy_drawing_hf(self):
    """Write the <legacyDrawingHF> element for header/footer VML."""
    if not self.has_header_vml:
        return
    # Increment the relationship id for any drawings or comments.
    self.rel_count += 1
    self._xml_empty_tag('legacyDrawingHF',
                        [('r:id', 'rId' + str(self.rel_count))])
def _write_data_validations(self):
    """Write the <dataValidations> element and its children."""
    count = len(self.validations)
    if not count:
        return
    self._xml_start_tag('dataValidations', [('count', count)])
    for validation in self.validations:
        # Write one dataValidation element per stored validation.
        self._write_data_validation(validation)
    self._xml_end_tag('dataValidations')
def _write_data_validation(self, options):
    """Write the <dataValidation> element for one validation.

    options is the normalized dict of validation parameters; the
    attribute order below must be preserved.
    """
    sqref = ''
    attributes = []
    # Set the cell range(s) for the data validation.
    for cells in options['cells']:
        # Add a space between multiple cell ranges.
        if sqref != '':
            sqref += ' '
        (row_first, col_first, row_last, col_last) = cells
        # Swap last row/col for first row/col as necessary
        if row_first > row_last:
            (row_first, row_last) = (row_last, row_first)
        if col_first > col_last:
            (col_first, col_last) = (col_last, col_first)
        # If the first and last cell are the same write a single cell.
        if (row_first == row_last) and (col_first == col_last):
            sqref += xl_rowcol_to_cell(row_first, col_first)
        else:
            sqref += xl_range(row_first, col_first, row_last, col_last)
    if options['validate'] != 'none':
        attributes.append(('type', options['validate']))
    if options['criteria'] != 'between':
        attributes.append(('operator', options['criteria']))
    if 'error_type' in options:
        if options['error_type'] == 1:
            attributes.append(('errorStyle', 'warning'))
        if options['error_type'] == 2:
            attributes.append(('errorStyle', 'information'))
    if options['ignore_blank']:
        attributes.append(('allowBlank', 1))
    if not options['dropdown']:
        attributes.append(('showDropDown', 1))
    if options['show_input']:
        attributes.append(('showInputMessage', 1))
    if options['show_error']:
        attributes.append(('showErrorMessage', 1))
    if 'error_title' in options:
        attributes.append(('errorTitle', options['error_title']))
    if 'error_message' in options:
        attributes.append(('error', options['error_message']))
    if 'input_title' in options:
        attributes.append(('promptTitle', options['input_title']))
    if 'input_message' in options:
        attributes.append(('prompt', options['input_message']))
    attributes.append(('sqref', sqref))
    # A 'none' validation has no formula children.
    if options['validate'] == 'none':
        self._xml_empty_tag('dataValidation', attributes)
    else:
        self._xml_start_tag('dataValidation', attributes)
        # Write the formula1 element.
        self._write_formula_1(options['value'])
        # Write the formula2 element.
        if options['maximum'] is not None:
            self._write_formula_2(options['maximum'])
        self._xml_end_tag('dataValidation')
def _write_formula_1(self, formula):
    """Write the <formula1> element."""
    if type(formula) is list:
        # An explicit list of values becomes a quoted CSV string.
        formula = '"%s"' % self._csv_join(*formula)
    else:
        try:
            float(formula)
        except ValueError:
            # Not a number: strip a leading formula '=' sign.
            if formula.startswith('='):
                formula = formula.lstrip('=')
    self._xml_data_element('formula1', formula)
def _write_formula_2(self, formula):
    """Write the <formula2> element."""
    try:
        float(formula)
    except ValueError:
        # Not a number: strip a leading formula '=' sign.
        if formula.startswith('='):
            formula = formula.lstrip('=')
    self._xml_data_element('formula2', formula)
def _write_conditional_formats(self):
    """Write the worksheet conditional formats in range order."""
    for cond_range in sorted(self.cond_formats.keys()):
        self._write_conditional_formatting(cond_range,
                                           self.cond_formats[cond_range])
def _write_conditional_formatting(self, cond_range, params):
    """Write a <conditionalFormatting> element and its cfRules."""
    self._xml_start_tag('conditionalFormatting', [('sqref', cond_range)])
    for param in params:
        # Write one cfRule element per parameter set.
        self._write_cf_rule(param)
    self._xml_end_tag('conditionalFormatting')
def _write_cf_rule(self, params):
    """Write the <cfRule> element.

    Dispatches on params['type']; each branch emits either an empty
    cfRule or a cfRule with formula/colorScale/dataBar/iconSet
    children. The attribute order must be preserved.
    """
    attributes = [('type', params['type'])]
    if 'format' in params and params['format'] is not None:
        attributes.append(('dxfId', params['format']))
    attributes.append(('priority', params['priority']))
    if params.get('stop_if_true'):
        attributes.append(('stopIfTrue', 1))
    if params['type'] == 'cellIs':
        attributes.append(('operator', params['criteria']))
        self._xml_start_tag('cfRule', attributes)
        if 'minimum' in params and 'maximum' in params:
            self._write_formula_element(params['minimum'])
            self._write_formula_element(params['maximum'])
        else:
            self._write_formula_element(params['value'])
        self._xml_end_tag('cfRule')
    elif params['type'] == 'aboveAverage':
        # The criteria string encodes below/equal/std-dev variants.
        if re.search('below', params['criteria']):
            attributes.append(('aboveAverage', 0))
        if re.search('equal', params['criteria']):
            attributes.append(('equalAverage', 1))
        if re.search('[123] std dev', params['criteria']):
            match = re.search('([123]) std dev', params['criteria'])
            attributes.append(('stdDev', match.group(1)))
        self._xml_empty_tag('cfRule', attributes)
    elif params['type'] == 'top10':
        if 'criteria' in params and params['criteria'] == '%':
            attributes.append(('percent', 1))
        if 'direction' in params:
            attributes.append(('bottom', 1))
        # Default to a rank of 10 when no value was supplied.
        rank = params['value'] or 10
        attributes.append(('rank', rank))
        self._xml_empty_tag('cfRule', attributes)
    elif params['type'] == 'duplicateValues':
        self._xml_empty_tag('cfRule', attributes)
    elif params['type'] == 'uniqueValues':
        self._xml_empty_tag('cfRule', attributes)
    elif (params['type'] == 'containsText'
            or params['type'] == 'notContainsText'
            or params['type'] == 'beginsWith'
            or params['type'] == 'endsWith'):
        attributes.append(('operator', params['criteria']))
        attributes.append(('text', params['value']))
        self._xml_start_tag('cfRule', attributes)
        self._write_formula_element(params['formula'])
        self._xml_end_tag('cfRule')
    elif params['type'] == 'timePeriod':
        attributes.append(('timePeriod', params['criteria']))
        self._xml_start_tag('cfRule', attributes)
        self._write_formula_element(params['formula'])
        self._xml_end_tag('cfRule')
    elif (params['type'] == 'containsBlanks'
            or params['type'] == 'notContainsBlanks'
            or params['type'] == 'containsErrors'
            or params['type'] == 'notContainsErrors'):
        self._xml_start_tag('cfRule', attributes)
        self._write_formula_element(params['formula'])
        self._xml_end_tag('cfRule')
    elif params['type'] == 'colorScale':
        self._xml_start_tag('cfRule', attributes)
        self._write_color_scale(params)
        self._xml_end_tag('cfRule')
    elif params['type'] == 'dataBar':
        self._xml_start_tag('cfRule', attributes)
        self._write_data_bar(params)
        # Excel 2010 data bars carry an extra extLst block.
        if params.get('is_data_bar_2010'):
            self._write_data_bar_ext(params)
        self._xml_end_tag('cfRule')
    elif params['type'] == 'expression':
        self._xml_start_tag('cfRule', attributes)
        self._write_formula_element(params['criteria'])
        self._xml_end_tag('cfRule')
    elif params['type'] == 'iconSet':
        self._xml_start_tag('cfRule', attributes)
        self._write_icon_set(params)
        self._xml_end_tag('cfRule')
def _write_formula_element(self, formula):
    """Write the <formula> element of a cfRule."""
    is_number = True
    try:
        float(formula)
    except ValueError:
        is_number = False
    # Non-numeric formulas have any leading '=' sign removed.
    if not is_number and formula.startswith('='):
        formula = formula.lstrip('=')
    self._xml_data_element('formula', formula)
def _write_color_scale(self, param):
    """Write the <colorScale> element.

    All cfvo children come first, then the color children; the mid
    point of each group is optional.
    """
    self._xml_start_tag('colorScale')
    self._write_cfvo(param['min_type'], param['min_value'])
    if param['mid_type'] is not None:
        self._write_cfvo(param['mid_type'], param['mid_value'])
    self._write_cfvo(param['max_type'], param['max_value'])
    self._write_color('rgb', param['min_color'])
    if param['mid_color'] is not None:
        self._write_color('rgb', param['mid_color'])
    self._write_color('rgb', param['max_color'])
    self._xml_end_tag('colorScale')
def _write_data_bar(self, param):
    """Write the <dataBar> element."""
    attributes = []
    # minLength/maxLength are in the spec but not supported directly
    # by Excel.
    if param.get('min_length'):
        attributes.append(('minLength', param['min_length']))
    if param.get('max_length'):
        attributes.append(('maxLength', param['max_length']))
    if param.get('bar_only'):
        attributes.append(('showValue', 0))
    self._xml_start_tag('dataBar', attributes)
    for bound in ('min', 'max'):
        self._write_cfvo(param[bound + '_type'], param[bound + '_value'])
    self._write_color('rgb', param['bar_color'])
    self._xml_end_tag('dataBar')
def _write_data_bar_ext(self, param):
    """Write the <extLst> dataBar extension element (Excel 2010)."""
    # Create a pseudo GUID for each unique Excel 2010 data bar; the
    # count is taken before appending so ids start at 1.
    guid = "{DA7ABA51-AAAA-BBBB-%04X-%012X}" % (
        self.index + 1, len(self.data_bars_2010) + 1)
    # Store the parameters so the extLst elements can be written later.
    param['guid'] = guid
    self.data_bars_2010.append(param)
    self._xml_start_tag('extLst')
    self._write_ext('{B025F937-C7B1-47D3-B67F-A62EFF666E3E}')
    self._xml_data_element('x14:id', guid)
    self._xml_end_tag('ext')
    self._xml_end_tag('extLst')
def _write_icon_set(self, param):
    """Write the <iconSet> element."""
    attributes = []
    # '3TrafficLights' is the default style and needs no attribute.
    if param['icon_style'] != '3TrafficLights':
        attributes = [('iconSet', param['icon_style'])]
    if param.get('icons_only'):
        attributes.append(('showValue', 0))
    if param.get('reverse_icons'):
        attributes.append(('reverse', 1))
    self._xml_start_tag('iconSet', attributes)
    # The cfvo elements are written in reverse order of the stored
    # icons.
    for icon in reversed(param['icons']):
        self._write_cfvo(icon['type'], icon['value'], icon['criteria'])
    self._xml_end_tag('iconSet')
def _write_cfvo(self, cf_type, val, criteria=None):
    """Write the <cfvo> (conditional format value object) element."""
    attributes = [('type', cf_type)]
    if val is not None:
        attributes.append(('val', val))
    # A truthy criteria flag is written as gte="0".
    if criteria:
        attributes.append(('gte', 0))
    self._xml_empty_tag('cfvo', attributes)
def _write_color(self, name, value):
    """Write a <color> element with a single attribute."""
    self._xml_empty_tag('color', [(name, value)])

def _write_selections(self):
    """Write all of the stored <selection> elements."""
    for sel in self.selections:
        self._write_selection(*sel)

def _write_selection(self, pane, active_cell, sqref):
    """Write a single <selection> element, omitting empty attributes."""
    attributes = []
    for key, value in (('pane', pane),
                       ('activeCell', active_cell),
                       ('sqref', sqref)):
        if value:
            attributes.append((key, value))
    self._xml_empty_tag('selection', attributes)
def _write_panes(self):
    """Write the frozen or split <pane> element, if any is defined."""
    if not self.panes:
        return
    # Element 4 holds the pane type; 2 indicates a split (not frozen) pane.
    if self.panes[4] == 2:
        writer = self._write_split_panes
    else:
        writer = self._write_freeze_panes
    writer(*self.panes)
def _write_freeze_panes(self, row, col, top_row, left_col, pane_type):
    """Write the <pane> element for frozen panes.

    Redistributes any stored user selection into the appropriate
    pane and records per-pane <selection> data for later output.
    """
    active_cell = ''
    sqref = ''

    # Carry over the user's cell selection, if one was set.
    if self.selections:
        (_, active_cell, sqref) = self.selections[0]
        self.selections = []

    # Pick the active pane and seed the selection list for each pane.
    if row and col:
        active_pane = 'bottomRight'
        row_cell = xl_rowcol_to_cell(row, 0)
        col_cell = xl_rowcol_to_cell(0, col)
        self.selections.append(['topRight', col_cell, col_cell])
        self.selections.append(['bottomLeft', row_cell, row_cell])
        self.selections.append(['bottomRight', active_cell, sqref])
    elif col:
        active_pane = 'topRight'
        self.selections.append(['topRight', active_cell, sqref])
    else:
        active_pane = 'bottomLeft'
        self.selections.append(['bottomLeft', active_cell, sqref])

    # Map the numeric pane type onto the schema's state attribute.
    state = {0: 'frozen', 1: 'frozenSplit'}.get(pane_type, 'split')

    attributes = []
    if col:
        attributes.append(('xSplit', col))
    if row:
        attributes.append(('ySplit', row))
    attributes.append(('topLeftCell', xl_rowcol_to_cell(top_row, left_col)))
    attributes.append(('activePane', active_pane))
    attributes.append(('state', state))
    self._xml_empty_tag('pane', attributes)
def _write_split_panes(self, row, col, top_row, left_col, pane_type):
    """Write the <pane> element for split (non-frozen) panes.

    Split positions are expressed in 1/20 twip units, and the
    selection handling differs slightly from the frozen case.
    """
    has_selection = 0
    active_cell = ''
    sqref = ''

    # Carry over the user's cell selection, if one was set.
    if self.selections:
        (_, active_cell, sqref) = self.selections[0]
        self.selections = []
        has_selection = 1

    # Convert the row/col positions to 1/20 twip units with padding.
    y_split = int(20 * row + 300) if row else row
    x_split = self._calculate_x_split_width(col) if col else col

    # For non-explicit topLeft definitions, estimate the cell offset
    # from the pixel dimensions. This is only a workaround and doesn't
    # take adjusted cell dimensions into account.
    if top_row == row and left_col == col:
        top_row = int(0.5 + (y_split - 300) / 20 / 15)
        left_col = int(0.5 + (x_split - 390) / 20 / 3 * 4 / 64)

    top_left_cell = xl_rowcol_to_cell(top_row, left_col)

    # With no user selection, default the active cell to the top left.
    if not has_selection:
        active_cell = top_left_cell
        sqref = top_left_cell

    # Pick the active pane and seed the selection list for each pane.
    if row and col:
        active_pane = 'bottomRight'
        row_cell = xl_rowcol_to_cell(top_row, 0)
        col_cell = xl_rowcol_to_cell(0, left_col)
        self.selections.append(['topRight', col_cell, col_cell])
        self.selections.append(['bottomLeft', row_cell, row_cell])
        self.selections.append(['bottomRight', active_cell, sqref])
    elif col:
        active_pane = 'topRight'
        self.selections.append(['topRight', active_cell, sqref])
    else:
        active_pane = 'bottomLeft'
        self.selections.append(['bottomLeft', active_cell, sqref])

    attributes = []
    # Format the splits to the same precision as Excel.
    if x_split:
        attributes.append(('xSplit', "%.16g" % x_split))
    if y_split:
        attributes.append(('ySplit', "%.16g" % y_split))
    attributes.append(('topLeftCell', top_left_cell))
    if has_selection:
        attributes.append(('activePane', active_pane))
    self._xml_empty_tag('pane', attributes)
def _calculate_x_split_width(self, width):
    """Convert a column width in character units to a pane-split
    width expressed in 1/20 twip units plus Excel's fixed offset."""
    max_digit_width = 7  # For Calibri 11.
    padding = 5

    # Convert the character width to pixels. Widths below one
    # character scale against the padded digit width.
    if width < 1:
        pixels = int(width * (max_digit_width + padding) + 0.5)
    else:
        pixels = int(width * max_digit_width + 0.5) + padding

    # Pixels -> points (3/4) -> twips (x20), plus a 390 twip offset.
    return pixels * 3 / 4 * 20 + 390
def _write_table_parts(self):
    """Write the <tableParts> element for any tables on the sheet."""
    count = len(self.tables)
    # Table-free worksheets write nothing.
    if not count:
        return

    self._xml_start_tag('tableParts', [('count', count)])
    for _ in range(count):
        # Each table consumes one worksheet relationship id.
        self.rel_count += 1
        self._write_table_part(self.rel_count)
    self._xml_end_tag('tableParts')

def _write_table_part(self, r_id):
    """Write a single <tablePart> element with its relationship id."""
    self._xml_empty_tag('tablePart', [('r:id', 'rId' + str(r_id))])
def _write_ext_list(self):
    """Write the worksheet <extLst> element when Excel 2010 data bars
    or sparklines are present."""
    has_data_bars = len(self.data_bars_2010)
    has_sparklines = len(self.sparklines)

    # Skip the element entirely when there are no extensions.
    if not (has_data_bars or has_sparklines):
        return

    self._xml_start_tag('extLst')
    if has_data_bars:
        self._write_ext_list_data_bars()
    if has_sparklines:
        self._write_ext_list_sparklines()
    self._xml_end_tag('extLst')

def _write_ext_list_data_bars(self):
    """Write the Excel 2010 data bar sub-elements of <extLst>."""
    self._write_ext('{78C0D931-6437-407d-A8EE-F0AAD7539E65}')
    self._xml_start_tag('x14:conditionalFormattings')
    # One x14:conditionalFormatting element per stored data bar.
    for data_bar in self.data_bars_2010:
        self._write_conditional_formatting_2010(data_bar)
    self._xml_end_tag('x14:conditionalFormattings')
    self._xml_end_tag('ext')
def _write_conditional_formatting_2010(self, data_bar):
    """Write an <x14:conditionalFormatting> element for one Excel 2010
    data bar, including its rule, cfvo bounds and optional colours."""
    xmlns_xm = 'http://schemas.microsoft.com/office/excel/2006/main'
    self._xml_start_tag('x14:conditionalFormatting',
                        [('xmlns:xm', xmlns_xm)])

    self._write_x14_cf_rule(data_bar)
    self._write_x14_data_bar(data_bar)

    # Min and max conditional-format value objects.
    self._write_x14_cfvo(data_bar['x14_min_type'], data_bar['min_value'])
    self._write_x14_cfvo(data_bar['x14_max_type'], data_bar['max_value'])

    # Border colour, unless borders are suppressed.
    if not data_bar['bar_no_border']:
        self._write_x14_border_color(data_bar['bar_border_color'])

    # Negative fill colour, when it differs from the positive one.
    if not data_bar['bar_negative_color_same']:
        self._write_x14_negative_fill_color(
            data_bar['bar_negative_color'])

    # Negative border colour (only meaningful when a border is drawn).
    if (not data_bar['bar_no_border'] and
            not data_bar['bar_negative_border_color_same']):
        self._write_x14_negative_border_color(
            data_bar['bar_negative_border_color'])

    # Axis colour, unless the axis is turned off.
    if data_bar['bar_axis_position'] != 'none':
        self._write_x14_axis_color(data_bar['bar_axis_color'])

    self._xml_end_tag('x14:dataBar')
    self._xml_end_tag('x14:cfRule')
    self._xml_data_element('xm:sqref', data_bar['range'])
    self._xml_end_tag('x14:conditionalFormatting')
def _write_x14_cf_rule(self, data_bar):
    """Write the opening <x14:cfRule> element for a data bar."""
    self._xml_start_tag(
        'x14:cfRule', [('type', 'dataBar'), ('id', data_bar['guid'])])

def _write_x14_data_bar(self, data_bar):
    """Write the opening <x14:dataBar> element with its options."""
    # Min/max bar lengths are fixed at the Excel defaults.
    attributes = [('minLength', 0), ('maxLength', 100)]

    if not data_bar['bar_no_border']:
        attributes.append(('border', 1))
    if data_bar['bar_solid']:
        attributes.append(('gradient', 0))

    direction = data_bar['bar_direction']
    if direction == 'left':
        attributes.append(('direction', 'leftToRight'))
    if direction == 'right':
        attributes.append(('direction', 'rightToLeft'))

    if data_bar['bar_negative_color_same']:
        attributes.append(('negativeBarColorSameAsPositive', 1))
    if (not data_bar['bar_no_border'] and
            not data_bar['bar_negative_border_color_same']):
        attributes.append(('negativeBarBorderColorSameAsPositive', 0))

    axis = data_bar['bar_axis_position']
    if axis == 'middle':
        attributes.append(('axisPosition', 'middle'))
    if axis == 'none':
        attributes.append(('axisPosition', 'none'))

    self._xml_start_tag('x14:dataBar', attributes)

def _write_x14_cfvo(self, rule_type, value):
    """Write an <x14:cfvo> element; only non-auto types carry an
    <xm:f> formula/value child."""
    attributes = [('type', rule_type)]
    if rule_type in ('min', 'max', 'autoMin', 'autoMax'):
        self._xml_empty_tag('x14:cfvo', attributes)
    else:
        self._xml_start_tag('x14:cfvo', attributes)
        self._xml_data_element('xm:f', value)
        self._xml_end_tag('x14:cfvo')
def _write_x14_border_color(self, rgb):
    """Write the <x14:borderColor> element."""
    self._xml_empty_tag('x14:borderColor', [('rgb', rgb)])

def _write_x14_negative_fill_color(self, rgb):
    """Write the <x14:negativeFillColor> element."""
    self._xml_empty_tag('x14:negativeFillColor', [('rgb', rgb)])

def _write_x14_negative_border_color(self, rgb):
    """Write the <x14:negativeBorderColor> element."""
    self._xml_empty_tag('x14:negativeBorderColor', [('rgb', rgb)])

def _write_x14_axis_color(self, rgb):
    """Write the <x14:axisColor> element."""
    self._xml_empty_tag('x14:axisColor', [('rgb', rgb)])
def _write_ext_list_sparklines(self):
    """Write the sparkline extension sub-elements of <extLst>."""
    self._write_ext('{05C60535-1F16-4fd2-B633-F4F36F0B64E0}')
    self._write_sparkline_groups()

    # Sparkline groups are written in reverse order of insertion.
    for sparkline in reversed(self.sparklines):
        self._write_sparkline_group(sparkline)

        # Fixed sequence of colour sub-elements.
        self._write_color_series(sparkline['series_color'])
        self._write_color_negative(sparkline['negative_color'])
        self._write_color_axis()
        self._write_color_markers(sparkline['markers_color'])
        self._write_color_first(sparkline['first_color'])
        self._write_color_last(sparkline['last_color'])
        self._write_color_high(sparkline['high_color'])
        self._write_color_low(sparkline['low_color'])

        # Optional formula for a date-based horizontal axis.
        if sparkline['date_axis']:
            self._xml_data_element('xm:f', sparkline['date_axis'])

        self._write_sparklines(sparkline)
        self._xml_end_tag('x14:sparklineGroup')

    self._xml_end_tag('x14:sparklineGroups')
    self._xml_end_tag('ext')

def _write_sparklines(self, sparkline):
    """Write the <x14:sparklines> element with one <x14:sparkline>
    per (range, location) pair."""
    self._xml_start_tag('x14:sparklines')
    for i in range(sparkline['count']):
        self._xml_start_tag('x14:sparkline')
        self._xml_data_element('xm:f', sparkline['ranges'][i])
        self._xml_data_element('xm:sqref', sparkline['locations'][i])
        self._xml_end_tag('x14:sparkline')
    self._xml_end_tag('x14:sparklines')
def _write_ext(self, uri):
    """Write an <ext> element carrying the x14 namespace and a URI."""
    xmlns_x14 = ('http://schemas.microsoft.com/office/'
                 'spreadsheetml/2009/9/main')
    self._xml_start_tag('ext', [('xmlns:x14', xmlns_x14), ('uri', uri)])

def _write_sparkline_groups(self):
    """Write the opening <x14:sparklineGroups> element."""
    xmlns_xm = 'http://schemas.microsoft.com/office/excel/2006/main'
    self._xml_start_tag('x14:sparklineGroups', [('xmlns:xm', xmlns_xm)])
def _write_sparkline_group(self, options):
    """Write the opening <x14:sparklineGroup> element.

    Attributes must appear in Excel's expected order: manualMax,
    manualMin, type, lineWeight, dateAxis, displayEmptyCellsAs,
    markers, high, low, first, last, negative, displayXAxis,
    displayHidden, minAxisType, maxAxisType, rightToLeft.
    """
    attributes = []

    # Manual max/min: a 'group' setting is deferred to the axis-type
    # attributes below; an explicit number becomes a manual bound.
    if options.get('max') is not None:
        if options['max'] == 'group':
            options['cust_max'] = 'group'
        else:
            attributes.append(('manualMax', options['max']))
            options['cust_max'] = 'custom'
    if options.get('min') is not None:
        if options['min'] == 'group':
            options['cust_min'] = 'group'
        else:
            attributes.append(('manualMin', options['min']))
            options['cust_min'] = 'custom'

    # Ignore the default sparkline type ('line').
    if options['type'] != 'line':
        attributes.append(('type', options['type']))
    if options.get('weight'):
        attributes.append(('lineWeight', options['weight']))
    if options.get('date_axis'):
        attributes.append(('dateAxis', 1))
    if options.get('empty'):
        attributes.append(('displayEmptyCellsAs', options.get('empty')))

    # Boolean display options, in schema order.
    for key, attr in (('markers', 'markers'),
                      ('high', 'high'),
                      ('low', 'low'),
                      ('first', 'first'),
                      ('last', 'last'),
                      ('negative', 'negative'),
                      ('axis', 'displayXAxis'),
                      ('hidden', 'displayHidden')):
        if options.get(key):
            attributes.append((attr, 1))

    if options.get('cust_min'):
        attributes.append(('minAxisType', options['cust_min']))
    if options.get('cust_max'):
        attributes.append(('maxAxisType', options['cust_max']))
    if options.get('reverse'):
        attributes.append(('rightToLeft', 1))

    self._xml_start_tag('x14:sparklineGroup', attributes)
def _write_spark_color(self, element, color):
    """Write a sparkline colour element with rgb/theme/tint attributes."""
    attributes = []
    for key in ('rgb', 'theme', 'tint'):
        if color.get(key):
            attributes.append((key, color[key]))
    self._xml_empty_tag(element, attributes)

def _write_color_series(self, color):
    """Write the <x14:colorSeries> element."""
    self._write_spark_color('x14:colorSeries', color)

def _write_color_negative(self, color):
    """Write the <x14:colorNegative> element."""
    self._write_spark_color('x14:colorNegative', color)

def _write_color_axis(self):
    """Write the <x14:colorAxis> element (fixed to black)."""
    self._write_spark_color('x14:colorAxis', {'rgb': 'FF000000'})

def _write_color_markers(self, color):
    """Write the <x14:colorMarkers> element."""
    self._write_spark_color('x14:colorMarkers', color)

def _write_color_first(self, color):
    """Write the <x14:colorFirst> element."""
    self._write_spark_color('x14:colorFirst', color)

def _write_color_last(self, color):
    """Write the <x14:colorLast> element."""
    self._write_spark_color('x14:colorLast', color)

def _write_color_high(self, color):
    """Write the <x14:colorHigh> element."""
    self._write_spark_color('x14:colorHigh', color)

def _write_color_low(self, color):
    """Write the <x14:colorLow> element."""
    self._write_spark_color('x14:colorLow', color)
def _write_phonetic_pr(self):
    """Write the <phoneticPr> element with its fixed defaults."""
    self._xml_empty_tag('phoneticPr',
                        [('fontId', '0'), ('type', 'noConversion')])
| 34.185373 | 79 | 0.544113 |
e8eeaaa7bf4fd3a74f4b1c592887712cabfc158a | 3,231 | py | Python | Codes/Python32/Lib/test/test_fcntl.py | eyantra/FireBird_Swiss_Knife | cac322cf28e2d690b86ba28a75e87551e5e47988 | [
"MIT"
] | 2 | 2019-03-03T00:04:36.000Z | 2020-10-06T16:22:38.000Z | Codes/Python32/Lib/test/test_fcntl.py | eyantra/FireBird_Swiss_Knife | cac322cf28e2d690b86ba28a75e87551e5e47988 | [
"MIT"
] | null | null | null | Codes/Python32/Lib/test/test_fcntl.py | eyantra/FireBird_Swiss_Knife | cac322cf28e2d690b86ba28a75e87551e5e47988 | [
"MIT"
] | 1 | 2019-04-11T11:27:01.000Z | 2019-04-11T11:27:01.000Z | """Test program for the fcntl C module.
OS/2+EMX doesn't support the file locking operations.
"""
import os
import struct
import sys
import unittest
from test.support import verbose, TESTFN, unlink, run_unittest, import_module
# Skip the test if the fcntl module is not available.
fcntl = import_module('fcntl')
# TODO - Write tests for flock() and lockf().
def get_lockdata():
    """Build the platform-specific packed lock record used by the
    F_SETLKW tests, or None on platforms without fcntl file locks."""
    # Platforms with O_LARGEFILE use 64-bit ('q') start/len fields.
    try:
        os.O_LARGEFILE
    except AttributeError:
        start_len = "ll"
    else:
        start_len = "qq"

    if sys.platform in ('netbsd1', 'netbsd2', 'netbsd3',
                        'Darwin1.2', 'darwin',
                        'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5',
                        'freebsd6', 'freebsd7', 'freebsd8',
                        'bsdos2', 'bsdos3', 'bsdos4',
                        'openbsd', 'openbsd2', 'openbsd3', 'openbsd4'):
        # BSD-family layout; field widths depend on the size of a C long.
        if struct.calcsize('l') == 8:
            off_t = 'l'
            pid_t = 'i'
        else:
            off_t = 'lxxxx'
            pid_t = 'l'
        lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
                               fcntl.F_WRLCK, 0)
    elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
        lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    elif sys.platform in ['os2emx']:
        # OS/2 EMX does not support fcntl file locking.
        lockdata = None
    else:
        # Default (SysV/Linux style) layout.
        lockdata = struct.pack('hh' + start_len + 'hh',
                               fcntl.F_WRLCK, 0, 0, 0, 0, 0)

    if lockdata and verbose:
        print('struct.pack: ', repr(lockdata))
    return lockdata


lockdata = get_lockdata()
class TestFcntl(unittest.TestCase):
    """Smoke tests for fcntl.fcntl() with both raw descriptors and
    file objects."""

    def setUp(self):
        self.f = None

    def tearDown(self):
        # Close and remove the temp file if a test left it open.
        if self.f and not self.f.closed:
            self.f.close()
        unlink(TESTFN)

    def test_fcntl_fileno(self):
        # The example from the library docs, using a numeric descriptor.
        self.f = open(TESTFN, 'w')
        rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
        if verbose:
            print('Status from fcntl with O_NONBLOCK: ', rv)
        if sys.platform not in ['os2emx']:
            rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
            if verbose:
                print('String from fcntl with F_SETLKW: ', repr(rv))
        self.f.close()

    def test_fcntl_file_descriptor(self):
        # Same as above, but pass the file object rather than its fileno.
        self.f = open(TESTFN, 'w')
        fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
        if sys.platform not in ['os2emx']:
            fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
        self.f.close()

    def test_fcntl_64_bit(self):
        # Issue #1309352: fcntl shouldn't fail when the third arg fits
        # in a C 'long' but not in a C 'int'.
        try:
            cmd = fcntl.F_NOTIFY
            # This flag is larger than 2**31 in 64-bit builds.
            flags = fcntl.DN_MULTISHOT
        except AttributeError:
            self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
        fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
        try:
            fcntl.fcntl(fd, cmd, flags)
        finally:
            os.close(fd)
def test_main():
    # Entry point for the regrtest driver.
    run_unittest(TestFcntl)


if __name__ == '__main__':
    test_main()
| 31.067308 | 81 | 0.56546 |
85aa7236446f5af041a804ae250926e5b36e68ae | 26,189 | py | Python | SmartFoxServer_PRO_1.6.6/Server/lib/Lib/random.py | ChisdealHD/DetlasWorldLinux | 336465a4df1a48c9a273329fc7a09d8099c4e4d5 | [
"MIT"
] | 8 | 2016-11-24T09:38:31.000Z | 2021-04-23T13:04:48.000Z | SmartFoxServer_PRO_1.6.6/Server/lib/Lib/random.py | ChisdealHD/DetlasWorldLinux | 336465a4df1a48c9a273329fc7a09d8099c4e4d5 | [
"MIT"
] | 4 | 2018-02-22T07:42:13.000Z | 2021-12-13T10:53:09.000Z | SmartFoxServer_PRO_1.6.6/Server/lib/Lib/random.py | ChisdealHD/DetlasWorldLinux | 336465a4df1a48c9a273329fc7a09d8099c4e4d5 | [
"MIT"
] | 4 | 2018-12-04T12:21:05.000Z | 2021-02-05T08:00:14.000Z | """Random variable generators.
integers
--------
uniform within range
sequences
---------
pick random element
generate random permutation
distributions on the real line:
------------------------------
uniform
normal (Gaussian)
lognormal
negative exponential
gamma
beta
distributions on the circle (angles 0 to 2pi)
---------------------------------------------
circular uniform
von Mises
Translated from anonymously contributed C/C++ source.
Multi-threading note: the random number generator used here is not thread-
safe; it is possible that two calls return the same random value. However,
you can instantiate a different instance of Random() in each thread to get
generators that don't share state, then use .setstate() and .jumpahead() to
move the generators to disjoint segments of the full period. For example,
def create_generators(num, delta, firstseed=None):
""\"Return list of num distinct generators.
Each generator has its own unique segment of delta elements from
Random.random()'s full period.
Seed the first generator with optional arg firstseed (default is
None, to seed from current time).
""\"
from random import Random
g = Random(firstseed)
result = [g]
for i in range(num - 1):
laststate = g.getstate()
g = Random()
g.setstate(laststate)
g.jumpahead(delta)
result.append(g)
return result
gens = create_generators(10, 1000000)
That creates 10 distinct generators, which can be passed out to 10 distinct
threads. The generators don't share state so can be called safely in
parallel. So long as no thread calls its g.random() more than a million
times (the second argument to create_generators), the sequences seen by
each thread will not overlap.
The period of the underlying Wichmann-Hill generator is 6,953,607,871,644,
and that limits how far this technique can be pushed.
Just for fun, note that since we know the period, .jumpahead() can also be
used to "move backward in time":
>>> g = Random(42) # arbitrary
>>> g.random()
0.25420336316883324
>>> g.jumpahead(6953607871644L - 1) # move *back* one
>>> g.random()
0.25420336316883324
"""
# XXX The module docstring above is rough and should be rewritten.
from math import log as _log, exp as _exp, pi as _pi, e as _e
from math import sqrt as _sqrt, acos as _acos, cos as _cos, sin as _sin
from math import floor as _floor
# Names exported by "from random import *".
__all__ = ["Random","seed","random","uniform","randint","choice",
"randrange","shuffle","normalvariate","lognormvariate",
"cunifvariate","expovariate","vonmisesvariate","gammavariate",
"stdgamma","gauss","betavariate","paretovariate","weibullvariate",
"getstate","setstate","jumpahead","whseed"]
def _verify(name, computed, expected):
    """Sanity-check a computed module constant against its expected value."""
    # Allow tiny floating point differences across platform libms.
    if abs(computed - expected) > 1e-7:
        raise ValueError(
            "computed value for %s deviates too much "
            "(computed %g, expected %g)" % (name, computed, expected))
# Constants used by the distribution methods below. Each one is checked
# against a precomputed value to catch platform math-library drift.
# Magic constant for the Kinderman-Monahan method in normalvariate().
NV_MAGICCONST = 4 * _exp(-0.5)/_sqrt(2.0)
_verify('NV_MAGICCONST', NV_MAGICCONST, 1.71552776992141)
# Full circle in radians; used by the circular distributions.
TWOPI = 2.0*_pi
_verify('TWOPI', TWOPI, 6.28318530718)
# Natural log of 4; used by gammavariate().
LOG4 = _log(4.0)
_verify('LOG4', LOG4, 1.38629436111989)
# 1 + ln(4.5); used by gammavariate().
SG_MAGICCONST = 1.0 + _log(4.5)
_verify('SG_MAGICCONST', SG_MAGICCONST, 2.50407739677627)
# The checker is only needed at import time.
del _verify
# Translated by Guido van Rossum from C source provided by
# Adrian Baddeley.
class Random:
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state. Especially useful for multi-threaded programs, creating
a different instance of Random for each thread, and using the jumpahead()
method to ensure that the generated sequences seen by each thread don't
overlap.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), setstate() and jumpahead().
"""
VERSION = 1 # used by getstate/setstate
def __init__(self, x=None):
    """Create a new generator.

    The optional argument x seeds the generator exactly as seed() would.
    """
    self.seed(x)
## -------------------- core generator -------------------
# Specific to Wichmann-Hill generator. Subclasses wishing to use a
# different core generator should override the seed(), random(),
# getstate(), setstate() and jumpahead() methods.
def seed(self, a=None):
"""Initialize internal state from hashable object.
None or no argument seeds from current time.
If a is not None or an int or long, hash(a) is used instead.
If a is an int or long, a is used directly. Distinct values between
0 and 27814431486575L inclusive are guaranteed to yield distinct
internal states (this guarantee is specific to the default
Wichmann-Hill generator).
"""
if a is None:
# Initialize from current time
import time
a = long(time.time() * 256)
# Non-integer seeds (e.g. strings) are reduced to their hash.
if type(a) not in (type(3), type(3L)):
a = hash(a)
# Split the seed into the three Wichmann-Hill components, each
# offset by 1 so that no component starts at zero.
a, x = divmod(a, 30268)
a, y = divmod(a, 30306)
a, z = divmod(a, 30322)
self._seed = int(x)+1, int(y)+1, int(z)+1
# Invalidate any cached auxiliary state held for the Gaussian method.
self.gauss_next = None
def random(self):
    """Return the next pseudo-random float in [0.0, 1.0)."""
    # Wichmann-Hill generator, AS 183 (Applied Statistics 31 (1982)
    # 188-190), with the corrections noted in Applied Statistics 33
    # (1984) 123 and 34 (1985) 198-200.
    #
    # The state update is not thread-safe: two threads can read the
    # same seed before either writes the new one back.
    # BEGIN CRITICAL SECTION
    a, b, c = self._seed
    a = (171 * a) % 30269
    b = (172 * b) % 30307
    c = (170 * c) % 30323
    self._seed = a, b, c
    # END CRITICAL SECTION

    # On IEEE-754 doubles this sum taken mod 1.0 can never be exactly
    # 0.0 (per a proof too long to reproduce here).
    return (a/30269.0 + b/30307.0 + c/30323.0) % 1.0
def getstate(self):
    """Capture the generator state; pass the result to setstate() later."""
    return self.VERSION, self._seed, self.gauss_next

def setstate(self, state):
    """Restore state from an object returned by getstate()."""
    version = state[0]
    if version != 1:
        raise ValueError("state with version %s passed to "
                         "Random.setstate() of version %s" %
                         (version, self.VERSION))
    version, self._seed, self.gauss_next = state
def jumpahead(self, n):
    """Advance the state as if random() had been called n times, quickly.

    n must be a non-negative int. This lets cooperating generators
    (e.g. one per thread) use guaranteed-disjoint segments of the
    full period; see the module docstring for an example.
    """
    if not n >= 0:
        raise ValueError("n must be >= 0")
    x, y, z = self._seed
    # Each component jumps by its multiplier raised to the n-th power,
    # computed modulo that component's modulus.
    self._seed = (int(x * pow(171, n, 30269)) % 30269,
                  int(y * pow(172, n, 30307)) % 30307,
                  int(z * pow(170, n, 30323)) % 30323)
def __whseed(self, x=0, y=0, z=0):
"""Set the Wichmann-Hill seed from (x, y, z).
These must be integers in the range [0, 256).
"""
# Each component must be a plain int in [0, 256); (0, 0, 0) means
# "derive a seed from the current time".
if not type(x) == type(y) == type(z) == type(0):
raise TypeError('seeds must be integers')
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError('seeds must be in range(0, 256)')
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
# Fold the high bits into the low 24, then split into three bytes.
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
self.gauss_next = None
def whseed(self, a=None):
"""Seed from hashable object's hash code.
None or no argument seeds from current time. It is not guaranteed
that objects with distinct hash codes lead to distinct internal
states.
This is obsolete, provided for compatibility with the seed routine
used prior to Python 2.1. Use the .seed() method instead.
"""
if a is None:
self.__whseed()
return
a = hash(a)
# Split the hash into three bytes plus a remainder, then mix the
# remainder back into each byte; 0 maps to 1 (zero is a poor seed).
a, x = divmod(a, 256)
a, y = divmod(a, 256)
a, z = divmod(a, 256)
x = (x + a) % 256 or 1
y = (y + a) % 256 or 1
z = (z + a) % 256 or 1
self.__whseed(x, y, z)
## ---- Methods below this point do not need to be overridden when
## ---- subclassing for the purpose of using a different core generator.
## -------------------- pickle support -------------------
def __getstate__(self):
    """Pickle support: delegate to getstate()."""
    return self.getstate()

def __setstate__(self, state):
    """Pickle support: delegate to setstate()."""
    self.setstate(state)
## -------------------- integer methods -------------------
def randrange(self, start, stop=None, step=1, int=int, default=None):
"""Choose a random item from range(start, stop[, step]).
This fixes the problem with randint() which includes the
endpoint; in Python this is usually not what you want.
Do not supply the 'int' and 'default' arguments.
"""
# 'int' and 'default' are bound as argument defaults purely as a
# local-name lookup speed hack; callers must never pass them.
# This code is a bit messy to make it fast for the
# common case while still doing adequate error checking.
istart = int(start)
if istart != start:
raise ValueError, "non-integer arg 1 for randrange()"
if stop is default:
# One-argument form: randrange(stop).
if istart > 0:
return int(self.random() * istart)
raise ValueError, "empty range for randrange()"
# stop argument supplied.
istop = int(stop)
if istop != stop:
raise ValueError, "non-integer stop for randrange()"
if step == 1 and istart < istop:
try:
return istart + int(self.random()*(istop - istart))
except OverflowError:
# This can happen if istop-istart > sys.maxint + 1, and
# multiplying by random() doesn't reduce it to something
# <= sys.maxint. We know that the overall result fits
# in an int, and can still do it correctly via math.floor().
# But that adds another function call, so for speed we
# avoided that whenever possible.
return int(istart + _floor(self.random()*(istop - istart)))
if step == 1:
raise ValueError, "empty range for randrange()"
# Non-unit step argument supplied.
istep = int(step)
if istep != step:
raise ValueError, "non-integer step for randrange()"
# n is the number of candidate values in the stepped range.
if istep > 0:
n = (istop - istart + istep - 1) / istep
elif istep < 0:
n = (istop - istart + istep + 1) / istep
else:
raise ValueError, "zero step for randrange()"
if n <= 0:
raise ValueError, "empty range for randrange()"
return istart + istep*int(self.random() * n)
def randint(self, a, b):
    """Return a random integer N with a <= N <= b (both ends included)."""
    # randrange() excludes its upper bound, so extend it by one.
    return self.randrange(a, b+1)

## -------------------- sequence methods  -------------------

def choice(self, seq):
    """Return a uniformly chosen element of the non-empty sequence seq."""
    return seq[int(self.random() * len(seq))]

def shuffle(self, x, random=None, int=int):
    """Shuffle the list x in place; return None.

    Optional arg random is a 0-argument function returning a random
    float in [0.0, 1.0); by default the generator's own random().

    Note that for even rather small len(x) the total number of
    permutations exceeds the generator's period, so most permutations
    of a long sequence can never be generated.
    """
    if random is None:
        random = self.random
    # Fisher-Yates: walk backwards, swapping each slot with a random
    # slot at or before it.
    for i in xrange(len(x)-1, 0, -1):
        j = int(random() * (i+1))
        x[i], x[j] = x[j], x[i]
## -------------------- real-valued distributions -------------------
## -------------------- uniform distribution -------------------
def uniform(self, a, b):
    """Return a random float in the half-open range [a, b)."""
    return a + (b-a) * self.random()
## -------------------- normal distribution --------------------
def normalvariate(self, mu, sigma):
    """Normal distribution with mean mu and standard deviation sigma.

    Uses the Kinderman-Monahan ratio-of-uniforms method: Kinderman,
    A.J. and Monahan, J.F., "Computer generation of random variables
    using the ratio of uniform deviates", ACM Trans Math Software 3
    (1977), pp 257-260.
    """
    random = self.random
    while True:
        u1 = random()
        u2 = 1.0 - random()
        z = NV_MAGICCONST*(u1-0.5)/u2
        # Accept z when z*z/4 <= -ln(u2), else resample.
        if z*z/4.0 <= -_log(u2):
            return mu + z*sigma
## -------------------- lognormal distribution --------------------
def lognormvariate(self, mu, sigma):
    """Log normal distribution.

    The natural logarithm of the result is normally distributed with
    mean mu and standard deviation sigma. mu may be any value; sigma
    must be greater than zero.
    """
    return _exp(self.normalvariate(mu, sigma))

## -------------------- circular uniform --------------------

def cunifvariate(self, mean, arc):
    """Circular uniform distribution.

    mean is the mean angle and arc the width of the distribution
    centered on it, both in radians. Results lie between mean - arc/2
    and mean + arc/2, normalized into [0, pi).

    Deprecated in version 2.3. Use:
    (mean + arc * (Random.random() - 0.5)) % Math.pi
    """
    return (mean + arc * (self.random() - 0.5)) % _pi
## -------------------- exponential distribution --------------------
def expovariate(self, lambd):
    """Exponential distribution.

    lambd is 1.0 divided by the desired mean. (The parameter would be
    called "lambda", but that is a reserved word in Python.) Results
    range from 0 to positive infinity.
    """
    random = self.random
    # Reject tiny uniforms so -log(u) stays finite and bounded.
    u = random()
    while u <= 1e-7:
        u = random()
    return -_log(u)/lambd
## -------------------- von Mises distribution --------------------
    def vonmisesvariate(self, mu, kappa):
        """Circular data distribution.
        mu is the mean angle, expressed in radians between 0 and 2*pi, and
        kappa is the concentration parameter, which must be greater than or
        equal to zero. If kappa is equal to zero, this distribution reduces
        to a uniform random angle over the range 0 to 2*pi.
        """
        # mu: mean angle (in radians between 0 and 2*pi)
        # kappa: concentration parameter kappa (>= 0)
        # if kappa = 0 generate uniform random angle
        # Based upon an algorithm published in: Fisher, N.I.,
        # "Statistical Analysis of Circular Data", Cambridge
        # University Press, 1993.
        # Thanks to Magnus Kessler for a correction to the
        # implementation of step 4.
        random = self.random
        if kappa <= 1e-6:
            # Near-zero concentration: distribution is uniform on [0, 2*pi).
            return TWOPI * random()
        # Set up the rejection-sampling constants (Fisher's a, b, r).
        a = 1.0 + _sqrt(1.0 + 4.0 * kappa * kappa)
        b = (a - _sqrt(2.0 * a))/(2.0 * kappa)
        r = (1.0 + b * b)/(2.0 * b)
        while 1:
            u1 = random()
            z = _cos(_pi * u1)
            f = (1.0 + r * z)/(r + z)
            c = kappa * (r - f)
            u2 = random()
            # Accept the candidate when either acceptance test passes.
            if not (u2 >= c * (2.0 - c) and u2 > c * _exp(1.0 - c)):
                break
        # u3 picks the sign of the angular deviation from mu.
        u3 = random()
        if u3 > 0.5:
            theta = (mu % TWOPI) + _acos(f)
        else:
            theta = (mu % TWOPI) - _acos(f)
        return theta
## -------------------- gamma distribution --------------------
def gammavariate(self, alpha, beta):
"""Gamma distribution. Not the gamma function!
Conditions on the parameters are alpha > 0 and beta > 0.
"""
# alpha > 0, beta > 0, mean is alpha*beta, variance is alpha*beta**2
# Warning: a few older sources define the gamma distribution in terms
# of alpha > -1.0
if alpha <= 0.0 or beta <= 0.0:
raise ValueError, 'gammavariate: alpha and beta must be > 0.0'
random = self.random
if alpha > 1.0:
# Uses R.C.H. Cheng, "The generation of Gamma
# variables with non-integral shape parameters",
# Applied Statistics, (1977), 26, No. 1, p71-74
ainv = _sqrt(2.0 * alpha - 1.0)
bbb = alpha - LOG4
ccc = alpha + ainv
while 1:
u1 = random()
if not 1e-7 < u1 < .9999999:
continue
u2 = 1.0 - random()
v = _log(u1/(1.0-u1))/ainv
x = alpha*_exp(v)
z = u1*u1*u2
r = bbb+ccc*v-x
if r + SG_MAGICCONST - 4.5*z >= 0.0 or r >= _log(z):
return x * beta
elif alpha == 1.0:
# expovariate(1)
u = random()
while u <= 1e-7:
u = random()
return -_log(u) * beta
else: # alpha is between 0 and 1 (exclusive)
# Uses ALGORITHM GS of Statistical Computing - Kennedy & Gentle
while 1:
u = random()
b = (_e + alpha)/_e
p = b*u
if p <= 1.0:
x = pow(p, 1.0/alpha)
else:
# p > 1
x = -_log((b-p)/alpha)
u1 = random()
if not (((p <= 1.0) and (u1 > _exp(-x))) or
((p > 1) and (u1 > pow(x, alpha - 1.0)))):
break
return x * beta
def stdgamma(self, alpha, ainv, bbb, ccc):
# This method was (and shall remain) undocumented.
# This method is deprecated
# for the following reasons:
# 1. Returns same as .gammavariate(alpha, 1.0)
# 2. Requires caller to provide 3 extra arguments
# that are functions of alpha anyway
# 3. Can't be used for alpha < 0.5
# ainv = sqrt(2 * alpha - 1)
# bbb = alpha - log(4)
# ccc = alpha + ainv
import warnings
warnings.warn("The stdgamma function is deprecated; "
"use gammavariate() instead",
DeprecationWarning)
return self.gammavariate(alpha, 1.0)
## -------------------- Gauss (faster alternative) --------------------
    def gauss(self, mu, sigma):
        """Gaussian distribution.
        mu is the mean, and sigma is the standard deviation. This is
        slightly faster than the normalvariate() function.
        Not thread-safe without a lock around calls.
        """
        # When x and y are two variables from [0, 1), uniformly
        # distributed, then
        #
        #    cos(2*pi*x)*sqrt(-2*log(1-y))
        #    sin(2*pi*x)*sqrt(-2*log(1-y))
        #
        # are two *independent* variables with normal distribution
        # (mu = 0, sigma = 1).
        # (Lambert Meertens)
        # (corrected version; bug discovered by Mike Miller, fixed by LM)
        # Multithreading note: When two threads call this function
        # simultaneously, it is possible that they will receive the
        # same return value.  The window is very small though.  To
        # avoid this, you have to use a lock around all calls.  (I
        # didn't want to slow this down in the serial case by using a
        # lock here.)
        random = self.random
        # Each Box-Muller step produces two deviates; the second one is
        # cached in self.gauss_next and consumed by the next call.
        z = self.gauss_next
        self.gauss_next = None
        if z is None:
            x2pi = random() * TWOPI
            g2rad = _sqrt(-2.0 * _log(1.0 - random()))
            z = _cos(x2pi) * g2rad
            self.gauss_next = _sin(x2pi) * g2rad
        return mu + z*sigma
## -------------------- beta --------------------
## See
## http://sourceforge.net/bugs/?func=detailbug&bug_id=130030&group_id=5470
## for Ivan Frohne's insightful analysis of why the original implementation:
##
## def betavariate(self, alpha, beta):
## # Discrete Event Simulation in C, pp 87-88.
##
## y = self.expovariate(alpha)
## z = self.expovariate(1.0/beta)
## return z/(y+z)
##
## was dead wrong, and how it probably got that way.
    def betavariate(self, alpha, beta):
        """Beta distribution.
        Conditions on the parameters are alpha > 0 and beta > 0
        (gammavariate, used below, raises ValueError otherwise).
        Returned values range between 0 and 1.
        """
        # This version due to Janne Sinkkonen, and matches all the std
        # texts (e.g., Knuth Vol 2 Ed 3 pg 134 "the beta distribution").
        y = self.gammavariate(alpha, 1.)
        if y == 0:
            return 0.0
        else:
            return y / (y + self.gammavariate(beta, 1.))
## -------------------- Pareto --------------------
def paretovariate(self, alpha):
"""Pareto distribution. alpha is the shape parameter."""
# Jain, pg. 495
u = 1.0 - self.random()
return 1.0 / pow(u, 1.0/alpha)
## -------------------- Weibull --------------------
def weibullvariate(self, alpha, beta):
"""Weibull distribution.
alpha is the scale parameter and beta is the shape parameter.
"""
# Jain, pg. 499; bug fix courtesy Bill Arms
u = 1.0 - self.random()
return alpha * pow(-_log(u), 1.0/beta)
## -------------------- test program --------------------
def _test_generator(n, funccall):
    # Benchmark helper (Python 2 syntax): evaluate the expression in
    # `funccall` n times and print elapsed time plus summary statistics
    # (avg, stddev, min, max).  `funccall` is eval'd against the module
    # globals, so it can name any exported distribution function.
    import time
    print n, 'times', funccall
    code = compile(funccall, funccall, 'eval')
    sum = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = eval(code)
        sum = sum + x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = sum/n
    # NOTE: one-pass variance E[x^2] - E[x]^2 -- numerically fragile for
    # large means, but adequate for a smoke test.
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
          (avg, stddev, smallest, largest)
def _test(N=20000):
    # Module smoke test (Python 2 syntax): print the precomputed constants,
    # benchmark every distribution N times, then check that jumpahead(N)
    # reproduces the state reached by N sequential random() calls.
    print 'TWOPI =', TWOPI
    print 'LOG4 =', LOG4
    print 'NV_MAGICCONST =', NV_MAGICCONST
    print 'SG_MAGICCONST =', SG_MAGICCONST
    _test_generator(N, 'random()')
    _test_generator(N, 'normalvariate(0.0, 1.0)')
    _test_generator(N, 'lognormvariate(0.0, 1.0)')
    _test_generator(N, 'cunifvariate(0.0, 1.0)')
    _test_generator(N, 'expovariate(1.0)')
    _test_generator(N, 'vonmisesvariate(0.0, 1.0)')
    _test_generator(N, 'gammavariate(0.01, 1.0)')
    _test_generator(N, 'gammavariate(0.1, 1.0)')
    _test_generator(N, 'gammavariate(0.1, 2.0)')
    _test_generator(N, 'gammavariate(0.5, 1.0)')
    _test_generator(N, 'gammavariate(0.9, 1.0)')
    _test_generator(N, 'gammavariate(1.0, 1.0)')
    _test_generator(N, 'gammavariate(2.0, 1.0)')
    _test_generator(N, 'gammavariate(20.0, 1.0)')
    _test_generator(N, 'gammavariate(200.0, 1.0)')
    _test_generator(N, 'gauss(0.0, 1.0)')
    _test_generator(N, 'betavariate(3.0, 3.0)')
    _test_generator(N, 'paretovariate(1.0)')
    _test_generator(N, 'weibullvariate(1.0, 1.0)')
    # Test jumpahead.
    s = getstate()
    jumpahead(N)
    r1 = random()
    # now do it the slow way
    setstate(s)
    for i in range(N):
        random()
    r2 = random()
    if r1 != r2:
        raise ValueError("jumpahead test failed " + `(N, r1, r2)`)
# Create one instance, seeded from current time, and export its methods
# as module-level functions. The functions are not threadsafe, and state
# is shared across all uses (both in the user's code and in the Python
# libraries), but that's fine for most programs and is easier for the
# casual user than making them instantiate their own Random() instance.
# Singleton generator shared by the whole process; its bound methods are
# re-exported as module-level functions below.  Convenient, but not
# thread-safe, and the state is shared by every user of the module.
_inst = Random()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
randint = _inst.randint
choice = _inst.choice
randrange = _inst.randrange
shuffle = _inst.shuffle
normalvariate = _inst.normalvariate
lognormvariate = _inst.lognormvariate
cunifvariate = _inst.cunifvariate
expovariate = _inst.expovariate
vonmisesvariate = _inst.vonmisesvariate
gammavariate = _inst.gammavariate
stdgamma = _inst.stdgamma
gauss = _inst.gauss
betavariate = _inst.betavariate
paretovariate = _inst.paretovariate
weibullvariate = _inst.weibullvariate
getstate = _inst.getstate
setstate = _inst.setstate
jumpahead = _inst.jumpahead
whseed = _inst.whseed
# Run the smoke test when executed as a script.
if __name__ == '__main__':
    _test()
| 33.575641 | 77 | 0.564741 |
5bf9a122aebe319b3d78e31855d1c3fc0adda79a | 2,965 | py | Python | tests/test_task.py | igorcoding/asynctnt_queue | 75719b2dd27e8314ae924aea6a7a85be8f48ecc5 | [
"Apache-2.0"
] | null | null | null | tests/test_task.py | igorcoding/asynctnt_queue | 75719b2dd27e8314ae924aea6a7a85be8f48ecc5 | [
"Apache-2.0"
] | null | null | null | tests/test_task.py | igorcoding/asynctnt_queue | 75719b2dd27e8314ae924aea6a7a85be8f48ecc5 | [
"Apache-2.0"
] | null | null | null | from asynctnt_queue import Queue, Tube, Task
from asynctnt_queue.task import Status
from tests import BaseTarantoolTestCase
class TaskTestCase(BaseTarantoolTestCase):
    """Integration tests for Task state transitions (ack/release/bury/...).
    NOTE(review): assumes BaseTarantoolTestCase supplies a connected
    ``self.conn`` and that a 'test_tube' queue exists on the server --
    verify against the base fixture.
    """
    def create_tube(self):
        # Helper: wrap the shared connection in a Queue and return the tube.
        q = Queue(self.conn)
        return q.tube("test_tube")
    def _data_obj(self):
        # Canonical payload used by every test below.
        return {
            'key': 'value'
        }
    async def test__task_tube(self):
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        self.assertIs(t.tube, tube, 'tube is the same object')
    async def test__task_repr(self):
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        self.assertEqual(repr(t), '<Task id=0 status=Status.READY>')
    async def test__task_ack(self):
        # take + ack -> EXECUTED, same id and data.
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        t2 = await tube.take()
        t2 = await t2.ack()
        self.assertEqual(t2.task_id, t.task_id)
        self.assertEqual(t2.status, Status.EXECUTED)
        self.assertEqual(t2.data, t.data)
    async def test__task_release(self):
        # take + release -> back to READY.
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        t2 = await tube.take()
        t2 = await t2.release()
        self.assertEqual(t2.task_id, t.task_id)
        self.assertEqual(t2.status, Status.READY)
        self.assertEqual(t2.data, t.data)
    async def test__task_release_delay(self):
        # release with a delay parks the task in DELAYED instead of READY.
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        t2 = await tube.take()
        t2 = await t2.release(delay=5)
        self.assertEqual(t2.task_id, t.task_id)
        self.assertEqual(t2.status, Status.DELAYED)
        self.assertEqual(t2.data, t.data)
    async def test__task_bury(self):
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        t2 = await tube.take()
        t2 = await t2.bury()
        self.assertEqual(t2.task_id, t.task_id)
        self.assertEqual(t2.status, Status.BURIED)
        self.assertEqual(t2.data, t.data)
    async def test__task_peek(self):
        # peek does not change the TAKEN status.
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        t2 = await tube.take()
        t2 = await t2.peek()
        self.assertEqual(t2.task_id, t.task_id)
        self.assertEqual(t2.status, Status.TAKEN)
        self.assertEqual(t2.data, t.data)
    async def test__task_touch(self):
        # touch extends the ttl/ttr; task stays TAKEN.
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        t2 = await tube.take()
        t2 = await t2.touch(1)
        self.assertEqual(t2.task_id, t.task_id)
        self.assertEqual(t2.status, Status.TAKEN)
        self.assertEqual(t2.data, t.data)
    async def test__task_delete(self):
        tube = self.create_tube()
        t = await tube.put(self._data_obj())
        t2 = await tube.take()
        t2 = await t2.delete()
        self.assertEqual(t2.task_id, t.task_id)
        self.assertEqual(t2.status, Status.EXECUTED)
        self.assertEqual(t2.data, t.data)
| 33.314607 | 68 | 0.619562 |
86997d5eda19f50affe745405ee36c4ab51a686c | 975 | py | Python | examples/hci/10-spin.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 2 | 2021-11-17T05:52:32.000Z | 2022-02-07T02:44:53.000Z | examples/hci/10-spin.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 36 | 2018-08-22T19:44:03.000Z | 2020-05-09T10:02:36.000Z | examples/hci/10-spin.py | robert-anderson/pyscf | cdc56e168cb15f47e8cdc791a92d689fa9b655af | [
"Apache-2.0"
] | 4 | 2018-02-14T16:28:28.000Z | 2019-08-12T16:40:30.000Z | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Assign spin state for HCI wavefunction.
If spin-contamination is observed for HCI wavefunction, :func:`hci.fix_spin`
function can be used to level shift the states of wrong spin. This is often
helpful to reduce the spin-contamination.
'''
from pyscf import gto, scf, ao2mo
from pyscf.hci import hci
# Build a single oxygen atom (spin=0 singlet reference) in the 6-31G basis
# and converge a restricted Hartree-Fock calculation.
mol = gto.M(atom='O 0 0 0', basis='631g', spin=0)
myhf = scf.RHF(mol).run()
cisolver = hci.SCI(mol)
nmo = myhf.mo_coeff.shape[1]
nelec = mol.nelec
# One- and two-electron integrals transformed into the MO basis.
h1 = myhf.mo_coeff.T.dot(myhf.get_hcore()).dot(myhf.mo_coeff)
h2 = ao2mo.full(mol, myhf.mo_coeff)
# Plain HCI solve; the resulting state may be spin-contaminated.
e, civec = cisolver.kernel(h1, h2, nmo, nelec, verbose=4)
print('E = %.12f 2S+1 = %.7f' %
      (e, cisolver.spin_square(civec[0], nmo, nelec)[1]))
# Level-shift states of the wrong spin to target the S^2 = 0 state.
cisolver = hci.fix_spin(cisolver, ss=0) # ss = S^2
e, civec = cisolver.kernel(h1, h2, nmo, nelec, verbose=4)
print('E = %.12f 2S+1 = %.7f' %
      (e, cisolver.spin_square(civec[0], nmo, nelec)[1]))
da49e1ed49e1344cba429343efcbb4d8ae857a34 | 643 | py | Python | plotly/validators/scatterternary/textfont/_family.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 12 | 2020-04-18T18:10:22.000Z | 2021-12-06T10:11:15.000Z | plotly/validators/scatterternary/textfont/_family.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/scatterternary/textfont/_family.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 6 | 2020-04-18T23:07:08.000Z | 2021-11-18T07:53:06.000Z | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the `scatterternary.textfont.family` property."""
    def __init__(
        self,
        plotly_name='family',
        parent_name='scatterternary.textfont',
        **kwargs
    ):
        # Defaults below may be overridden by the caller via **kwargs; each
        # key is popped so it is not forwarded twice to the base class.
        overridable = {
            'array_ok': True,
            'edit_type': 'calc',
            'no_blank': True,
            'role': 'style',
            'strict': True,
        }
        resolved = {key: kwargs.pop(key, default)
                    for key, default in overridable.items()}
        resolved.update(kwargs)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **resolved
        )
| 29.227273 | 68 | 0.597201 |
e6a52c1679dc30b610058a0307aa705cc848abaa | 9,646 | py | Python | alf/environments/suite_unittest.py | jesbu1/alf | def59fe39bdbca70a6c80e9b8f2c7c785cb59ea7 | [
"Apache-2.0"
] | null | null | null | alf/environments/suite_unittest.py | jesbu1/alf | def59fe39bdbca70a6c80e9b8f2c7c785cb59ea7 | [
"Apache-2.0"
] | null | null | null | alf/environments/suite_unittest.py | jesbu1/alf | def59fe39bdbca70a6c80e9b8f2c7c785cb59ea7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environments for unittest."""
from abc import abstractmethod
from enum import Enum
import numpy as np
import torch
import alf
from alf.data_structures import StepType, TimeStep
from alf.tensor_specs import BoundedTensorSpec, TensorSpec
# Kinds of action spaces the unittest environments can expose.
ActionType = Enum('ActionType', ('Discrete', 'Continuous'))
class UnittestEnv(object):
    """Abstract base for unittest environment.
    Every episode ends in `episode_length` steps (including LAST step).
    The observation is one dimensional.
    The action is binary {0, 1} when action_type is ActionType.Discrete
    and a float value in range (0.0, 1.0) when action_type is ActionType.Continuous
    """
    def __init__(self,
                 batch_size,
                 episode_length,
                 obs_dim=1,
                 action_type=ActionType.Discrete):
        """Initializes the environment.
        Args:
            batch_size (int): The batch size expected for the actions and
                observations.
            episode_length (int): length of each episode
            obs_dim (int): dimension of the flat observation vector
            action_type (nest): ActionType (may be a nest of ActionTypes,
                producing a matching nest of action specs)
        """
        self._steps = 0
        self._episode_length = episode_length
        super(UnittestEnv, self).__init__()
        self._action_type = action_type
        def _create_action_spec(act_type):
            # Discrete actions: scalar int64 in {0, 1}.
            # Continuous actions: one float32 in [0, 1].
            if act_type == ActionType.Discrete:
                return BoundedTensorSpec(
                    shape=(), dtype=torch.int64, minimum=0, maximum=1)
            else:
                return BoundedTensorSpec(
                    shape=(1, ), dtype=torch.float32, minimum=[0], maximum=[1])
        # Mirror the nest structure of `action_type` in the action spec.
        self._action_spec = alf.nest.map_structure(_create_action_spec,
                                                   action_type)
        self._observation_spec = TensorSpec(
            shape=(obs_dim, ), dtype=torch.float32)
        self._batch_size = batch_size
        self.reset()
    @property
    def batched(self):
        # This environment always emits batched tensors.
        return True
    @property
    def batch_size(self):
        return self._batch_size
    def action_spec(self):
        return self._action_spec
    def observation_spec(self):
        return self._observation_spec
    def reset(self):
        """Restart the episode: step counter to 0, FIRST step with zero
        prev_action and consecutive env ids."""
        self._steps = 0
        time_step = self._gen_time_step(0, None)
        self._current_time_step = time_step._replace(
            prev_action=alf.nest.map_structure(
                lambda spec: spec.zeros([self.batch_size]), self._action_spec),
            env_id=torch.arange(self.batch_size, dtype=torch.int32))
        return self._current_time_step
    def step(self, action):
        """Advance one step; wraps modulo `episode_length` so a new episode
        starts immediately after a LAST step."""
        self._steps += 1
        time_step = self._gen_time_step(self._steps % self._episode_length,
                                        action)
        self._current_time_step = time_step._replace(
            prev_action=action,
            env_id=torch.arange(self.batch_size, dtype=torch.int32))
        return self._current_time_step
    def current_time_step(self):
        return self._current_time_step
    @abstractmethod
    def _gen_time_step(self, s, action):
        """Generate time step.
        Args:
            s (int): step count in current episode. It ranges from 0 to
                `episode_length` - 1.
            action: action from agent.
        Returns:
            time_step (TimeStep)
        """
        pass
class ValueUnittestEnv(UnittestEnv):
    """Environment for testing value estimation.
    Every episode lasts `episode_length` steps; a reward of 1 is given at
    every step and the observation is a constant vector of ones.
    """
    def _gen_time_step(self, s, action):
        """Return the current `TimeStep`."""
        # Classify the step and pick the matching discount.
        if s == 0:
            step_type, discount = StepType.FIRST, 1.0
        elif s == self._episode_length - 1:
            step_type, discount = StepType.LAST, 0.0
        else:
            step_type, discount = StepType.MID, 1.0
        batch = self.batch_size
        return TimeStep(
            step_type=torch.full([batch], step_type, dtype=torch.int32),
            reward=torch.ones(batch),
            discount=torch.full([batch], discount),
            observation=torch.ones(batch))
class PolicyUnittestEnv(UnittestEnv):
    """Environment for testing policy.
    The agent receives 1-diff(action, observation) as reward
    """
    def _gen_time_step(self, s, action):
        step_type = StepType.MID
        discount = 1.0
        if s == 0:
            step_type = StepType.FIRST
        elif s == self._episode_length - 1:
            step_type = StepType.LAST
            discount = 0.0
        if s == 0:
            # No previous observation to compare against at episode start.
            reward = torch.zeros(self.batch_size)
        else:
            # Reward is 1 minus |previous observation - action|, so matching
            # the previous observation yields the maximum reward of 1.
            prev_observation = self._current_time_step.observation
            reward = 1.0 - torch.abs(prev_observation -
                                     action.reshape(prev_observation.shape))
            reward = reward.reshape(self.batch_size)
        # New target: an independent random 0/1 value per batch element.
        observation = torch.randint(
            0, 2, size=(self.batch_size, 1), dtype=torch.float32)
        return TimeStep(
            step_type=torch.full([self.batch_size],
                                 step_type,
                                 dtype=torch.int32),
            reward=reward,
            discount=torch.full([self.batch_size], discount),
            observation=observation)
class MixedPolicyUnittestEnv(UnittestEnv):
    """Environment for testing a mixed policy.
    Given the agent's `(discrete, continuous)` action pair ``(a_d, a_c)``, if
    ``a_d == (a_c > 0.5)``, the agent receives a reward of 1; otherwise it
    receives 0.
    """
    def __init__(self, batch_size, episode_length, obs_dim=1):
        """Initializes the environment.
        Args:
            batch_size (int): The batch size expected for the actions and
                observations.
            episode_length (int): length of each episode
            obs_dim (int): dimension of the flat observation vector
        """
        super().__init__(
            batch_size=batch_size,
            episode_length=episode_length,
            obs_dim=obs_dim,
            action_type=[ActionType.Discrete, ActionType.Continuous])
    def _gen_time_step(self, s, action):
        step_type = StepType.MID
        discount = 1.0
        reward = torch.zeros(self.batch_size)
        if s == 0:
            step_type = StepType.FIRST
        elif s == self._episode_length - 1:
            step_type = StepType.LAST
            discount = 0.0
        if s > 0:
            # Reward 1 iff the discrete action equals the thresholded
            # continuous action (a_c > 0.5 cast to int64).
            reward = (action[0] == (action[1].squeeze(-1) > 0.5).to(
                torch.int64)).to(torch.float32)
        # Observation is pure noise; the reward depends only on the action.
        observation = self._observation_spec.randn(
            outer_dims=(self.batch_size, ))
        return TimeStep(
            step_type=torch.full([self.batch_size],
                                 step_type,
                                 dtype=torch.int32),
            reward=reward,
            discount=torch.full([self.batch_size], discount),
            observation=observation)
class RNNPolicyUnittestEnv(UnittestEnv):
    """Environment for testing RNN policy.
    The agent receives reward 1 after the initial `gap` steps if its
    action matches the observation given at the first step, so solving
    the task requires remembering the first observation across steps.
    """
    def __init__(self,
                 batch_size,
                 episode_length,
                 gap=3,
                 action_type=ActionType.Discrete,
                 obs_dim=1):
        # gap (int): number of initial steps with zero reward before the
        #     memory-dependent reward kicks in.
        self._gap = gap
        self._obs_dim = obs_dim
        super(RNNPolicyUnittestEnv, self).__init__(
            batch_size,
            episode_length,
            action_type=action_type,
            obs_dim=obs_dim)
    def _gen_time_step(self, s, action):
        step_type = StepType.MID
        discount = 1.0
        obs_dim = self._obs_dim
        if s == 0:
            # First column is the +/-1 target the agent must remember;
            # remaining columns (if obs_dim > 1) are constant ones.
            self._observation0 = 2. * torch.randint(
                0, 2, size=(self.batch_size, 1)) - 1.
            if obs_dim > 1:
                self._observation0 = torch.cat([
                    self._observation0,
                    torch.ones(self.batch_size, obs_dim - 1)
                ],
                                               dim=-1)
            step_type = StepType.FIRST
        elif s == self._episode_length - 1:
            step_type = StepType.LAST
            discount = 0.0
        if s <= self._gap:
            reward = torch.zeros(self.batch_size)
        else:
            # Map the {0, 1} action to {-1, +1} and compare with the target
            # remembered from step 0; exact match gives reward 1.
            obs0 = self._observation0[:, 0].reshape(self.batch_size, 1)
            reward = 1.0 - 0.5 * torch.abs(2 * action.reshape(obs0.shape) - 1 -
                                           obs0)
            reward = reward.reshape(self.batch_size)
        if s == 0:
            observation = self._observation0
        else:
            # All later observations are zero, forcing the agent to rely
            # on memory rather than the current observation.
            observation = torch.zeros(self.batch_size, obs_dim)
        return TimeStep(
            step_type=torch.full([self.batch_size],
                                 step_type,
                                 dtype=torch.int32),
            reward=reward,
            discount=torch.full([self.batch_size], discount),
            observation=observation)
| 32.809524 | 87 | 0.576094 |
7b86e3b290a68336b14777ffb570df1e576a3140 | 19,413 | py | Python | robot_sim/robots/fr5/fr5.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | robot_sim/robots/fr5/fr5.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | robot_sim/robots/fr5/fr5.py | wangyan-hlab/wrs | 8f81cdd33a419d5b4ffe18d13cd4cbf9f258bc7c | [
"MIT"
] | null | null | null | import os
import math
import numpy as np
import basis.robot_math as rm
import modeling.model_collection as mc
import modeling.collision_model as cm
import robot_sim._kinematics.jlchain as jl
import robot_sim.manipulators.fr5.fr5 as fr
import robot_sim.end_effectors.gripper.robotiq85.robotiq85 as rtq
from panda3d.core import CollisionNode, CollisionBox, Point3
import robot_sim.robots.robot_interface as ri
class FR5_robot(ri.RobotInterface):
"""
author: wangyan
date: 2022/02/21, Suzhou
"""
    def __init__(self, pos=np.zeros(3), rotmat=np.eye(3), name='fr5', homeconf=np.zeros(6),
                 enable_cc=True, arm_jacobian_offset=np.zeros(3), hnd_attached=False, zrot_to_gndbase=np.radians(135)):
        """Build the ground base, the FR5 arm and (optionally) a Robotiq85 gripper.
        :param pos: 1x3 position of the robot base in the world frame
        :param rotmat: 3x3 rotation of the robot base in the world frame
        :param name: robot name
        :param homeconf: 1x6 home joint configuration of the arm
        :param enable_cc: set up collision checking at construction time
        :param arm_jacobian_offset: 1x3 TCP offset passed to the arm model
        :param hnd_attached: attach a Robotiq85 gripper to the flange
        :param zrot_to_gndbase: z-rotation (rad) of the arm relative to the
            ground base
        """
        super().__init__(pos=pos, rotmat=rotmat, name=name)
        this_dir, this_filename = os.path.split(__file__)
        # Ground base: a 0-dof chain carrying the pedestal collision model.
        self.ground_base = jl.JLChain(pos=pos, rotmat=rotmat, homeconf=np.zeros(0), name="fr5_to_ground_base")
        self.ground_base.jnts[0]['loc_pos'] = np.array([0, 0, 0])
        self.ground_base.lnks[0]['name'] = "ground_base"
        self.ground_base.lnks[0]['loc_pos'] = np.array([0, 0, 0])
        self.ground_base.lnks[0]['collision_model'] = cm.CollisionModel(
            os.path.join(this_dir, "meshes/ground_base.stl"),
            cdprimit_type="user_defined", expand_radius=.002,
            userdefined_cdprimitive_fn=self._base_combined_cdnp)
        self.ground_base.lnks[0]['rgba'] = [.5, .5, .5, 1.0]
        self.ground_base.reinitialize()
        # Arm mounted on the ground base, rotated by zrot_to_gndbase about z.
        self.arm = fr.FR5(pos=self.ground_base.jnts[0]['gl_posq'],
                          rotmat=np.dot(self.ground_base.jnts[0]['gl_rotmatq'],
                                        rm.rotmat_from_euler(0, 0, zrot_to_gndbase)),
                          homeconf=homeconf,
                          enable_cc=False,
                          arm_offset=arm_jacobian_offset)
        self.manipulator_dict['arm'] = self.arm
        self.manipulator_dict['hnd'] = self.arm
        self.hnd_attached = hnd_attached
        if hnd_attached:
            # Cancel the arm's jacobian offset so the hand sits on the flange.
            self.hnd_offset = -arm_jacobian_offset
            self.hnd_jaw_center_pos = arm_jacobian_offset + self.hnd_offset
            self.hnd = rtq.Robotiq85(pos=np.dot(self.arm.jnts[-1]['gl_rotmatq'],
                                                self.hnd_offset) + self.arm.jnts[-1]['gl_posq'],
                                     rotmat=self.arm.jnts[-1]['gl_rotmatq'],
                                     enable_cc=False,
                                     jaw_center_pos=self.hnd_jaw_center_pos)
            # tool center point
            self.arm.tcp_jntid = -1
            self.arm.tcp_loc_pos = self.hnd.jaw_center_pos
            self.arm.tcp_loc_rotmat = self.hnd.jaw_center_rotmat
        # a list of detailed information about objects in hand, see CollisionChecker.add_objinhnd
        self.oih_infos = []
        # NOTE(review): self.hnd is only assigned when hnd_attached is True;
        # the two lines below will raise AttributeError for hnd_attached=False
        # -- confirm intended usage.
        self.hnd_dict['arm'] = self.hnd
        self.hnd_dict['hnd'] = self.hnd
        # collision detection
        if enable_cc:
            self.enable_cc()
    @staticmethod
    def _base_combined_cdnp(name, radius):
        """Build the user-defined collision primitive set for the pedestal.
        Approximates the ground base mesh with axis-aligned boxes: a top
        plate, a central column, a floor plate and four corner feet.
        :param name: name for the Panda3D CollisionNode
        :param radius: inflation margin (m) added to each box half-extent
        :return: a panda3d CollisionNode containing the boxes
        NOTE(review): box dimensions are hand-tuned to meshes/ground_base.stl
        -- keep them in sync if the mesh changes.
        """
        collision_node = CollisionNode(name)
        # Top mounting plate.
        collision_primitive_c0 = CollisionBox(Point3(0.0, 0.0, -.01),
                                              x=.075 + radius, y=.075 + radius, z=.01 + radius)
        collision_node.addSolid(collision_primitive_c0)
        # Central column.
        collision_primitive_c1 = CollisionBox(Point3(0.0, 0.0, -.3325),
                                              x=.05 + radius, y=.05 + radius, z=.3125 + radius)
        collision_node.addSolid(collision_primitive_c1)
        # Floor plate.
        collision_primitive_c2 = CollisionBox(Point3(0.0, 0.0, -.655),
                                              x=.225 + radius, y=.225 + radius, z=.01 + radius)
        collision_node.addSolid(collision_primitive_c2)
        # Four corner feet (defined by opposite-corner points).
        collision_primitive_l0 = CollisionBox(Point3(.1534, .1675, -.505),
                                              Point3(.2445, .2304, -.645))
        collision_node.addSolid(collision_primitive_l0)
        collision_primitive_r0 = CollisionBox(Point3(-.1534, .1675, -.505),
                                              Point3(-.2445, .2304, -.645))
        collision_node.addSolid(collision_primitive_r0)
        collision_primitive_l1 = CollisionBox(Point3(-.1534, -.1675, -.505),
                                              Point3(-.2445, -.2304, -.645))
        collision_node.addSolid(collision_primitive_l1)
        collision_primitive_r1 = CollisionBox(Point3(.1534, -.1675, -.505),
                                              Point3(.2445, -.2304, -.645))
        collision_node.addSolid(collision_primitive_r1)
        return collision_node
def enable_cc(self):
super().enable_cc()
self.cc.add_cdlnks(self.ground_base, [0])
self.cc.add_cdlnks(self.arm, [0, 1, 2, 3, 4, 5, 6])
if self.hnd_attached:
self.cc.add_cdlnks(self.hnd.lft_outer, [0, 1, 2, 3, 4])
self.cc.add_cdlnks(self.hnd.rgt_outer, [1, 2, 3, 4])
# lnks used for cd with external stationary objects
activelist_arm = [self.arm.lnks[0],
self.arm.lnks[1],
self.arm.lnks[2],
self.arm.lnks[3],
self.arm.lnks[4],
self.arm.lnks[5],
self.arm.lnks[6]]
if self.hnd_attached:
activelist_hnd = [self.hnd.lft_outer.lnks[0],
self.hnd.lft_outer.lnks[1],
self.hnd.lft_outer.lnks[2],
self.hnd.lft_outer.lnks[3],
self.hnd.lft_outer.lnks[4],
self.hnd.rgt_outer.lnks[1],
self.hnd.rgt_outer.lnks[2],
self.hnd.rgt_outer.lnks[3],
self.hnd.rgt_outer.lnks[4]]
activelist = activelist_arm + activelist_hnd
else:
activelist = activelist_arm
self.cc.set_active_cdlnks(activelist)
# lnks used for arm-body collision detection
fromlist = [self.ground_base.lnks[0]]
intolist_arm = [self.arm.lnks[3],
self.arm.lnks[4],
self.arm.lnks[5],
self.arm.lnks[6]]
if self.hnd_attached:
intolist_hnd = [self.hnd.lft_outer.lnks[0],
self.hnd.lft_outer.lnks[1],
self.hnd.lft_outer.lnks[2],
self.hnd.lft_outer.lnks[3],
self.hnd.lft_outer.lnks[4],
self.hnd.rgt_outer.lnks[1],
self.hnd.rgt_outer.lnks[2],
self.hnd.rgt_outer.lnks[3],
self.hnd.rgt_outer.lnks[4]]
intolist = intolist_arm + intolist_hnd
else:
intolist = intolist_arm
self.cc.set_cdpair(fromlist, intolist)
fromlist = [self.arm.lnks[0],
self.arm.lnks[1]]
intolist_arm = [self.arm.lnks[3],
self.arm.lnks[5],
self.arm.lnks[6]]
if self.hnd_attached:
intolist_hnd = [self.hnd.lft_outer.lnks[0],
self.hnd.lft_outer.lnks[1],
self.hnd.lft_outer.lnks[2],
self.hnd.lft_outer.lnks[3],
self.hnd.lft_outer.lnks[4],
self.hnd.rgt_outer.lnks[1],
self.hnd.rgt_outer.lnks[2],
self.hnd.rgt_outer.lnks[3],
self.hnd.rgt_outer.lnks[4]]
intolist = intolist_arm + intolist_hnd
else:
intolist = intolist_arm
self.cc.set_cdpair(fromlist, intolist)
fromlist = [self.arm.lnks[2]]
intolist_arm = [self.arm.lnks[4],
self.arm.lnks[5],
self.arm.lnks[6]]
if self.hnd_attached:
intolist_hnd = [self.hnd.lft_outer.lnks[0],
self.hnd.lft_outer.lnks[1],
self.hnd.lft_outer.lnks[2],
self.hnd.lft_outer.lnks[3],
self.hnd.lft_outer.lnks[4],
self.hnd.rgt_outer.lnks[1],
self.hnd.rgt_outer.lnks[2],
self.hnd.rgt_outer.lnks[3],
self.hnd.rgt_outer.lnks[4]]
intolist = intolist_arm + intolist_hnd
else:
intolist = intolist_arm
self.cc.set_cdpair(fromlist, intolist)
fromlist = [self.arm.lnks[3]]
intolist_arm = [self.arm.lnks[6]]
if self.hnd_attached:
intolist_hnd = [self.hnd.lft_outer.lnks[0],
self.hnd.lft_outer.lnks[1],
self.hnd.lft_outer.lnks[2],
self.hnd.lft_outer.lnks[3],
self.hnd.lft_outer.lnks[4],
self.hnd.rgt_outer.lnks[1],
self.hnd.rgt_outer.lnks[2],
self.hnd.rgt_outer.lnks[3],
self.hnd.rgt_outer.lnks[4]]
intolist = intolist_arm + intolist_hnd
else:
intolist = intolist_arm
self.cc.set_cdpair(fromlist, intolist)
def get_hnd_on_manipulator(self, manipulator_name):
if manipulator_name == 'arm':
return self.hnd
else:
raise ValueError("The given jlc does not have a hand!")
    def get_gl_tcp(self, manipulator_name="arm"):
        # Global TCP pose of the named manipulator; plain delegation to the
        # RobotInterface base implementation.
        return super().get_gl_tcp(manipulator_name=manipulator_name)
def get_jnt_values(self, component_name="arm"):
if component_name in self.manipulator_dict:
return self.manipulator_dict[component_name].get_jnt_values()
    def align_axis_down(self):
        """Solve IK that keeps the current TCP position but points the TCP
        straight down (target rotation diag(-1, 1, -1)).
        :return: 1x6 joint values from the IK solver, seeded with the
            current configuration.
        NOTE(review): failure semantics depend on self.arm.ik -- presumably
        it returns None when no solution is found; confirm.
        """
        seed_jnt_values = self.arm.get_jnt_values()
        position = self.arm.get_gl_tcp()[0]
        orientation_new = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
        jnt_values = self.arm.ik(tgt_pos=position, tgt_rotmat=orientation_new, seed_jnt_values=seed_jnt_values)
        return jnt_values
    def fix_to(self, pos, rotmat):
        """Re-anchor the whole robot (base, arm, hand) at a new world pose.
        :param pos: 1x3 new base position
        :param rotmat: 3x3 new base rotation
        NOTE(review): the arm is re-attached with euler(0,0,0) here, while
        __init__ uses zrot_to_gndbase -- verify this difference is intended.
        """
        super().fix_to(pos, rotmat)
        self.pos = pos
        self.rotmat = rotmat
        self.ground_base.fix_to(self.pos, self.rotmat)
        self.arm.fix_to(pos=self.ground_base.jnts[0]['gl_posq'],
                        rotmat=np.dot(self.ground_base.jnts[0]['gl_rotmatq'],
                                      rm.rotmat_from_euler(0,0,0)))
        if self.hnd_attached:
            # Re-seat the hand on the (possibly moved) flange.
            self.hnd.fix_to(pos=np.dot(self.arm.jnts[-1]['gl_rotmatq'],
                                       self.hnd_offset) + self.arm.jnts[-1]['gl_posq'],
                            rotmat=self.arm.jnts[-1]['gl_rotmatq'])
    def jaw_to(self, hnd_name='hnd', jawwidth=0.0):
        # Command the named gripper to the given jaw width.
        self.hnd_dict[hnd_name].jaw_to(jawwidth)
    def hold(self, hnd_name, objcm, jawwidth=None):
        """Attach objcm to the hand: register it with the collision checker
        as part of the robot so it is checked against the arm links.
        :param hnd_name: key into hnd_dict
        :param objcm: collision model of the object to hold
        :param jawwidth: optional jaw width to close to before holding
        :return: (rel_pos, rel_rotmat) of the object relative to the TCP
        """
        if hnd_name not in self.hnd_dict:
            raise ValueError("Hand name does not exist!")
        if jawwidth is not None:
            self.hnd_dict[hnd_name].jaw_to(jawwidth)
        # Object pose expressed in the TCP frame (stays fixed while held).
        rel_pos, rel_rotmat = self.manipulator_dict[hnd_name].cvt_gl_to_loc_tcp(objcm.get_pos(), objcm.get_rotmat())
        # Arm links the held object must be checked against.
        intolist = [self.arm.lnks[0],
                    self.arm.lnks[1],
                    self.arm.lnks[2],
                    self.arm.lnks[3],
                    self.arm.lnks[4],
                    self.arm.lnks[5],
                    self.arm.lnks[6]]
        self.oih_infos.append(self.cc.add_cdobj(objcm, rel_pos, rel_rotmat, intolist))
        return rel_pos, rel_rotmat
def get_oih_list(self):
return_list = []
for obj_info in self.oih_infos:
objcm = obj_info['collisionmodel']
objcm.set_pos(obj_info['gl_pos'])
objcm.set_rotmat(obj_info['gl_rotmat'])
return_list.append(objcm)
return return_list
    def get_gl_pose_from_hio(self, component_name, hio_pos, hio_rotmat):
        """
        get the global pose of an object from a grasp pose described in an object's local frame
        :param component_name: must be 'arm'
        :param hio_pos: a grasp pose described in an object's local frame -- pos
        :param hio_rotmat: a grasp pose described in an object's local frame -- rotmat
        :return: (pos, rotmat) of the object in the world frame
        author: weiwei
        date: 20210302
        """
        if component_name != 'arm':
            raise ValueError("Component name for Fr5_Robot must be \'arm\'!")
        # Current global pose of the flange/hand frame.
        hnd_pos = self.arm.jnts[-1]['gl_posq']
        hnd_rotmat = self.arm.jnts[-1]['gl_rotmatq']
        hnd_homomat = rm.homomat_from_posrot(hnd_pos, hnd_rotmat)
        hio_homomat = rm.homomat_from_posrot(hio_pos, hio_rotmat)
        # Invert hand-in-object to get object-in-hand, then compose with the
        # hand's global pose: T_gl_obj = T_gl_hnd * T_hnd_obj.
        oih_homomat = rm.homomat_inverse(hio_homomat)
        gl_obj_homomat = hnd_homomat.dot(oih_homomat)
        return gl_obj_homomat[:3, 3], gl_obj_homomat[:3, :3]
    def release(self, hnd_name, objcm, jawwidth=None):
        """Detach a previously held objcm: remove it from the collision
        checker and from the objects-in-hand bookkeeping.
        :param hnd_name: key into hnd_dict
        :param objcm: the exact collision model instance passed to hold()
        :param jawwidth: optional jaw width to open to while releasing
        """
        if hnd_name not in self.hnd_dict:
            raise ValueError("Hand name does not exist!")
        if jawwidth is not None:
            self.hnd_dict[hnd_name].jaw_to(jawwidth)
        for obj_info in self.oih_infos:
            # NOTE(review): this reads key 'collision_model' while
            # get_oih_list() reads 'collisionmodel' from the same dicts --
            # one of the two spellings must be wrong; verify against
            # CollisionChecker.add_cdobj.
            if obj_info['collision_model'] is objcm:
                self.cc.delete_cdobj(obj_info)
                self.oih_infos.remove(obj_info)
                break
def fk(self, component_name, jnt_values):
    """
    Forward kinematics: drive the named component to the given joint
    values, keeping the attached hand and any held objects in sync.
    :param jnt_values: nparray 1x6
    :param component_name: 'arm' or 'fr5_to_ground_base'
    :return: None
    author: weiwei
    date: 20201208toyonaka, 20210403osaka
    """
    def update_oih(component_name='arm'):
        # refresh the global pose of every object held in hand
        for obj_info in self.oih_infos:
            gl_pos, gl_rotmat = self.cvt_loc_tcp_to_gl(component_name, obj_info['rel_pos'], obj_info['rel_rotmat'])
            obj_info['gl_pos'] = gl_pos
            obj_info['gl_rotmat'] = gl_rotmat
    def update_component(component_name='arm', jnt_values=np.zeros(6)):
        # move the manipulator, then re-attach the hand at its offset and
        # finally update held objects (order matters)
        self.manipulator_dict[component_name].fk(jnt_values=jnt_values)
        if self.hnd_attached:
            self.hnd_dict[component_name].fix_to(
                pos=np.dot(self.manipulator_dict[component_name].jnts[-1]['gl_rotmatq'],
                           self.hnd_offset)+self.manipulator_dict[component_name].jnts[-1]['gl_posq'],
                rotmat=self.manipulator_dict[component_name].jnts[-1]['gl_rotmatq'])
        update_oih(component_name=component_name)
    super().fk(component_name, jnt_values)
    # examine length
    if component_name in self.manipulator_dict:
        if not isinstance(jnt_values, np.ndarray) or jnt_values.size != 6:
            raise ValueError("An 1x6 npdarray must be specified to move the arm!")
        update_component(component_name, jnt_values)
    elif component_name == "fr5_to_ground_base":
        self.ground_base.fk(jnt_values)
        # re-seat the arm on the moved base
        self.arm.fix_to(pos=self.ground_base.jnts[0]['gl_posq'],
                        rotmat=np.dot(self.ground_base.jnts[0]['gl_rotmatq'],
                                      rm.rotmat_from_euler(0,0,0)))
    else:
        raise ValueError("The given component name is not available!")
def gen_meshmodel(self,
                  tcp_jnt_id=None,
                  tcp_loc_pos=None,
                  tcp_loc_rotmat=None,
                  toggle_tcpcs=False,
                  toggle_jntscs=False,
                  rgba=None,
                  name='fr5_meshmodel'):
    """Collect the mesh models of the base, arm and (optional) hand
    into a single ModelCollection and return it."""
    collection = mc.ModelCollection(name=name)
    base_model = self.ground_base.gen_meshmodel(tcp_loc_pos=None,
                                                tcp_loc_rotmat=None,
                                                toggle_tcpcs=False,
                                                toggle_jntscs=toggle_jntscs,
                                                rgba=rgba)
    base_model.attach_to(collection)
    arm_model = self.arm.gen_meshmodel(tcp_jnt_id=tcp_jnt_id,
                                       tcp_loc_pos=tcp_loc_pos,
                                       tcp_loc_rotmat=tcp_loc_rotmat,
                                       toggle_tcpcs=toggle_tcpcs,
                                       toggle_jntscs=toggle_jntscs,
                                       rgba=rgba)
    arm_model.attach_to(collection)
    if self.hnd_attached:
        hnd_model = self.hnd.gen_meshmodel(toggle_tcpcs=False,
                                           toggle_jntscs=toggle_jntscs,
                                           rgba=rgba)
        hnd_model.attach_to(collection)
    return collection
def gen_stickmodel(self,
                   tcp_jnt_id=None,
                   tcp_loc_pos=None,
                   tcp_loc_rotmat=None,
                   toggle_tcpcs=False,
                   toggle_jntscs=False,
                   toggle_connjnt=False,
                   name='fr5_stickmodel'):
    """Collect the stick models of the base, arm and (optional) hand
    into a single ModelCollection and return it."""
    collection = mc.ModelCollection(name=name)
    base_model = self.ground_base.gen_stickmodel(tcp_loc_pos=None,
                                                 tcp_loc_rotmat=None,
                                                 toggle_tcpcs=False,
                                                 toggle_jntscs=toggle_jntscs)
    base_model.attach_to(collection)
    arm_model = self.arm.gen_stickmodel(tcp_jnt_id=tcp_jnt_id,
                                        tcp_loc_pos=tcp_loc_pos,
                                        tcp_loc_rotmat=tcp_loc_rotmat,
                                        toggle_tcpcs=toggle_tcpcs,
                                        toggle_jntscs=toggle_jntscs,
                                        toggle_connjnt=toggle_connjnt)
    arm_model.attach_to(collection)
    if self.hnd_attached:
        hnd_model = self.hnd.gen_stickmodel(toggle_tcpcs=False,
                                            toggle_jntscs=toggle_jntscs,
                                            toggle_connjnt=toggle_connjnt)
        hnd_model.attach_to(collection)
    return collection
if __name__ == '__main__':
    # Demo script: visualize the FR5 robot in two configurations.
    import visualization.panda.world as wd
    import modeling.geometric_model as gm
    base = wd.World(cam_pos=[2, -2, 1], lookat_pos=[0, 0, 0], w=960, h=720)
    gm.gen_frame().attach_to(base)
    # First robot: no hand, near-zero configuration.
    fr5 = FR5_robot()
    conf1 = np.radians([0, 0, 0, 0, 0, 20])
    fr5.fk(component_name="arm", jnt_values=conf1)
    print("collision=", fr5.is_collided())
    fr5.gen_meshmodel(toggle_tcpcs=True).attach_to(base)
    # Second robot: hand attached, jacobian evaluated at an offset TCP.
    arm_jacobian_offset = np.array([0, 0, .145])
    fr5 = FR5_robot(arm_jacobian_offset=arm_jacobian_offset, hnd_attached=True)
    conf2 = np.radians([-93, -98, -73, -97, 90, 91])
    fr5.fk(component_name="arm", jnt_values=conf2)
    print("global_tcp=", fr5.get_gl_tcp())
    print("collision=", fr5.is_collided())
    print("jacobian=", fr5.jacobian())
    # print("manipulability=", fr5.manipulability())
    fr5.gen_meshmodel(toggle_tcpcs=True).attach_to(base)
    fr5.show_cdprimit() # show the collision model
    # ns = rm.null_space(fr5.jacobian())
    # print("null space = ", ns)
    # print("check = ", np.dot(fr5.jacobian(), ns))
    base.run()
017da3bde710a4c88cc2e18c98a9cd3f099997c2 | 3,704 | py | Python | kuha/importer/__init__.py | aryla/kuha | 4fc418d546c6cc5876deda61258dcb61c52c549c | [
"BSD-3-Clause"
] | 4 | 2016-11-30T08:29:00.000Z | 2021-04-16T15:21:22.000Z | kuha/importer/__init__.py | aryla/kuha | 4fc418d546c6cc5876deda61258dcb61c52c549c | [
"BSD-3-Clause"
] | null | null | null | kuha/importer/__init__.py | aryla/kuha | 4fc418d546c6cc5876deda61258dcb61c52c549c | [
"BSD-3-Clause"
] | 3 | 2020-11-22T15:21:02.000Z | 2021-05-16T11:09:48.000Z | import errno
import importlib
import logging
import os
import sys
from pyramid.paster import get_appsettings, setup_logging
from pyramid.scripts.common import parse_vars
from ..exception import HarvestError
from ..config import clean_importer_settings
from ..models import create_engine, ensure_oai_dc_exists
from ..util import (
datestamp_now,
parse_date,
format_datestamp,
)
from ..importer.harvest import update
def usage(argv):
    """Print a usage message for the invoked command and exit with status 1."""
    usage_string = '''Usage: {0} <config_uri> [var=value]...
Update the Kuha database.
See the sample configuration file for details.'''
    program = os.path.basename(argv[0])
    print(usage_string.format(program))
    sys.exit(1)
def read_timestamp(path):
    """
    Read the datetime stored in the timestamp file at *path*.

    :param path: path to the timestamp file, or a falsy value when no
                 timestamp file has been configured.
    :return: the parsed datetime, or ``None`` when the path is not
             configured, the file cannot be read, or its content does
             not parse as a date.
    """
    log = logging.getLogger(__name__)
    if not path:
        log.warning('Timestamp file has not been configured.')
        return None
    try:
        with open(path, 'r') as file_:
            (time, _) = parse_date(file_.read())
            return time
    except ValueError:
        # Content was not a valid datestamp; treat as "no timestamp".
        # (The exception object was previously bound but never used.)
        log.error('Invalid timestamp file "{0}"'.format(path))
    except IOError as error:
        if error.errno == errno.ENOENT:
            # A missing file is normal on the first run.
            log.info('Timestamp file does not exist.')
        else:
            log.error(
                'Failed to read timestamp file "{0}": {1}'
                ''.format(path, error)
            )
    return None
def write_timestamp(path, time):
    """Record *time* into the timestamp file at *path* (best effort)."""
    log = logging.getLogger(__name__)
    if not path:
        return
    text = format_datestamp(time)
    try:
        with open(path, 'w') as fh:
            fh.write(text)
    except IOError as exc:
        message = 'Failed to record timestamp to "{0}": {1}'.format(path, exc)
        log.error(message)
def main(argv=sys.argv):
    """
    Entry point for the Kuha importer.

    Reads settings from the config file given on the command line, loads
    the configured metadata provider, harvests metadata into the
    database, and records the harvest timestamp (unless this is a dry
    run).
    """
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    settings = get_appsettings(config_uri, options=options)
    clean_importer_settings(settings)
    setup_logging(settings['logging_config'])
    log = logging.getLogger(__name__)
    # 'deleted_records' == 'no' means deleted records are purged outright.
    purge = settings['deleted_records'] == 'no'
    dry_run = settings['dry_run']
    if dry_run:
        log.info('Starting metadata import (dry run)...')
    else:
        log.info('Starting metadata import...')
    timestamp_file = settings['timestamp_file']
    # force_update ignores the previous timestamp and reharvests everything.
    old_timestamp = (None if settings['force_update'] else
                     read_timestamp(timestamp_file))
    # Get timestamp before harvest.
    new_timestamp = datestamp_now()
    create_engine(settings)
    if not dry_run:
        ensure_oai_dc_exists()
    log.debug('Loading the metadata provider...')
    try:
        # metadata_provider_class is a (module name, class name) pair.
        modulename, classname = settings['metadata_provider_class']
        log.debug('Using class "{0}" from module "{1}"'
                  ''.format(classname, modulename))
        provider_module = importlib.import_module(modulename)
        Provider = getattr(provider_module, classname)
        args = settings['metadata_provider_args'].split()
        metadata_provider = Provider(*args)
    except Exception as error:
        log.critical(
            'Failed to initialize the metadata provider: {0}'
            ''.format(error),
            exc_info=True,
        )
        raise
    log.debug('Harvesting metadata...')
    try:
        update(metadata_provider, old_timestamp, purge, dry_run)
    except HarvestError as error:
        log.critical(
            'Failed to harvest metadata: {0}'
            ''.format(error)
        )
        raise
    # Only persist the new timestamp after a successful, real harvest.
    if not dry_run:
        write_timestamp(timestamp_file, new_timestamp)
    log.info('Done.')
| 27.849624 | 68 | 0.606911 |
557a1df2e7be1f9812b297facc266ac4f5bce272 | 19,028 | py | Python | api/api/problem.py | alphagov/xgovctf | 52f491ba6c72aed7fee13b9b05291160415340ed | [
"MIT"
] | 6 | 2017-02-03T17:33:56.000Z | 2019-07-17T19:15:50.000Z | api/api/problem.py | alphagov/xgovctf | 52f491ba6c72aed7fee13b9b05291160415340ed | [
"MIT"
] | null | null | null | api/api/problem.py | alphagov/xgovctf | 52f491ba6c72aed7fee13b9b05291160415340ed | [
"MIT"
] | 2 | 2019-08-29T11:51:23.000Z | 2021-04-10T19:59:08.000Z | """ Module for interacting with the problems """
import imp
import pymongo
import api
from datetime import datetime
from api.common import validate, check, safe_fail, InternalException, SevereInternalException, WebException
from voluptuous import Schema, Length, Required, Range
from bson import json_util
from os.path import join, isfile
from api.annotations import log_action
# Grader scripts are loaded relative to this directory.
grader_base_path = "./graders"

# Validation schema for a flag submission (team id, problem id, key).
submission_schema = Schema({
    Required("tid"): check(
        ("This does not look like a valid tid.", [str, Length(max=100)])),
    Required("pid"): check(
        ("This does not look like a valid pid.", [str, Length(max=100)])),
    Required("key"): check(
        ("This does not look like a valid key.", [str, Length(max=100)]))
})

# Validation schema for problem documents before database insertion.
problem_schema = Schema({
    Required("name"): check(
        ("The problem's display name must be a string.", [str])),
    Required("score"): check(
        ("Score must be a positive integer.", [int, Range(min=0)])),
    Required("category"): check(
        ("Category must be a string.", [str])),
    Required("grader"): check(
        ("The grader path must be a string.", [str])),
    Required("description"): check(
        ("The problem description must be a string.", [str])),
    Required("threshold"): check(
        ("Threshold must be a positive integer.", [int, Range(min=0)])),
    "disabled": check(
        ("A problem's disabled state is either True or False.", [
            lambda disabled: type(disabled) == bool])),
    "autogen": check(
        ("A problem should either be autogenerated or not, True/False", [
            lambda autogen: type(autogen) == bool])),
    "related_problems": check(
        ("Related problems should be a list of related problems.", [list])),
    # pid and _id are generated server-side; callers may never supply them.
    "pid": check(
        ("You should not specify a pid for a problem.", [lambda _: False])),
    "weightmap": check(
        ("Weightmap should be a dict.", [dict])),
    "tags": check(
        ("Tags must be described as a list.", [list])),
    "hint": check(
        ("A hint must be a string.", [str])),
    "generator": check(
        ("A generator must be a path.", [str])),
    "_id": check(
        ("Your problems should not already have _ids.", [lambda id: False]))
})
def get_all_categories(show_disabled=False):
    """
    Gets the set of distinct problem categories.
    Args:
        show_disabled: Whether to include categories that are only on disabled problems
    Returns:
        The set of distinct problem categories.
    """
    query = {} if show_disabled else {"disabled": False}
    conn = api.common.get_conn()
    return conn.problems.find(query).distinct("category")
#TODO: Sanity checks for autogen
def analyze_problems():
    """
    Checks the sanity of inserted problems.
    Includes weightmap and grader verification.
    Returns:
        A list of error strings describing the problems.
    """
    grader_missing_error = "{}: Missing grader at '{}'."
    unknown_weightmap_pid = "{}: Has weightmap entry '{}' which does not exist."
    problems = get_all_problems()
    errors = []
    for problem in problems:
        # Every problem must point at an existing grader script on disk.
        if not isfile(join(grader_base_path, problem["grader"])):
            errors.append(grader_missing_error.format(problem["name"], problem["grader"]))
        # Every weightmap key must reference an existing problem.
        for pid in problem["weightmap"].keys():
            if safe_fail(get_problem, pid=pid) is None:
                errors.append(unknown_weightmap_pid.format(problem["name"], pid))
    return errors
def insert_problem(problem):
    """
    Inserts a problem into the database. Does sane validation.
    Args:
        Problem dict.
        score: points awarded for completing the problem.
        category: problem's category
        description: description of the problem.
        grader: path relative to grader_base_path
        threshold: Amount of points necessary for a team to unlock this problem.
    Optional:
        disabled: True or False. Defaults to False.
        hint: hint for completing the problem.
        tags: list of problem tags.
        relatedproblems: list of related problems.
        weightmap: problem's unlock weightmap
        autogen: Whether or not the problem will be auto generated.
    Returns:
        The newly created problem id.
    Raises:
        WebException: a problem with the same pid or name already exists.
    """
    db = api.common.get_conn()
    validate(problem_schema, problem)
    problem["disabled"] = problem.get("disabled", False)
    # The pid is derived deterministically from the display name.
    problem["pid"] = api.common.hash(problem["name"])
    # Convert weightmap keys from display names to hashed pids.
    weightmap = {}
    if problem.get("weightmap"):
        for name, weight in problem["weightmap"].items():
            name_hash = api.common.hash(name)
            weightmap[name_hash] = weight
    problem["weightmap"] = weightmap
    if safe_fail(get_problem, pid=problem["pid"]) is not None:
        raise WebException("Problem with identical pid already exists.")
    if safe_fail(get_problem, name=problem["name"]) is not None:
        raise WebException("Problem with identical name already exists.")
    db.problems.insert(problem)
    # Problem set changed; cached unlock/score data is stale.
    api.cache.fast_cache.clear()
    return problem["pid"]
def remove_problem(pid):
    """
    Removes a problem from the given database.
    Args:
        pid: the pid of the problem to remove.
    Returns:
        The removed problem object.
    """
    removed = get_problem(pid=pid)
    conn = api.common.get_conn()
    conn.problems.remove({"pid": pid})
    api.cache.fast_cache.clear()
    return removed
def set_problem_disabled(pid, disabled):
    """
    Updates a problem's availability.
    Args:
        pid: the problem's pid
        disabled: whether or not the problem should be disabled.
    Returns:
        The updated problem object.
    """
    # Thin convenience wrapper over update_problem.
    return update_problem(pid, {"disabled": disabled})
def update_problem(pid, updated_problem):
    """
    Updates a problem with new properties.
    Args:
        pid: the pid of the problem to update.
        updated_problem: an updated problem object.
    Returns:
        The updated problem object.
    Raises:
        WebException: the new name collides with an existing problem.
    """
    db = api.common.get_conn()
    if updated_problem.get("name", None) is not None:
        if safe_fail(get_problem, name=updated_problem["name"]) is not None:
            raise WebException("Problem with identical name already exists.")
    # Merge the updates into a copy of the current document.
    problem = get_problem(pid=pid, show_disabled=True).copy()
    problem.update(updated_problem)
    # pass validation by removing/readding pid
    problem.pop("pid", None)
    validate(problem_schema, problem)
    problem["pid"] = pid
    db.problems.update({"pid": pid}, problem)
    api.cache.fast_cache.clear()
    return problem
def search_problems(*conditions):
    """
    Aggregates all problems that contain all of the given properties from the list specified.
    Args:
        conditions: multiple mongo queries to search.
    Returns:
        The list of matching problems.
    """
    conn = api.common.get_conn()
    query = {"$or": list(conditions)}
    return list(conn.problems.find(query, {"_id": 0}))
def insert_problem_from_json(blob):
    """
    Converts json blob of problem(s) into dicts. Runs insert_problem on each one.
    See insert_problem for more information.
    Args:
        blob: a JSON string containing a problem object or a list of them.
    Returns:
        A list of the created problem pids if an array of problems is
        specified, otherwise the single created pid.
    Raises:
        InternalException: the blob is neither a JSON object nor an array.
    """
    result = json_util.loads(blob)
    # isinstance is the idiomatic type check (also accepts subclasses),
    # unlike the previous `type(result) == list` comparison.
    if isinstance(result, list):
        return [insert_problem(problem) for problem in result]
    elif isinstance(result, dict):
        return insert_problem(result)
    else:
        raise InternalException("JSON blob does not appear to be a list of problems or a single problem.")
def get_grader(pid):
    """
    Returns the grader module for a given problem.
    Args:
        pid: the problem id
    Returns:
        The grader module
    Raises:
        InternalException: the grader file could not be found on disk.
    """
    try:
        path = get_problem(pid=pid, show_disabled=True)["grader"]
        # path[:-3] strips the trailing '.py' to form the module name.
        # NOTE: the imp module is deprecated since Python 3.4; importlib
        # is the modern replacement.
        return imp.load_source(path[:-3], join(grader_base_path, path))
    except FileNotFoundError:
        raise InternalException("Problem grader for {} is offline.".format(get_problem(pid=pid)['name']))
def grade_problem(pid, key, tid=None):
    """
    Grades the problem with its associated grader script.
    Args:
        tid: tid if provided; defaults to the current user's team.
        pid: problem's pid
        key: user's submission
    Returns:
        A dict.
        correct: boolean
        points: number of points the problem is worth.
        message: message returned from the grader.
    """
    if tid is None:
        tid = api.user.get_user()["tid"]
    #If the problem is autogenerated, let
    #api.autogen deal with it.
    if api.autogen.is_autogen_problem(pid):
        return api.autogen.grade_problem_instance(pid, tid, key)
    problem = get_problem(pid=pid, show_disabled=True)
    grader = get_grader(pid)
    # The grader module exposes grade(tid, key) -> (correct, message).
    (correct, message) = grader.grade(tid, key)
    return {
        "correct": correct,
        "points": problem["score"],
        "message": message
    }
@log_action
def submit_key(tid, pid, key, uid=None, ip=None):
    """
    User problem submission. Problem submission is inserted into the database.
    Args:
        tid: user's team id
        pid: problem's pid
        key: answer text
        uid: user's uid
        ip: submitting client's ip address (stored with the submission)
    Returns:
        A dict.
        correct: boolean
        points: number of points the problem is worth.
        message: message returned from the grader.
    Raises:
        InternalException: problem locked or user unknown.
        WebException: already solved, or duplicate submission (exp.data
            carries a machine-readable 'code').
    """
    db = api.common.get_conn()
    validate(submission_schema, {"tid": tid, "pid": pid, "key": key})
    # Teams may only submit to problems they have unlocked.
    if pid not in get_unlocked_pids(tid):
        raise InternalException("You can't submit flags to problems you haven't unlocked.")
    if pid in get_solved_pids(tid=tid):
        exp = WebException("You have already solved this problem.")
        exp.data = {'code': 'solved'}
        raise exp
    user = api.user.get_user(uid=uid)
    if user is None:
        raise InternalException("User submitting flag does not exist.")
    uid = user["uid"]
    result = grade_problem(pid, key, tid)
    problem = get_problem(pid=pid)
    eligibility = api.team.get_team(tid=tid)['eligible']
    submission = {
        'uid': uid,
        'tid': tid,
        'timestamp': datetime.utcnow(),
        'pid': pid,
        'ip': ip,
        'key': key,
        'eligible': eligibility,
        'category': problem['category'],
        'correct': result['correct']
    }
    # Reject exact duplicate (key, pid) attempts from the same team.
    if (key, pid) in [(submission["key"], submission["pid"]) for submission in get_submissions(tid=tid)]:
        exp = WebException("You or one of your teammates has already tried this solution.")
        exp.data = {'code': 'repeat'}
        raise exp
    db.submissions.insert(submission)
    if submission["correct"]:
        # A correct solve changes cached scores and unlocks; drop the
        # stale memoized entries and fire achievement processing.
        api.cache.invalidate_memoization(api.stats.get_score, {"kwargs.tid":tid}, {"kwargs.uid":uid})
        api.cache.invalidate_memoization(get_unlocked_pids, {"args":tid})
        api.cache.invalidate_memoization(get_solved_pids, {"kwargs.tid":tid}, {"kwargs.uid":uid})
        api.cache.invalidate_memoization(api.stats.get_score_progression, {"kwargs.tid":tid}, {"kwargs.uid":uid})
        api.achievement.process_achievements("submit", {"uid": uid, "tid": tid, "pid": pid})
    return result
def count_submissions(pid=None, uid=None, tid=None, category=None, correctness=None, eligibility=None):
    """
    Counts submissions matching the given optional filters.
    uid takes precedence over tid; all other filters combine with AND.
    Returns:
        The number of matching submissions.
    """
    query = {}
    if uid is not None:
        query["uid"] = uid
    elif tid is not None:
        query["tid"] = tid
    if pid is not None:
        query["pid"] = pid
    if category is not None:
        query["category"] = category
    if correctness is not None:
        query["correct"] = correctness
    if eligibility is not None:
        query["eligible"] = eligibility
    conn = api.common.get_conn()
    return conn.submissions.find(query, {"_id": 0}).count()
def get_submissions(pid=None, uid=None, tid=None, category=None, correctness=None, eligibility=None):
    """
    Gets the submissions from a team or user.
    Optional filters of pid or category.
    Args:
        uid: the user id (takes precedence over tid)
        tid: the team id
        category: category filter.
        pid: problem filter.
        correctness: correct filter
        eligibility: eligibility filter
    Returns:
        A list of submissions from the given entity.
    """
    query = {}
    if uid is not None:
        query["uid"] = uid
    elif tid is not None:
        query["tid"] = tid
    if pid is not None:
        query["pid"] = pid
    if category is not None:
        query["category"] = category
    if correctness is not None:
        query["correct"] = correctness
    if eligibility is not None:
        query["eligible"] = eligibility
    conn = api.common.get_conn()
    return list(conn.submissions.find(query, {"_id": 0}))
def clear_all_submissions():
    """
    Removes all submissions from the database.
    """
    conn = api.common.get_conn()
    conn.submissions.remove()
def clear_submissions(uid=None, tid=None, pid=None):
    """
    Clear submissions for a given team, user, or problems.
    Args:
        uid: the user's uid to clear from.
        tid: the team's tid to clear from.
        pid: the pid to clear from.
    Returns:
        The result of the mongo remove operation.
    Raises:
        InternalException: no filter was supplied.
    """
    db = api.common.get_conn()
    match = {}
    if pid is not None:
        # BUG FIX: this previously was match.update({"pid", pid}) -- a
        # *set* literal, which makes dict.update raise at runtime instead
        # of building the query filter.
        match.update({"pid": pid})
    elif uid is not None:
        match.update({"uid": uid})
    elif tid is not None:
        match.update({"tid": tid})
    else:
        raise InternalException("You must supply either a tid, uid, or pid")
    return db.submissions.remove(match)
def invalidate_submissions(pid=None, uid=None, tid=None):
    """
    Invalidates the submissions for a given problem. Can be filtered by uid or tid.
    Passing no arguments will invalidate all submissions.
    Args:
        pid: the pid of the problem.
        uid: the user's uid that will his submissions invalidated.
        tid: the team's tid that will have their submissions invalidated.
    """
    db = api.common.get_conn()
    match = {}
    if pid is not None:
        match.update({"pid": pid})
    if uid is not None:
        match.update({"uid": uid})
    elif tid is not None:
        match.update({"tid": tid})
    # BUG FIX: the previous call passed {"correct": False} directly, which
    # in mongo *replaces* the first matched document wholesale (destroying
    # all its other fields) and only touches one document. Use $set with
    # multi=True so every matching submission keeps its fields and merely
    # has "correct" flipped off (mirrors reevaluate_submissions_for_problem).
    db.submissions.update(match, {"$set": {"correct": False}}, multi=True)
def reevaluate_submissions_for_problem(pid):
    """
    In the case of the problem or grader being updated, this will reevaluate submissions for a problem.
    Args:
        pid: the pid of the problem to be reevaluated.
    """
    db = api.common.get_conn()
    # Raises if the problem does not exist.
    get_problem(pid=pid, show_disabled=True)
    # Map each distinct submitted key to its new correctness, or None
    # when regrading did not change the stored result.
    keys = {}
    for submission in get_submissions(pid=pid):
        key = submission["key"]
        if key not in keys:
            result = grade_problem(pid, key, submission["tid"])
            if result["correct"] != submission["correct"]:
                keys[key] = result["correct"]
            else:
                keys[key] = None
    # Apply only the changed verdicts, across all submissions of that key.
    for key, change in keys.items():
        if change is not None:
            db.submissions.update({"key": key}, {"$set": {"correct": change}}, multi=True)
def reevaluate_all_submissions():
    """
    In the case of the problem or grader being updated, this will reevaluate all submissions.
    """
    api.cache.clear_all()
    pids = [problem["pid"] for problem in get_all_problems(show_disabled=True)]
    for pid in pids:
        reevaluate_submissions_for_problem(pid)
def get_problem(pid=None, name=None, tid=None, show_disabled=False):
    """
    Gets a single problem.
    Args:
        pid: The problem id
        name: The name of the problem
        tid: optional team id; if given, the problem must be unlocked for it.
        show_disabled: Boolean indicating whether or not to show disabled problems.
    Returns:
        The problem dictionary from the database
    Raises:
        InternalException: neither pid nor name supplied, or the problem
            is locked for the given team.
        SevereInternalException: no matching problem exists.
    """
    db = api.common.get_conn()
    match = {}
    if pid is not None:
        match.update({'pid': pid})
    elif name is not None:
        match.update({'name': name})
    else:
        raise InternalException("Must supply pid or display name")
    if tid is not None and pid not in get_unlocked_pids(tid):
        raise InternalException("You cannot get this problem")
    if not show_disabled:
        match.update({"disabled": False})
    # (A redundant second api.common.get_conn() call used to sit here; the
    # connection obtained above is reused instead.)
    problem = db.problems.find_one(match, {"_id": 0})
    if problem is None:
        raise SevereInternalException("Could not find problem! You gave " + str(match))
    return problem
def get_all_problems(category=None, show_disabled=False):
    """
    Gets all of the problems in the database.
    Args:
        category: Optional parameter to restrict which problems are returned
        show_disabled: Boolean indicating whether or not to show disabled problems.
    Returns:
        List of problems from the database, ordered by ascending score.
    """
    query = {}
    if category is not None:
        query['category'] = category
    if not show_disabled:
        query['disabled'] = False
    conn = api.common.get_conn()
    cursor = conn.problems.find(query, {"_id": 0}).sort('score', pymongo.ASCENDING)
    return list(cursor)
def get_solved_pids(tid=None, uid=None, category=None):
    """
    Gets the solved pids for a given team or user.
    Args:
        tid: The team id
        uid: The user id
        category: Optional parameter to restrict which problems are returned
    Returns:
        List of solved problem ids (deduplicated)
    """
    correct = get_submissions(tid=tid, uid=uid, category=category, correctness=True)
    return list({sub['pid'] for sub in correct})
def get_solved_problems(tid=None, uid=None, category=None):
    """
    Gets the solved problems for a given team or user.
    Args:
        tid: The team id
        uid: The user id
        category: Optional parameter to restrict which problems are returned
    Returns:
        List of solved problem dictionaries
    """
    pids = get_solved_pids(tid=tid, uid=uid, category=category)
    return [get_problem(pid=pid) for pid in pids]
def get_unlocked_pids(tid, category=None):
    """
    Gets the unlocked pids for a given team.

    A problem with no weightmap/threshold is always unlocked; otherwise it
    unlocks once the weighted sum of the team's solved problems reaches
    its threshold.
    Args:
        tid: The team id
        category: Optional parameter to restrict which problems are returned
    Returns:
        List of unlocked problem ids
    """
    solved = get_solved_problems(tid=tid, category=category)
    unlocked = []
    for problem in get_all_problems():
        if 'weightmap' not in problem or 'threshold' not in problem:
            unlocked.append(problem['pid'])
            continue
        weightsum = 0
        for solved_problem in solved:
            weightsum += problem['weightmap'].get(solved_problem['pid'], 0)
        if weightsum >= problem['threshold']:
            unlocked.append(problem['pid'])
    return unlocked
def get_unlocked_problems(tid, category=None):
    """
    Gets the unlocked problems for a given team.
    Args:
        tid: The team id
        category: Optional parameter to restrict which problems are returned
    Returns:
        List of unlocked problem dictionaries, each with a 'solved' flag.
    """
    solved = get_solved_problems(tid=tid)
    unlocked = [get_problem(pid=pid) for pid in get_unlocked_pids(tid, category=category)]
    for problem in unlocked:
        # Autogenerated problems get their per-team instance data merged in.
        if api.autogen.is_autogen_problem(problem["pid"]):
            problem.update(api.autogen.get_problem_instance(problem["pid"], tid))
        # NOTE(review): dict-equality membership test; for autogen problems
        # the instance fields merged above may make this always False --
        # confirm this is the intended behavior.
        problem['solved'] = problem in solved
    return unlocked
| 29.006098 | 116 | 0.637849 |
9b0dfe34d1469353836905b77e42abeba1337484 | 1,023 | py | Python | Chapter07/logistic_function.py | PacktPublishing/Python-Machine-Learning-By-Example-Second-Edition | 830ad0124dc72c3a24929ff1b67081a66894f1f9 | [
"MIT"
] | 31 | 2019-05-25T11:28:23.000Z | 2022-02-09T15:19:20.000Z | Chapter07/logistic_function.py | PacktPublishing/Python-Machine-Learning-By-Example-Second-Edition | 830ad0124dc72c3a24929ff1b67081a66894f1f9 | [
"MIT"
] | null | null | null | Chapter07/logistic_function.py | PacktPublishing/Python-Machine-Learning-By-Example-Second-Edition | 830ad0124dc72c3a24929ff1b67081a66894f1f9 | [
"MIT"
] | 22 | 2019-02-27T20:11:39.000Z | 2022-03-07T21:46:38.000Z | '''
Source codes for Python Machine Learning By Example 2nd Edition (Packt Publishing)
Chapter 7: Predicting Online Ads Click-through with Logistic Regression
Author: Yuxi (Hayden) Liu
'''
import numpy as np
def sigmoid(input):
return 1.0 / (1 + np.exp(-input))
import matplotlib.pyplot as plt

# Plot the sigmoid (logistic) curve over z in [-8, 8], with guide lines
# at y = 0, 0.5 and 1.
z = np.linspace(-8, 8, 1000)
y = sigmoid(z)
plt.plot(z, y)
plt.axhline(y=0, ls='dotted', color='k')
plt.axhline(y=0.5, ls='dotted', color='k')
plt.axhline(y=1, ls='dotted', color='k')
plt.yticks([0.0, 0.25, 0.5, 0.75, 1.0])
plt.xlabel('z')
plt.ylabel('y(z)')
plt.show()
# plot sample cost vs y_hat (prediction), for y (truth) = 1
y_hat = np.linspace(0, 1, 1000)
cost = -np.log(y_hat)
plt.plot(y_hat, cost)
plt.xlabel('Prediction')
plt.ylabel('Cost')
plt.xlim(0, 1)
plt.ylim(0, 7)
plt.show()
# plot sample cost vs y_hat (prediction), for y (truth) = 0
y_hat = np.linspace(0, 1, 1000)
cost = -np.log(1 - y_hat)
plt.plot(y_hat, cost)
plt.xlabel('Prediction')
plt.ylabel('Cost')
plt.xlim(0, 1)
plt.ylim(0, 7)
plt.show()
d08eb0be2bbc455513fc52827210e703aade4a4f | 7,786 | py | Python | tests/utils_tests/test_html.py | chadgh/django | 0f90f741de913a4502adc51063df77278d01a5ca | [
"BSD-3-Clause"
] | 1 | 2019-01-31T17:16:56.000Z | 2019-01-31T17:16:56.000Z | tests/utils_tests/test_html.py | rmutter/django | 5d044339037be879a11b03fe8bd8c3ef1d520b1a | [
"BSD-3-Clause"
] | null | null | null | tests/utils_tests/test_html.py | rmutter/django | 5d044339037be879a11b03fe8bd8c3ef1d520b1a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import os
from unittest import TestCase
import warnings
from django.utils import html, safestring
from django.utils._os import upath
from django.utils.encoding import force_text
class TestUtilsHtml(TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
("'", '''),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '<&', '<&')
def test_format_html(self):
self.assertEqual(
html.format_html("{0} {1} {third} {fourth}",
"< Dangerous >",
html.mark_safe("<b>safe</b>"),
third="< dangerous again",
fourth=html.mark_safe("<i>safe again</i>")
),
"< Dangerous > <b>safe</b> < dangerous again <i>safe again</i>"
)
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<p>See: 'é is an apostrophe followed by e acute</p>',
'See: 'é is an apostrophe followed by e acute'),
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('hi, <f x', 'hi, <f x'),
('234<235, right?', '234<235, right?'),
('a4<a5 right?', 'a4<a5 right?'),
('b7>b2!', 'b7>b2!'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
('a<p onclick="alert(\'<test>\')">b</p>c', 'abc'),
('a<p a >b</p>c', 'abc'),
('d<a:b c:d>e</p>f', 'def'),
('<strong>foo</strong><a href="http://example.com">bar</a>', 'foobar'),
)
for value, output in items:
self.check_output(f, value, output)
# Some convoluted syntax for which parsing may differ between python versions
output = html.strip_tags('<sc<!-- -->ript>test<<!-- -->/script>')
self.assertNotIn('<script>', output)
self.assertIn('test', output)
output = html.strip_tags('<script>alert()</script>&h')
self.assertNotIn('<script>', output)
self.assertIn('alert()', output)
# Test with more lengthy content (also catching performance regressions)
for filename in ('strip_tags1.html', 'strip_tags2.txt'):
path = os.path.join(os.path.dirname(upath(__file__)), 'files', filename)
with open(path, 'r') as fp:
content = force_text(fp.read())
start = datetime.now()
stripped = html.strip_tags(content)
elapsed = datetime.now() - start
self.assertEqual(elapsed.seconds, 0)
self.assertIn("Please try again.", stripped)
self.assertNotIn('<', stripped)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_escapejs(self):
f = html.escapejs
items = (
('"double quotes" and \'single quotes\'', '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(r'\ : backslashes, too', '\\u005C : backslashes, too'),
('and lots of whitespace: \r\n\t\v\f\b', 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(r'<script>and this</script>', '\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
('paragraph separator:\u2029and line separator:\u2028', 'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
def test_remove_tags(self):
f = html.remove_tags
items = (
("<b><i>Yes</i></b>", "b i", "Yes"),
("<a>x</a> <p><b>y</b></p>", "a b", "x <p>y</p>"),
)
for value, tags, output in items:
with warnings.catch_warnings(record=True):
warnings.simplefilter("always")
self.assertEqual(f(value, tags), output)
def test_smart_urlquote(self):
quote = html.smart_urlquote
# Ensure that IDNs are properly quoted
self.assertEqual(quote('http://öäü.com/'), 'http://xn--4ca9at.com/')
self.assertEqual(quote('http://öäü.com/öäü/'), 'http://xn--4ca9at.com/%C3%B6%C3%A4%C3%BC/')
# Ensure that everything unsafe is quoted, !*'();:@&=+$,/?#[]~ is considered safe as per RFC
self.assertEqual(quote('http://example.com/path/öäü/'), 'http://example.com/path/%C3%B6%C3%A4%C3%BC/')
self.assertEqual(quote('http://example.com/%C3%B6/ä/'), 'http://example.com/%C3%B6/%C3%A4/')
self.assertEqual(quote('http://example.com/?x=1&y=2'), 'http://example.com/?x=1&y=2')
def test_conditional_escape(self):
    """conditional_escape() escapes a plain string but passes an object
    already marked safe through untouched."""
    s = '<h1>interop</h1>'
    # BUG FIX: this copy of the file had lost the HTML entities in the
    # expected value, asserting that escaping is the identity.  A plain
    # string must come back escaped:
    self.assertEqual(html.conditional_escape(s),
                     '&lt;h1&gt;interop&lt;/h1&gt;')
    # ...while a SafeString is returned unchanged.
    self.assertEqual(html.conditional_escape(safestring.mark_safe(s)), s)
| 42.546448 | 125 | 0.524146 |
eb056ca3c0a6814d79a8308c78f244a66674d2b2 | 2,283 | py | Python | kymatio/scattering2d/tests/test_numpy_scattering2d.py | edouardoyallon/kymatio | eeed6ac9e59bc6645b90fc4e7ff8ce4f693887bc | [
"BSD-3-Clause"
] | 2 | 2019-04-08T14:58:48.000Z | 2020-02-28T01:11:17.000Z | kymatio/scattering2d/tests/test_numpy_scattering2d.py | edouardoyallon/kymatio | eeed6ac9e59bc6645b90fc4e7ff8ce4f693887bc | [
"BSD-3-Clause"
] | 7 | 2018-12-14T01:42:21.000Z | 2019-10-05T21:40:42.000Z | kymatio/scattering2d/tests/test_numpy_scattering2d.py | edouardoyallon/kymatio | eeed6ac9e59bc6645b90fc4e7ff8ce4f693887bc | [
"BSD-3-Clause"
] | 1 | 2018-12-13T20:52:53.000Z | 2018-12-13T20:52:53.000Z | import os
import io
import numpy as np
from kymatio.scattering2d import Scattering2D
import torch
from kymatio.backend.fake_backend import backend as fake_backend
import pytest
class TestScattering2DNumpy:
    def reorder_coefficients_from_interleaved(self, J, L):
        """Return (order0, order1, order2) index lists mapping the saved
        interleaved coefficient layout onto the order-grouped layout."""
        order0, order1, order2 = [0], [], []
        idx = 0
        for j1 in range(J):
            for l1 in range(L):
                idx += 1
                order1.append(idx)
                for j2 in range(j1 + 1, J):
                    for l2 in range(L):
                        idx += 1
                        order2.append(idx)
        # Sanity-check against the closed-form counts per order.
        assert len(order0) == 1
        assert len(order1) == J * L
        assert len(order2) == L ** 2 * J * (J - 1) // 2
        return order0, order1, order2

    def test_Scattering2D(self):
        """Compare the numpy frontend against reference data saved by torch."""
        test_data_dir = os.path.dirname(__file__)
        with open(os.path.join(test_data_dir, 'test_data_2d.pt'), 'rb') as handle:
            saved = torch.load(io.BytesIO(handle.read()))
        signal = saved['x'].numpy()
        expected = saved['Sx'].numpy()
        J = saved['J']
        # The reference coefficients are stored interleaved; reorder them
        # into the order0/order1/order2 layout that is now computed.
        o0, o1, o2 = self.reorder_coefficients_from_interleaved(J, 8)
        expected = expected[..., np.concatenate((o0, o1, o2)), :, :]
        M, N = signal.shape[2], signal.shape[3]
        scattering = Scattering2D(
            J, shape=(M, N), pre_pad=saved['pre_pad'], frontend='numpy')
        assert np.allclose(scattering(signal), expected)

    def test_inputs(self):
        """Invalid constructor arguments must raise informative errors."""
        # An unsupported backend is rejected.
        with pytest.raises(RuntimeError) as excinfo:
            Scattering2D(2, shape=(10, 10), frontend='numpy', backend=fake_backend)
        assert 'not supported' in excinfo.value.args[0]
        # J too large for the input shape is rejected.
        with pytest.raises(RuntimeError) as excinfo:
            Scattering2D(10, shape=(10, 10), frontend='numpy')
        assert 'smallest dimension' in excinfo.value.args[0]
| 33.086957 | 96 | 0.569864 |
5d4181014bba4902ad899b77ac99b897e925cb5d | 22 | py | Python | src/rtf2txt/__init__.py | joncutrer/rtf2txt | a9cfa6d80d6f38b805f7b041b5370e47a2f38049 | [
"MIT"
] | 1 | 2021-08-28T16:34:34.000Z | 2021-08-28T16:34:34.000Z | src/rtf2txt/__init__.py | joncutrer/rtf2txt | a9cfa6d80d6f38b805f7b041b5370e47a2f38049 | [
"MIT"
] | null | null | null | src/rtf2txt/__init__.py | joncutrer/rtf2txt | a9cfa6d80d6f38b805f7b041b5370e47a2f38049 | [
"MIT"
] | null | null | null |
__version__ = "0.2.4" | 11 | 21 | 0.636364 |
6acfb62bca45973d1a7b3fc34bdbb0e7a26ccb35 | 4,127 | py | Python | padinfo/view/otherinfo.py | bitwalk/pad-cogs | 40e4911841d165caf615c7459eb7b0a20aa4cbe1 | [
"MIT"
] | null | null | null | padinfo/view/otherinfo.py | bitwalk/pad-cogs | 40e4911841d165caf615c7459eb7b0a20aa4cbe1 | [
"MIT"
] | null | null | null | padinfo/view/otherinfo.py | bitwalk/pad-cogs | 40e4911841d165caf615c7459eb7b0a20aa4cbe1 | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
import prettytable
from discordmenu.embed.base import Box
from discordmenu.embed.components import EmbedMain, EmbedField
from discordmenu.embed.text import LabeledText, Text
from discordmenu.embed.view import EmbedView
from redbot.core.utils.chat_formatting import box
from tsutils import embed_footer_with_state
from padinfo.common.external_links import puzzledragonx
from padinfo.view.base import BaseIdView
from padinfo.view.components.monster.header import MonsterHeader
from padinfo.view.components.view_state_base_id import ViewStateBaseId
from padinfo.view.id import evos_embed_field
from padinfo.view.links import LinksView
if TYPE_CHECKING:
from dadguide.models.monster_model import MonsterModel
class OtherInfoViewState(ViewStateBaseId):
    """View state for the "other info" pane; serializes exactly like the
    base id-view state plus this pane's type tag."""

    def serialize(self):
        payload = super().serialize()
        # Tag the payload so the menu framework can route back to this pane.
        payload['pane_type'] = OtherInfoView.VIEW_TYPE
        return payload
def statsbox(m, plus: int):
    """Render a monster's HP/ATK/RCV table at the given plus value
    (presumably +egg count — confirm) as a Discord code block."""
    table = prettytable.PrettyTable(['', 'HP', 'ATK', 'RCV'])
    table.hrules = prettytable.NONE
    table.vrules = prettytable.NONE
    table.align = "l"
    # Limit-breakable monsters additionally show levels 110 and 120.
    levels = (m.level, 110, 120) if m.limit_mult > 0 else (m.level,)
    # At +297 an extra "(Inh)" row shows stats when used as an inherit.
    inherit_flags = (False, True) if plus == 297 else (False,)
    for level in levels:
        for inherit in inherit_flags:
            hp, atk, rcv, _ = m.stats(level, plus=plus, inherit=inherit)
            label = '(Inh)' if inherit else 'Lv{}'.format(level)
            table.add_row([label, hp, atk, rcv])
    return box(table.get_string())
class OtherInfoView(BaseIdView):
    """Discord embed showing miscellaneous monster info: stat tables,
    economy values (MP/gold), XP, weighted stats, rarity/cost, and evos."""

    VIEW_TYPE = 'OtherInfo'

    @classmethod
    def embed(cls, state: OtherInfoViewState):
        """Build the full embed for *state*; purely declarative construction."""
        m: "MonsterModel" = state.monster
        return EmbedView(
            EmbedMain(
                color=state.color,
                title=MonsterHeader.fmt_id_header(state.monster,
                                                  state.alt_monsters[0].monster.monster_id == cls.TSUBAKI,
                                                  state.is_jp_buffed).to_markdown(),
                url=puzzledragonx(m)),
            embed_footer=embed_footer_with_state(state),
            embed_fields=[
                EmbedField(
                    "Stats at +297:",
                    Box(
                        # need to put these on the same line to get around discord's insane
                        # whitespace margins around code blocks
                        Text(statsbox(m, plus=297) + 'Stats at +0:'),
                        Text(statsbox(m, plus=0)),
                        LabeledText("JP Name", m.name_ja),
                        LinksView.linksbox(m),
                        # reg_date may be unset; omit the row rather than show "None".
                        LabeledText("JP Added", str(m.reg_date)) if m.reg_date else None,
                        LabeledText("Series", m.series.name_en),
                        Box(
                            LabeledText("Sell MP", '{:,}'.format(m.sell_mp)),
                            # Not every monster is purchasable with MP.
                            LabeledText("Buy MP", '{:,}'.format(m.buy_mp)) if m.buy_mp else None,
                            delimiter=' '),
                        Box(
                            LabeledText("Sell Gold", '{:,}'.format(m.sell_gold))
                        ),
                        Box(
                            # Abbreviate XP totals of one million or more as e.g. "1.5M".
                            LabeledText("XP to Max", '{:.1f}'.format(m.exp / 1000000).rstrip('0').rstrip('.') + 'M'
                                        if m.exp >= 1000000 else '{:,}'.format(m.exp)),
                            LabeledText("Max Level", str(m.level)),
                            delimiter=' '),
                        Box(
                            LabeledText("Weighted Stats", str(m.stats()[3])),
                            # Only limit-breakable monsters get the LB stat line.
                            Text('LB {} (+{}%)'.format(m.stats(lv=110)[3], m.limit_mult)) if m.limit_mult > 0 else None,
                            delimiter=' | '),
                        LabeledText("Fodder EXP", '{:,}'.format(m.fodder_exp)),
                        Box(
                            LabeledText("Rarity", str(m.rarity)),
                            LabeledText("Cost", str(m.cost)),
                            delimiter=' '))),
                evos_embed_field(state)])
| 43.442105 | 120 | 0.53259 |
86d48173a0d3b38e74166330375d04b4f73cce5c | 811 | py | Python | setup.py | kevinwylder/podman-py | fefc036d109b51e5cdf8754b8c0740188e74e938 | [
"Apache-2.0"
] | 106 | 2020-02-01T18:19:04.000Z | 2022-03-25T04:34:30.000Z | setup.py | kevinwylder/podman-py | fefc036d109b51e5cdf8754b8c0740188e74e938 | [
"Apache-2.0"
] | 152 | 2020-02-04T01:52:34.000Z | 2022-03-29T14:57:05.000Z | setup.py | kevinwylder/podman-py | fefc036d109b51e5cdf8754b8c0740188e74e938 | [
"Apache-2.0"
] | 61 | 2020-02-01T16:19:58.000Z | 2022-03-25T17:58:34.000Z | import setuptools
import fnmatch
from setuptools import find_packages
from setuptools.command.build_py import build_py as build_py_orig
# fnmatch patterns of source files to drop from the distribution; the
# build_py subclass below filters them out of every package's module list.
excluded = [
    "podman/api_connection.py",
    "podman/containers/*",
    "podman/images/*",
    "podman/manifests/*",
    "podman/networks/*",
    "podman/pods/*",
    "podman/system/*",
    "podman/system/*",  # NOTE(review): duplicate entry — harmless but redundant
    "podman/tests/*",
]
class build_py(build_py_orig):
    """build_py variant that skips modules whose source path matches any
    pattern in ``excluded``."""

    def find_package_modules(self, package, package_dir):
        discovered = super().find_package_modules(package, package_dir)
        return [
            entry
            for entry in discovered
            # entry is (package, module, file); filter on the file path.
            if not any(fnmatch.fnmatchcase(entry[2], pat=pattern) for pattern in excluded)
        ]
setuptools.setup(
    packages=find_packages(),
    # Install the filtering build_py defined above so excluded modules
    # never make it into the built distribution.
    cmdclass={"build_py": build_py},
)
| 23.852941 | 86 | 0.654747 |
8c2160c1f4c2d1a5db269c2e6def957af0d658b9 | 3,489 | py | Python | que_me_pongo/settings.py | hernancabral/que_me_pongo | 7cffc6bdf9cf8b9e9b0c6395aa0076eddb88dbeb | [
"MIT"
] | 1 | 2021-06-16T21:27:09.000Z | 2021-06-16T21:27:09.000Z | que_me_pongo/settings.py | hernancabral/que_me_pongo | 7cffc6bdf9cf8b9e9b0c6395aa0076eddb88dbeb | [
"MIT"
] | null | null | null | que_me_pongo/settings.py | hernancabral/que_me_pongo | 7cffc6bdf9cf8b9e9b0c6395aa0076eddb88dbeb | [
"MIT"
] | null | null | null | """
Django settings for que_me_pongo project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control (django-insecure-
# prefix marks it as the generated dev key); load from the environment and
# rotate before any production deployment.
SECRET_KEY = 'django-insecure-(@%c!re^dgdbh=qbr%xqa4i8pdj(v&^z#f#z!n7hb$*(pg3gq4'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty while DEBUG=True; must list real hostnames before production.
ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app.
    'core.apps.CoreConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'que_me_pongo.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates directory in addition to app templates.
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'que_me_pongo.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# SQLite file database -- fine for development; swap for a server DB in prod.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/

LANGUAGE_CODE = 'es-AR'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/

STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

LOGIN_URL = 'login'  # this is the name of the url
LOGOUT_REDIRECT_URL = 'index'  # this is the name of the url
LOGIN_REDIRECT_URL = 'dashboard'  # this is the name of the url
| 27.257813 | 91 | 0.70192 |
403addafd592d57864ef1d38646f747265e3013d | 14,782 | py | Python | plugins/modules/oci_file_storage_file_system_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_file_storage_file_system_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_file_storage_file_system_facts.py | sagar2938/oci-ansible-collection | 5b8ce583a0d5d0aabf14494d61aea4649e18d1e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_file_storage_file_system_facts
short_description: Fetches details about one or multiple FileSystem resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple FileSystem resources in Oracle Cloud Infrastructure
- Lists the file system resources in the specified compartment.
- If I(file_system_id) is specified, the details of a single FileSystem will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
file_system_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the file system.
- Required to get a specific file_system.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
- Required to list multiple file_systems.
type: str
availability_domain:
description:
- The name of the availability domain.
- "Example: `Uocm:PHX-AD-1`"
- Required to list multiple file_systems.
type: str
display_name:
description:
- A user-friendly name. It does not have to be unique, and it is changeable.
- "Example: `My resource`"
type: str
aliases: ["name"]
lifecycle_state:
description:
- Filter results by the specified lifecycle state. Must be a valid
state for the resource type.
type: str
choices:
- "CREATING"
- "ACTIVE"
- "DELETING"
- "DELETED"
- "FAILED"
source_snapshot_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the snapshot used to create a cloned file system. See
L(Cloning a File System,https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm).
type: str
parent_file_system_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the file system that contains the source snapshot of a
cloned file system. See L(Cloning a File System,https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm).
type: str
sort_by:
description:
- The field to sort by. You can provide either value, but not both.
By default, when you sort by time created, results are shown
in descending order. When you sort by display name, results are
shown in ascending order.
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either 'asc' or 'desc', where 'asc' is
ascending and 'desc' is descending. The default order is 'desc'
except for numeric values.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific file_system
oci_file_storage_file_system_facts:
# required
file_system_id: "ocid1.filesystem.oc1..xxxxxxEXAMPLExxxxxx"
- name: List file_systems
oci_file_storage_file_system_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
availability_domain: Uocm:PHX-AD-1
# optional
display_name: My resource
lifecycle_state: CREATING
source_snapshot_id: "ocid1.sourcesnapshot.oc1..xxxxxxEXAMPLExxxxxx"
parent_file_system_id: "ocid1.parentfilesystem.oc1..xxxxxxEXAMPLExxxxxx"
sort_by: TIMECREATED
sort_order: ASC
"""
RETURN = """
file_systems:
description:
- List of FileSystem resources
returned: on success
type: complex
contains:
availability_domain:
description:
- The availability domain the file system is in. May be unset
as a blank or NULL value.
- "Example: `Uocm:PHX-AD-1`"
returned: on success
type: str
sample: Uocm:PHX-AD-1
metered_bytes:
description:
- The number of bytes consumed by the file system, including
any snapshots. This number reflects the metered size of the file
system and is updated asynchronously with respect to
updates to the file system.
For more information, see L(File System Usage and Metering,https://docs.cloud.oracle.com/Content/File/Concepts/FSutilization.htm).
returned: on success
type: int
sample: 56
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment that contains the file system.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- A user-friendly name. It does not have to be unique, and it is changeable.
Avoid entering confidential information.
- "Example: `My file system`"
returned: on success
type: str
sample: My file system
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the file system.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current state of the file system.
returned: on success
type: str
sample: CREATING
time_created:
description:
- The date and time the file system was created, expressed in
L(RFC 3339,https://tools.ietf.org/rfc/rfc3339) timestamp format.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2016-08-25T21:10:29.600Z"
freeform_tags:
description:
- "Free-form tags for this resource. Each tag is a simple key-value pair
with no predefined name, type, or namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
kms_key_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the KMS key which is the master encryption key for the
file system.
returned: on success
type: str
sample: "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx"
source_details:
description:
- ""
returned: on success
type: complex
contains:
parent_file_system_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the file system that contains the source
snapshot of a cloned file system.
See L(Cloning a File System,https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm).
returned: on success
type: str
sample: "ocid1.parentfilesystem.oc1..xxxxxxEXAMPLExxxxxx"
source_snapshot_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the source snapshot used to create a cloned file
system.
See L(Cloning a File System,https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm).
returned: on success
type: str
sample: "ocid1.sourcesnapshot.oc1..xxxxxxEXAMPLExxxxxx"
is_clone_parent:
description:
- Specifies whether the file system has been cloned.
See L(Cloning a File System,https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm).
returned: on success
type: bool
sample: true
is_hydrated:
description:
- Specifies whether the data has finished copying from the source to the clone.
Hydration can take up to several hours to complete depending on the size of the source.
The source and clone remain available during hydration, but there may be some performance impact.
See L(Cloning a File System,https://docs.cloud.oracle.com/iaas/Content/File/Tasks/cloningafilesystem.htm#hydration).
returned: on success
type: bool
sample: true
lifecycle_details:
description:
- Additional information about the current 'lifecycleState'.
returned: on success
type: str
sample: lifecycle_details_example
sample: [{
"availability_domain": "Uocm:PHX-AD-1",
"metered_bytes": 56,
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "My file system",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"lifecycle_state": "CREATING",
"time_created": "2016-08-25T21:10:29.600Z",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"kms_key_id": "ocid1.kmskey.oc1..xxxxxxEXAMPLExxxxxx",
"source_details": {
"parent_file_system_id": "ocid1.parentfilesystem.oc1..xxxxxxEXAMPLExxxxxx",
"source_snapshot_id": "ocid1.sourcesnapshot.oc1..xxxxxxEXAMPLExxxxxx"
},
"is_clone_parent": true,
"is_hydrated": true,
"lifecycle_details": "lifecycle_details_example"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.file_storage import FileStorageClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
# NOTE: per the file header this module is GENERATED; keep manual edits to
# comments only, as they will be overwritten on regeneration.
class FileSystemFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get, list"""

    def get_required_params_for_get(self):
        # A single file system is addressed directly by its OCID.
        return [
            "file_system_id",
        ]

    def get_required_params_for_list(self):
        # Listing is scoped to one compartment within one availability domain.
        return [
            "compartment_id",
            "availability_domain",
        ]

    def get_resource(self):
        # Fetch one file system, retrying transient failures with backoff.
        return oci_common_utils.call_with_backoff(
            self.client.get_file_system,
            file_system_id=self.module.params.get("file_system_id"),
        )

    def list_resources(self):
        # Forward only the optional filters the user actually supplied.
        optional_list_method_params = [
            "display_name",
            "lifecycle_state",
            "source_snapshot_id",
            "parent_file_system_id",
            "sort_by",
            "sort_order",
        ]
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
        )
        return oci_common_utils.list_all_resources(
            self.client.list_file_systems,
            compartment_id=self.module.params.get("compartment_id"),
            availability_domain=self.module.params.get("availability_domain"),
            **optional_kwargs
        )
# Mix any user-registered customization class in ahead of the generated
# helper; get_custom_class returns a no-op base when none is registered.
FileSystemFactsHelperCustom = get_custom_class("FileSystemFactsHelperCustom")


class ResourceFactsHelper(FileSystemFactsHelperCustom, FileSystemFactsHelperGen):
    pass
def main():
    """Ansible module entry point: validate args and return file system facts."""
    # Common OCI auth/connection options plus this module's own options.
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            file_system_id=dict(aliases=["id"], type="str"),
            compartment_id=dict(type="str"),
            availability_domain=dict(type="str"),
            display_name=dict(aliases=["name"], type="str"),
            lifecycle_state=dict(
                type="str",
                choices=["CREATING", "ACTIVE", "DELETING", "DELETED", "FAILED"],
            ),
            source_snapshot_id=dict(type="str"),
            parent_file_system_id=dict(type="str"),
            sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
        )
    )

    module = AnsibleModule(argument_spec=module_args)

    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="file_system",
        service_client_class=FileStorageClient,
        namespace="file_storage",
    )

    result = []

    # Dispatch on the supplied params: an OCID triggers a single get, a
    # compartment + availability domain triggers a list, otherwise fail.
    if resource_facts_helper.is_get():
        result = [resource_facts_helper.get()]
    elif resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()

    module.exit_json(file_systems=result)
if __name__ == "__main__":
main()
| 39.31383 | 160 | 0.615884 |
ca14cd7ff7f12c5d094af77f0fe3546d7922122e | 1,332 | py | Python | libevent/fields.py | adamvinueza/eventlogger | 23053cbb78fb40a654af1e6a441d5dcc256b6594 | [
"Apache-2.0"
] | null | null | null | libevent/fields.py | adamvinueza/eventlogger | 23053cbb78fb40a654af1e6a441d5dcc256b6594 | [
"Apache-2.0"
] | 1 | 2020-10-26T15:05:24.000Z | 2020-12-12T16:17:47.000Z | libevent/fields.py | adamvinueza/libevent | 23053cbb78fb40a654af1e6a441d5dcc256b6594 | [
"MIT"
] | null | null | null | from __future__ import annotations
import json
from typing import Any, Dict
import libevent.json_serializer as json_serializer
"""
ADAPTED FROM FieldsHolder CLASS AT https://github.com/honeycombio/libhoney-py/
"""
ERROR = "error"
LOG_LEVEL = "logLevel"
class Fields:
"""A field that can be logged."""
def __init__(self):
self._data = {}
def __add__(self, other: Any) -> Fields:
self._data.update(other.get_data())
return self
def __eq__(self, other: Any) -> bool:
return self._data == other.get_data()
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __getitem__(self, key: str) -> Any:
return self._data[key]
def __contains__(self, key: str) -> bool:
return key in self._data
def add_field(self, name: str, val: Any) -> None:
self._data[name] = val
def add(self, data: Dict) -> None:
try:
for k, v in data.items():
self.add_field(k, v)
except AttributeError:
raise TypeError("add requires a dict-like argument")
def get_data(self) -> Dict:
return self._data
def is_empty(self) -> bool:
return len(self._data) == 0
def __str__(self) -> str:
return json.dumps(self._data, default=json_serializer.default)
| 25.615385 | 78 | 0.622372 |
76bb2709f8431dbee347fdef91920885309c2d16 | 1,516 | py | Python | otodom/notifier.py | grodowski/spider | 863465097fe813833be7aa651d05af83e70c3fdc | [
"MIT"
] | 1 | 2021-11-19T14:57:32.000Z | 2021-11-19T14:57:32.000Z | otodom/notifier.py | grodowski/spider | 863465097fe813833be7aa651d05af83e70c3fdc | [
"MIT"
] | null | null | null | otodom/notifier.py | grodowski/spider | 863465097fe813833be7aa651d05af83e70c3fdc | [
"MIT"
] | null | null | null | # coding: utf8
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from server.renderer import Renderer
smtp_login = os.getenv('SMTP_LOGIN')
smtp_pwd = os.getenv('SMTP_PWD')
recipients = os.getenv('EMAIL_TO')
def deliver_now(items):
    """Convenience wrapper: build a Notifier for *items* and send the mail."""
    Notifier(items).deliver_now()
class Notifier(object):
    """Emails an HTML digest of new search results via Mailgun SMTP."""

    def __init__(self, items):
        # items: iterable of search results to include in the digest.
        self.items = items

    def build_html(self):
        """Return the HTML mail body with all items rendered into a table."""
        return f"""\
<html>
<head></head>
<body>
<p>Hi!<br>
I have some new offers for you! 😇
</p>
<table>
{Renderer().render(self.items)}
</table>
</body>
</html>
"""

    def deliver_now(self):
        """Send the digest now; no-op when SMTP is unconfigured or there
        are no items."""
        if not (smtp_login and smtp_pwd):
            print("Notifier: pass, SMTP not configured. Set SMTP_LOGIN and SMTP_PWD")
            return
        # BUG FIX: was `len(self.items) is 0` -- identity comparison with an
        # int literal is implementation-defined (SyntaxWarning on 3.8+);
        # use truthiness instead.
        if not self.items:
            print("Notifier: no new search results - skipping")
            return
        msg = MIMEMultipart('alternative')
        msg['Subject'] = "New search results"
        msg['From'] = 'OtoDom Bot'
        # NOTE(review): assumes EMAIL_TO is set; None here would crash on
        # split() below -- confirm deployment always sets it.
        msg['To'] = recipients
        msg.attach(MIMEText(self.build_html(), 'html'))
        server_ssl = smtplib.SMTP_SSL('smtp.mailgun.com', 465)
        server_ssl.ehlo()
        server_ssl.login(smtp_login, smtp_pwd)
        server_ssl.sendmail(smtp_login, recipients.split(','), msg.as_string())
        server_ssl.quit()
        print(f"Notifier: sent mail to {recipients}")
| 28.074074 | 85 | 0.594327 |
afd3d7e94494d24d32abdfb7b161dd10fcf93918 | 3,887 | py | Python | nvtabular/ops/hash_bucket.py | deepyaman/NVTabular | b814b5ed9866be29d3c13fd00154965a3fec7fc0 | [
"Apache-2.0"
] | null | null | null | nvtabular/ops/hash_bucket.py | deepyaman/NVTabular | b814b5ed9866be29d3c13fd00154965a3fec7fc0 | [
"Apache-2.0"
] | null | null | null | nvtabular/ops/hash_bucket.py | deepyaman/NVTabular | b814b5ed9866be29d3c13fd00154965a3fec7fc0 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Dict, Union
import cudf
from cudf.utils.dtypes import is_list_dtype
from nvtx import annotate
from ..dispatch import _encode_list_column
from .categorify import _emb_sz_rule, _get_embedding_order
from .operator import ColumnNames, Operator
class HashBucket(Operator):
    """
    This op maps categorical columns to a contiguous integer range
    by first hashing the column then modulating by the number of
    buckets as indicated by `num_buckets`.

    Example usage::

        cat_names = ["feature_a", "feature_b"]

        # this will hash feature_a into 100 buckets and feature_b into 50
        hash_features = cat_names >> ops.HashBucket({"feature_a": 100, "feature_b": 50})
        processor = nvtabular.Workflow(hash_features)

    The output of this op would be::

        feature_a feature_b
        0 90 11
        1 70 40
        2 52 9

    If you would like to do frequency capping or frequency hashing,
    you should use Categorify op instead. See
    `Categorify op <https://github.com/NVIDIA/NVTabular/blob/main/nvtabular/ops/categorify.py#L43>`_
    for example usage.

    Parameters
    ----------
    num_buckets : int or dictionary:{column: num_hash_buckets}
        Column-wise modulo to apply after hash function. Note that this
        means that the corresponding value will be the categorical cardinality
        of the transformed categorical feature. If given as an int, that value
        will be used as the number of "hash buckets" for every feature.
        If a dictionary is passed, it will be used to specify
        explicit mappings from a column name to a number of buckets. In
        this case, only the columns specified in the keys of `num_buckets`
        will be transformed.
    """

    def __init__(self, num_buckets: Union[int, Dict[str, int]]):
        # Both accepted forms are stored as-is; transform() expands an int
        # into a per-column mapping on the fly.  (The two original branches
        # were byte-identical, and the error message wrongly claimed that
        # arbitrary iterables were accepted.)
        if not isinstance(num_buckets, (int, dict)):
            raise TypeError(
                "`num_buckets` must be dict or int, got type {}".format(type(num_buckets))
            )
        self.num_buckets = num_buckets
        super(HashBucket, self).__init__()

    @annotate("HashBucket_op", color="darkgreen", domain="nvt_python")
    def transform(self, columns: ColumnNames, gdf: cudf.DataFrame) -> cudf.DataFrame:
        if isinstance(self.num_buckets, int):
            num_buckets = {name: self.num_buckets for name in columns}
        else:
            num_buckets = self.num_buckets

        for col, nb in num_buckets.items():
            # List (multi-hot) columns: hash the leaf values, keep offsets.
            if is_list_dtype(gdf[col].dtype):
                gdf[col] = _encode_list_column(gdf[col], gdf[col].list.leaves.hash_values() % nb)
            else:
                gdf[col] = gdf[col].hash_values() % nb
        return gdf

    transform.__doc__ = Operator.transform.__doc__

    def get_embedding_sizes(self, columns):
        """Return {column: (cardinality, embedding_dim)} for the hashed columns."""
        columns = _get_embedding_order(columns)
        if isinstance(self.num_buckets, int):
            embedding_size = _emb_sz_rule(self.num_buckets)
            return {col: embedding_size for col in columns}
        return {col: _emb_sz_rule(self.num_buckets[col]) for col in columns}
| 37.375 | 100 | 0.663236 |
4f3ef9ecc50d351aa63cea5b71eccf91c795d633 | 96 | py | Python | python/PracticeA.py | teinen/atcoder-beginners-selection-answers | 446a7a6de0f8d9ce6e2fd798ef467e55d7dc0119 | [
"MIT"
] | null | null | null | python/PracticeA.py | teinen/atcoder-beginners-selection-answers | 446a7a6de0f8d9ce6e2fd798ef467e55d7dc0119 | [
"MIT"
] | null | null | null | python/PracticeA.py | teinen/atcoder-beginners-selection-answers | 446a7a6de0f8d9ce6e2fd798ef467e55d7dc0119 | [
"MIT"
] | null | null | null | a = int(input())
b,c = map(int, input().split())
s = input()
print("{} {}".format(a + b + c, s)) | 24 | 35 | 0.5 |
1ed23f35ca26a24f7c638638524338262c7c309f | 753 | py | Python | common/views.py | themarshallproject/hall-of-justice | 8c2ce47f792e0182c84d13dca808564a6ac376b4 | [
"BSD-3-Clause"
] | 7 | 2017-04-28T20:18:29.000Z | 2020-11-23T16:35:05.000Z | common/views.py | themarshallproject/hall-of-justice | 8c2ce47f792e0182c84d13dca808564a6ac376b4 | [
"BSD-3-Clause"
] | 44 | 2015-03-20T17:16:58.000Z | 2016-04-27T20:17:29.000Z | common/views.py | sunlightlabs/hall-of-justice | 8c2ce47f792e0182c84d13dca808564a6ac376b4 | [
"BSD-3-Clause"
] | 5 | 2015-06-22T13:49:08.000Z | 2016-01-30T23:19:16.000Z | from django.views.generic.list import MultipleObjectMixin
from django.http import StreamingHttpResponse
from common.utils import generate_csv
class CSVExportMixin(MultipleObjectMixin):
    """Mixin for exporting data as CSV using GET requests"""

    def get(self, request, *args, **kwargs):
        """Stream every row of the view's queryset as a CSV attachment."""
        model = self.model._meta.concrete_model
        # Export every concrete/related field except the primary key.
        fieldnames = [
            field.name
            for field in model._meta.get_fields()
            if field.name != 'id'
        ]
        queryset = self.get_queryset()
        rows = generate_csv(queryset, fieldnames)
        response = StreamingHttpResponse(rows, content_type="text/csv")
        response['Content-Disposition'] = 'attachment; filename="criminal-justice-{}-rows.csv"'.format(queryset.count())
        return response
| 41.833333 | 114 | 0.7251 |
ef14902f6337804a41110a8995c392053059a3a5 | 1,822 | py | Python | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/asyncio/asyncio_echo_client_ssl.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/asyncio/asyncio_echo_client_ssl.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/CD_Sata-Structures/_MISC/misc-examples/python3-book-examples/asyncio/asyncio_echo_client_ssl.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | # Copyright (c) 2014 Doug Hellmann. All rights reserved.
"""Echo client using coroutines with SSL enabled
"""
# end_pymotw_header
import asyncio
import logging
import ssl
import sys
# Payload is deliberately split into several chunks to exercise partial writes.
MESSAGES = [b"This is the message. ", b"It will be sent ", b"in parts."]
# (host, port) the echo server is expected to be listening on.
SERVER_ADDRESS = ("localhost", 10000)
async def echo_client(server_address, messages):
    """Connect to the TLS echo server, send ``messages``, and log the echo."""
    log = logging.getLogger("echo_client")

    # The certificate is created with pymotw.com as the hostname,
    # which will not match when the example code runs
    # elsewhere, so disable hostname verification.
    ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    ssl_context.check_hostname = False
    ssl_context.load_verify_locations("pymotw.crt")

    log.debug("connecting to {} port {}".format(*server_address))
    reader, writer = await asyncio.open_connection(*server_address, ssl=ssl_context)

    # Send each chunk individually so every partial write is logged.
    for msg in messages:
        writer.write(msg)
        log.debug("sending {!r}".format(msg))

    # SSL does not support EOF, so send a null byte to indicate
    # the end of the message.
    writer.write(b"\x00")
    await writer.drain()

    log.debug("waiting for response")
    while True:
        data = await reader.read(128)
        if not data:
            # Empty read: server closed its side; tear down and exit.
            log.debug("closing")
            writer.close()
            return
        log.debug("received {!r}".format(data))
# Verbose logging so each protocol step is visible on stderr.
logging.basicConfig(
    level=logging.DEBUG, format="%(name)s: %(message)s", stream=sys.stderr
)
log = logging.getLogger("main")
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# since Python 3.10; consider asyncio.run(echo_client(...)) -- confirm the
# project's minimum Python version before changing.
event_loop = asyncio.get_event_loop()
try:
    event_loop.run_until_complete(echo_client(SERVER_ADDRESS, MESSAGES))
finally:
    log.debug("closing event loop")
    event_loop.close()
| 28.920635 | 84 | 0.686059 |
b58d0d9d611aa10b5a1ba8db5f2c54424a05e0c1 | 31,374 | py | Python | sysinv/sysinv/sysinv/sysinv/tests/api/test_controller_fs.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-02-07T18:57:44.000Z | 2021-09-11T10:29:34.000Z | sysinv/sysinv/sysinv/sysinv/tests/api/test_controller_fs.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 1 | 2021-01-14T12:01:55.000Z | 2021-01-14T12:01:55.000Z | sysinv/sysinv/sysinv/sysinv/tests/api/test_controller_fs.py | albailey/config | 40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9 | [
"Apache-2.0"
] | 10 | 2020-10-13T08:37:46.000Z | 2022-02-09T00:21:25.000Z | #
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
Tests for the API / controller-fs / methods.
"""
import mock
from six.moves import http_client
from sysinv.tests.api import base
from sysinv.tests.db import base as dbbase
from sysinv.tests.db import utils as dbutils
class FakeConductorAPI(object):
    """Stand-in for the sysinv conductor RPC API.

    Every RPC entry point used by the tests is a MagicMock so individual
    tests can set return values and assert on calls.
    """

    def __init__(self):
        for rpc_name in ('get_controllerfs_lv_sizes', 'update_storage_config'):
            setattr(self, rpc_name, mock.MagicMock())
class FakeException(Exception):
    """Marker exception raised by test doubles to simulate RPC failures."""
class ApiControllerFSTestCaseMixin(base.FunctionalTest,
                                   dbbase.ControllerHostTestCase):
    """Shared fixtures, constants and URL helpers for controller_fs API tests."""

    # API_HEADERS are a generic header passed to most API calls
    API_HEADERS = {'User-Agent': 'sysinv-test'}

    # API_PREFIX is the prefix for the URL
    API_PREFIX = '/controller_fs'

    # RESULT_KEY is the python table key for the list of results
    RESULT_KEY = 'controller_fs'

    # expected_api_fields are attributes that should be populated by
    # an API query
    expected_api_fields = ['logical_volume',
                           'uuid',
                           'links',
                           'created_at',
                           'updated_at',
                           'name',
                           'state',
                           'isystem_uuid',
                           'replicated',
                           'forisystemid',
                           'size']

    # hidden_api_fields are attributes that should not be populated by
    # an API query
    hidden_api_fields = ['forisystemid']

    def setUp(self):
        """Create three controller_fs rows and patch the conductor RPC API."""
        super(ApiControllerFSTestCaseMixin, self).setUp()
        self.controller_fs_first = self._create_db_object('platform',
                                                          10,
                                                          'platform-lv')
        self.controller_fs_second = self._create_db_object('database',
                                                           5,
                                                           'pgsql-lv')
        self.controller_fs_third = self._create_db_object('extension',
                                                          1,
                                                          'extension-lv')
        # Replace the conductor RPC client with a fake so no RPC traffic
        # happens; patch is undone automatically at test teardown.
        self.fake_conductor_api = FakeConductorAPI()
        p = mock.patch('sysinv.conductor.rpcapi.ConductorAPI')
        self.mock_conductor_api = p.start()
        self.mock_conductor_api.return_value = self.fake_conductor_api
        self.addCleanup(p.stop)

    def get_show_url(self, uuid):
        """URL for a single controller_fs resource."""
        return '%s/%s' % (self.API_PREFIX, uuid)

    def get_detail_url(self):
        """URL for the detail listing of all controller_fs resources."""
        return '%s/detail' % (self.API_PREFIX)

    def get_update_url(self, system_uuid):
        """URL for the bulk update endpoint scoped to a system."""
        return '/isystems/%s/controller_fs/update_many' % (system_uuid)

    def get_sorted_list_url(self, sort_attr, sort_dir):
        """URL for the list endpoint with explicit sort key/direction."""
        return '%s/?sort_key=%s&sort_dir=%s' % (self.API_PREFIX, sort_attr,
                                                sort_dir)

    def _create_db_object(self, controller_fs_name, controller_fs_size,
                          controller_lv, obj_id=None):
        """Insert a replicated, 'available' controller_fs row for this system."""
        return dbutils.create_test_controller_fs(id=obj_id,
                                                 uuid=None,
                                                 name=controller_fs_name,
                                                 forisystemid=self.system.id,
                                                 state='available',
                                                 size=controller_fs_size,
                                                 logical_volume=controller_lv,
                                                 replicated=True,
                                                 isystem_uuid=self.system.uuid)
class ApiControllerFSListTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem List GET operations
    """
    def setUp(self):
        super(ApiControllerFSListTestSuiteMixin, self).setUp()

    def test_success_fetch_controller_fs_list(self):
        """Unsorted list must contain the fixture rows (order unspecified)."""
        response = self.get_json(self.API_PREFIX, headers=self.API_HEADERS)

        # Verify the values of the response with the values stored in database
        result_one = response[self.RESULT_KEY][0]
        result_two = response[self.RESULT_KEY][1]
        self.assertTrue(result_one['name'] == self.controller_fs_first.name or
                        result_two['name'] == self.controller_fs_first.name)
        self.assertTrue(result_one['name'] == self.controller_fs_second.name or
                        result_two['name'] == self.controller_fs_second.name)

    def test_success_fetch_controller_fs_sorted_list(self):
        """Sorting by name ascending yields database < extension < platform."""
        response = self.get_json(self.get_sorted_list_url('name', 'asc'))

        # Verify the values of the response are returned in a sorted order
        result_one = response[self.RESULT_KEY][0]
        result_two = response[self.RESULT_KEY][1]
        result_three = response[self.RESULT_KEY][2]
        self.assertEqual(result_one['name'], self.controller_fs_second.name)
        self.assertEqual(result_two['name'], self.controller_fs_third.name)
        self.assertEqual(result_three['name'], self.controller_fs_first.name)
class ApiControllerFSShowTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem Show GET operations
    """
    def setUp(self):
        super(ApiControllerFSShowTestSuiteMixin, self).setUp()

    def test_fetch_controller_fs_object(self):
        """GET a single controller_fs and verify every returned field.

        Bug fix: the original assertions called ``assertTrue(actual,
        expected)``; assertTrue treats its second argument as the failure
        *message*, so the values were never compared and the test passed
        for any truthy field.  ``assertEqual`` performs the comparison.
        """
        url = self.get_show_url(self.controller_fs_first.uuid)
        response = self.get_json(url)

        # Verify the values of the response with the values stored in database
        self.assertEqual(response['name'], self.controller_fs_first.name)
        self.assertEqual(response['logical_volume'],
                         self.controller_fs_first.logical_volume)
        self.assertEqual(response['state'], self.controller_fs_first.state)
        self.assertEqual(response['replicated'],
                         self.controller_fs_first.replicated)
        self.assertEqual(response['size'], self.controller_fs_first.size)
        self.assertEqual(response['uuid'], self.controller_fs_first.uuid)
class ApiControllerFSPutTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem Put operations
    """
    def setUp(self):
        super(ApiControllerFSPutTestSuiteMixin, self).setUp()
        # Shorthand for the fake conductor's LV-size RPC mock.
        self.fake_lv_size = self.fake_conductor_api.get_controllerfs_lv_sizes
        # NOTE(review): attribute name says "is_virtual" but the patched
        # target is is_host_state_valid_for_fs_resize -- the mock forces the
        # host-state check to pass; confirm before renaming.
        p = mock.patch(
            'sysinv.api.controllers.v1.utils.is_host_state_valid_for_fs_resize')
        self.mock_utils_is_virtual = p.start()
        self.mock_utils_is_virtual.return_value = True
        self.addCleanup(p.stop)

    def exception_controller_fs(self):
        """Side-effect helper: raise FakeException from a mocked RPC call."""
        print('Raised a fake exception')
        raise FakeException

    def test_put_duplicate_fs_name(self):
        """Two updates naming the same filesystem must be rejected."""
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Duplicate fs_name 'extension' in parameter list",
                      response.json['error_message'])

    def test_put_invalid_fs_name(self):
        """An unknown filesystem name must be rejected."""
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "invalid_name",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("ControllerFs update failed: invalid filesystem",
                      response.json['error_message'])

    def test_put_invalid_fs_size(self):
        """A non-integer size must be rejected."""
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "invalid_size",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "4",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("ControllerFs update failed: filesystem \'extension\' "
                      "size must be an integer", response.json['error_message'])

    def test_put_smaller_than_existing_fs_size(self):
        """Shrinking a filesystem below its current size must be rejected."""
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "4",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("ControllerFs update failed: size for "
                      "filesystem \'database\' should be bigger than 5",
                      response.json['error_message'])

    @mock.patch('sysinv.api.controllers.v1.utils.is_drbd_fs_resizing')
    def test_put_drbd_sync_error(self, is_drbd_fs_resizing):
        """Resize requests must be rejected while a DRBD sync is running."""
        is_drbd_fs_resizing.return_value = True
        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "4",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("A drbd sync operation is currently in progress. "
                      "Retry again later.",
                      response.json['error_message'])

    def test_put_size_not_found(self):
        """Reject the update when a target LV size cannot be determined."""
        # Return fake dictionary for logical volume and size
        # (pgsql-lv deliberately missing).
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Unable to determine the current size of pgsql-lv. "
                      "Rejecting modification request.",
                      response.json['error_message'])

    def test_put_minimum_size(self):
        """Reject the update when the reported LV size exceeds the request."""
        # Return fake dictionary for logical volume and size
        # (platform-lv reports 16, larger than its fixture size of 10).
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 16}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("'platform' must be at least: 16",
                      response.json['error_message'])

    def test_put_insufficient_backup_size(self):
        """Reject the update when no backup host-fs exists to absorb it."""
        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("backup size of 0 is insufficient for host controller-0. "
                      "Minimum backup size of 21 is required based upon "
                      "platform size 10 and database size 6. "
                      "Rejecting modification request.",
                      response.json['error_message'])

    def test_put_unprovisioned_physical_volume(self):
        """Reject resize while any physical volume is still unprovisioned."""
        # Create an unprovisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=1,
                               pv_state='unprovisioned')

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Cannot resize filesystem. There are still "
                      "unprovisioned physical volumes on controller-0.",
                      response.json['error_message'])

    def test_put_exceed_growth_limit(self):
        """Reject growth larger than the volume group's free space allows."""
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=1,
                               pv_state='provisioned')

        # Create a logical volume with no usable free space
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id,
                                lvm_vg_size=200,
                                lvm_vg_free_pe=50)

        # Create a host filesystem
        dbutils.create_test_host_fs(name='backup',
                                    forihostid=self.host.id)

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Total target growth size 9 GiB "
                      "exceeds growth limit of 0 GiB.",
                      response.json['error_message'])

    def test_put_update_exception(self):
        """An RPC failure during the storage update surfaces as a 400."""
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')

        # Create a logical volume
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id)

        # Create a host filesystem
        dbutils.create_test_host_fs(name='backup',
                                    forihostid=self.host.id)

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        # Throw a fake exception
        fake_update = self.fake_conductor_api.update_storage_config
        fake_update.side_effect = self.exception_controller_fs

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify appropriate exception is raised
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.BAD_REQUEST)
        self.assertIn("Failed to update filesystem size",
                      response.json['error_message'])

    def test_put_success(self):
        """A well-formed resize request succeeds with 204 No Content."""
        # Create a provisioned physical volume in database
        dbutils.create_test_pv(lvm_vg_name='cgts-vg',
                               forihostid=self.host.id,
                               pv_state='provisioned')

        # Create a logical volume
        dbutils.create_test_lvg(lvm_vg_name='cgts-vg',
                                forihostid=self.host.id)

        # Create a host filesystem
        dbutils.create_test_host_fs(name='backup',
                                    forihostid=self.host.id)

        # Return fake dictionary for logical volume and size
        self.fake_lv_size.return_value = {'extension-lv': 1,
                                          'pgsql-lv': 5,
                                          'platform-lv': 10}

        response = self.put_json(self.get_update_url(self.system.uuid),
                                 [[{"path": "/name",
                                    "value": "extension",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "2",
                                    "op": "replace"}],
                                  [{"path": "/name",
                                    "value": "database",
                                    "op": "replace"},
                                   {"path": "/size",
                                    "value": "6",
                                    "op": "replace"}]],
                                 headers=self.API_HEADERS,
                                 expect_errors=True)

        # Verify a NO CONTENT response is given
        self.assertEqual(response.status_code, http_client.NO_CONTENT)
class ApiControllerFSDetailTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem detail operations
    """
    def setUp(self):
        super(ApiControllerFSDetailTestSuiteMixin, self).setUp()

    # Fetch the /detail listing and verify every field of each row matches
    # the database fixtures.  (Previous comment here wrongly described a
    # blocked PATCH operation -- this is a plain GET.)
    def test_success_detail(self):
        response = self.get_json(self.get_detail_url(),
                                 headers=self.API_HEADERS,
                                 expect_errors=True)
        self.assertEqual(response.status_code, http_client.OK)

        result_one = response.json[self.RESULT_KEY][0]
        result_two = response.json[self.RESULT_KEY][1]
        result_three = response.json[self.RESULT_KEY][2]

        # Response object 1
        self.assertEqual(result_one['size'], self.controller_fs_first.size)
        self.assertEqual(result_one['isystem_uuid'], self.controller_fs_first.isystem_uuid)
        self.assertEqual(result_one['name'], self.controller_fs_first.name)
        self.assertEqual(result_one['logical_volume'], self.controller_fs_first.logical_volume)
        self.assertEqual(result_one['forisystemid'], self.controller_fs_first.forisystemid)
        self.assertEqual(result_one['action'], None)
        self.assertEqual(result_one['uuid'], self.controller_fs_first.uuid)
        self.assertEqual(result_one['state'], self.controller_fs_first.state)
        self.assertEqual(result_one['replicated'], self.controller_fs_first.replicated)

        # Response object 2
        self.assertEqual(result_two['size'], self.controller_fs_second.size)
        self.assertEqual(result_two['isystem_uuid'], self.controller_fs_second.isystem_uuid)
        self.assertEqual(result_two['name'], self.controller_fs_second.name)
        self.assertEqual(result_two['logical_volume'], self.controller_fs_second.logical_volume)
        self.assertEqual(result_two['forisystemid'], self.controller_fs_second.forisystemid)
        self.assertEqual(result_two['action'], None)
        self.assertEqual(result_two['uuid'], self.controller_fs_second.uuid)
        self.assertEqual(result_two['state'], self.controller_fs_second.state)
        self.assertEqual(result_two['replicated'], self.controller_fs_second.replicated)

        # Response object 3
        self.assertEqual(result_three['size'], self.controller_fs_third.size)
        self.assertEqual(result_three['isystem_uuid'], self.controller_fs_third.isystem_uuid)
        self.assertEqual(result_three['name'], self.controller_fs_third.name)
        self.assertEqual(result_three['logical_volume'], self.controller_fs_third.logical_volume)
        self.assertEqual(result_three['forisystemid'], self.controller_fs_third.forisystemid)
        self.assertEqual(result_three['action'], None)
        self.assertEqual(result_three['uuid'], self.controller_fs_third.uuid)
        self.assertEqual(result_three['state'], self.controller_fs_third.state)
        self.assertEqual(result_three['replicated'], self.controller_fs_third.replicated)
class ApiControllerFSPatchTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem patch operations
    """
    def setUp(self):
        super(ApiControllerFSPatchTestSuiteMixin, self).setUp()

    def test_patch_not_allowed(self):
        """Even a well-formed PATCH on a single resource is forbidden (403)."""
        patch_body = [{"path": "/name",
                       "value": "extension",
                       "op": "replace"},
                      {"path": "/size",
                       "value": "2",
                       "op": "replace"}]
        target_url = self.get_show_url(self.controller_fs_third.uuid)
        response = self.patch_json(target_url,
                                   patch_body,
                                   headers=self.API_HEADERS,
                                   expect_errors=True)

        # The API must refuse per-resource PATCH outright.
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])
class ApiControllerFSDeleteTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem delete operations
    """
    def setUp(self):
        super(ApiControllerFSDeleteTestSuiteMixin, self).setUp()

    def test_delete_not_allowed(self):
        """DELETE on a controller_fs resource must be forbidden (403)."""
        target_url = self.get_show_url(self.controller_fs_third.uuid)
        response = self.delete(target_url,
                               headers=self.API_HEADERS,
                               expect_errors=True)

        # The API must refuse resource deletion outright.
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])
class ApiControllerFSPostTestSuiteMixin(ApiControllerFSTestCaseMixin):
    """ Controller FileSystem post operations
    """
    def setUp(self):
        super(ApiControllerFSPostTestSuiteMixin, self).setUp()

    def test_post_not_allowed(self):
        """POSTing a new controller_fs resource must be forbidden (403)."""
        new_fs = {'name': 'platform-new',
                  'size': 10,
                  'logical_volume': 'platform-lv'}
        response = self.post_json(self.API_PREFIX,
                                  new_fs,
                                  headers=self.API_HEADERS,
                                  expect_errors=True)

        # The API must refuse resource creation outright.
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.status_code, http_client.FORBIDDEN)
        self.assertIn("Operation not permitted", response.json['error_message'])
| 48.045942 | 97 | 0.494295 |
9f6e9dea24ab05d642090ae8b271beb082315aeb | 1,180 | py | Python | project/db/admin.py | samsonosiomwan/Devs-Prime-Api | 7b43078bb1f848f17f85e8bb94292d1b776eee92 | [
"MIT"
] | null | null | null | project/db/admin.py | samsonosiomwan/Devs-Prime-Api | 7b43078bb1f848f17f85e8bb94292d1b776eee92 | [
"MIT"
] | 1 | 2021-10-21T22:13:56.000Z | 2021-10-21T22:13:57.000Z | project/db/admin.py | Favourkass/Devsprime-api | 2414a2541efeb76b6a7ebb26c2d05a3bfead153c | [
"MIT"
] | null | null | null | from django.contrib import admin
from db.models import (user, learner, instructors,
course, course_type,
course_category, learner_course,
blogs, comment, contact, reply,
course_payment, order_status, orders,
course_category, order_status,
blogs, comment, course_video, contact, reply, orders, learner_course, cart)
# Register every content model with the default ModelAdmin so staff can
# manage them from the Django admin site.
admin.site.register(course.Course)
admin.site.register(course_video.CourseVideo)
admin.site.register(course_category.CourseCategory)
admin.site.register(course_type.CourseType)
admin.site.register(user.User)
admin.site.register(learner.LearnerProfile)
admin.site.register(instructors.Instructor)
admin.site.register(blogs.Blog)
admin.site.register(comment.Comment)
admin.site.register(reply.Reply)
admin.site.register(contact.Contact)
admin.site.register(learner_course.LearnerCourse)
admin.site.register(order_status.OrderStatus)
admin.site.register(orders.Order)
admin.site.register(cart.Cart)
@admin.register(course_payment.CoursePayment)
class CoursePayment(admin.ModelAdmin):
    """Admin for payments; the Paystack reference must not be edited by staff."""
    # Paystack assigns this id externally; keep it read-only in the admin.
    readonly_fields = ('paystack_id', )
| 35.757576 | 98 | 0.730508 |
2f84dd0b4b31fb42a3ad5089cf2a8399f16b34b9 | 5,859 | py | Python | lib/twitter/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 206 | 2015-10-15T07:05:08.000Z | 2021-02-19T11:48:36.000Z | lib/twitter/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 8 | 2017-10-16T10:18:31.000Z | 2022-03-09T14:24:27.000Z | lib/twitter/utils.py | goztrk/django-htk | c56bf112e5d627780d2f4288460eae5cce80fa9e | [
"MIT"
] | 61 | 2015-10-15T08:12:44.000Z | 2022-03-10T12:25:06.000Z | # Python Standard Library Imports
import time
# Third Party (PyPI) Imports
import rollbar
import tweepy
# HTK Imports
from htk.utils import chunks
def _get_auth_keys():
    """Return the app-wide Twitter OAuth credentials from Django settings.

    Returns a 4-tuple: (consumer_key, consumer_secret,
    access_token_key, access_token_secret).
    """
    from django.conf import settings
    return (
        settings.SOCIAL_AUTH_TWITTER_KEY,
        settings.SOCIAL_AUTH_TWITTER_SECRET,
        settings.SOCIAL_AUTH_TWITTER_ACCESS_TOKEN,
        settings.SOCIAL_AUTH_TWITTER_ACCESS_TOKEN_SECRET,
    )
def get_api(consumer_key=None, consumer_secret=None, access_token_key=None, access_token_secret=None, wait_on_rate_limit=False):
    """Build an HtkTwitterAPI client from the given OAuth credentials."""
    from htk.lib.twitter.api import HtkTwitterAPI
    return HtkTwitterAPI(
        consumer_key=consumer_key,
        consumer_secret=consumer_secret,
        access_token_key=access_token_key,
        access_token_secret=access_token_secret,
        wait_on_rate_limit=wait_on_rate_limit,
    )
def get_twitter_api(consumer_key=None, consumer_secret=None, access_token_key=None, access_token_secret=None, wait_on_rate_limit=False):
    """Build a python-twitter ``twitter.Api`` client.

    Falls back to the app-wide credentials from ``_get_auth_keys()`` when
    any of the four OAuth values is missing.

    NOTE: ``wait_on_rate_limit`` is accepted for signature parity with the
    other factories but is not forwarded to ``twitter.Api`` here.
    """
    import twitter
    if not(all((consumer_key, consumer_secret, access_token_key, access_token_secret,))):
        (consumer_key, consumer_secret, access_token_key, access_token_secret,) = _get_auth_keys()
    api = twitter.Api(
        consumer_key=consumer_key,
        consumer_secret=consumer_secret,
        access_token_key=access_token_key,
        access_token_secret=access_token_secret
    )
    return api
def get_tweepy_api(consumer_key=None, consumer_secret=None, access_token_key=None, access_token_secret=None, wait_on_rate_limit=False):
    """Build a ``tweepy.API`` client, defaulting to the app credentials."""
    creds = (consumer_key, consumer_secret, access_token_key, access_token_secret)
    if not all(creds):
        creds = _get_auth_keys()
    consumer_key, consumer_secret, access_token_key, access_token_secret = creds
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token_key, access_token_secret)
    return tweepy.API(auth, wait_on_rate_limit=wait_on_rate_limit)
def get_user(screen_name):
    """Fetch the Twitter user object for ``screen_name``."""
    return get_tweepy_api().get_user(screen_name=screen_name)
def lookup_users_by_id(user_ids):
    """Bulk-resolve Twitter user objects for ``user_ids``.

    https://dev.twitter.com/rest/reference/get/users/lookup
    Requests / 15-min window (app auth): 60

    The lookup endpoint accepts at most 100 ids per request, so the ids
    are processed in chunks of 100 with a throttle between requests.
    """
    api = get_tweepy_api()
    users = []
    for i, chunk in enumerate(chunks(user_ids, 100)):
        if i:
            # Throttle between batches only.  The previous version slept
            # unconditionally after every batch -- including the last one --
            # wasting 15 seconds on every call.
            time.sleep(15)
        users.extend(api.lookup_users(user_ids=chunk))
    return users
def get_lists(screen_name):
    """Return every Twitter list owned or subscribed to by ``screen_name``."""
    return get_tweepy_api().lists_all(screen_name)
def get_lists_with_members(screen_name):
    """Return the user's lists, each paired with its member objects.

    Each element is a dict: {'list': <list obj>, 'members': [<user>, ...]}.
    """
    api = get_twitter_api()
    results = []
    for twitter_list in get_lists(screen_name):
        members = api.GetListMembers(twitter_list.id, None)
        results.append({
            'list': twitter_list,
            'members': members,
        })
    return results
def get_lists_members_deduped(screen_name):
    """Return the sorted, de-duplicated member screen names across all of
    the user's Twitter lists.

    Uses a real ``set`` for de-duplication instead of the previous
    dict-of-True idiom.
    """
    api = get_twitter_api()
    screen_names = set()
    for twitter_list in get_lists(screen_name):
        list_members = api.GetListMembers(twitter_list.id, None)
        screen_names.update(member.screen_name for member in list_members)
    return sorted(screen_names)
def get_following(screen_name):
    """Alias for :func:`get_friends` ("following" in current Twitter terms)."""
    return get_friends(screen_name)
def get_friends(screen_name):
    """Return the full friend (following) list for ``screen_name``.

    https://dev.twitter.com/rest/reference/get/friends/list
    Requests / 15-min window (app auth): 30

    Pages through 200 users at a time, throttling between pages to stay
    under the rate limit.  The dead commented-out python-twitter variant
    that followed the return has been removed.
    """
    api = get_tweepy_api()
    friends = []
    for page_number, page in enumerate(tweepy.Cursor(api.friends, screen_name=screen_name, count=200).pages()):
        if page_number:
            # Throttle between pages (30 requests / 15-min window).
            time.sleep(30)
        friends.extend(page)
    return friends
def get_friends_ids(screen_name):
    """Return the account ids ``screen_name`` follows, 5000 per page.

    On a tweepy rate-limit error the partial result collected so far is
    returned and the incident is reported to Rollbar.
    """
    api = get_tweepy_api()
    ids = []
    is_first = True
    try:
        for page in tweepy.Cursor(api.friends_ids, screen_name=screen_name, count=5000).pages():
            if is_first:
                is_first = False
            else:
                # Throttle between pages to avoid tripping the rate limit.
                time.sleep(60)
            ids.extend(page)
    except tweepy.RateLimitError:
        rollbar.report_exc_info(extra_data={'screen_name': screen_name})
    return ids
def get_followers(screen_name):
    """Return the follower objects for ``screen_name``.

    https://dev.twitter.com/rest/reference/get/followers/list
    Requests / 15-min window (app auth): 30
    """
    return get_twitter_api().GetFollowers(screen_name=screen_name)
def get_followers_ids(screen_name):
    """Return the follower account ids of ``screen_name``, 5000 per page.

    https://dev.twitter.com/rest/reference/get/followers/ids
    Requests / 15-min window (app auth): 15

    On a tweepy rate-limit error the partial result collected so far is
    returned and the incident is reported to Rollbar.
    """
    api = get_tweepy_api()
    ids = []
    is_first = True
    try:
        for page in tweepy.Cursor(api.followers_ids, screen_name=screen_name, count=5000).pages():
            if is_first:
                is_first = False
            else:
                # Throttle between pages to avoid tripping the rate limit.
                time.sleep(60)
            ids.extend(page)
    except tweepy.RateLimitError:
        rollbar.report_exc_info(extra_data={'screen_name': screen_name})
    return ids
def search_tweets(keyword, limit=None, api=None):
    """Get Tweet search results for `keyword`.

    ``api`` may be supplied to reuse an existing tweepy client; otherwise
    a new one is created.
    """
    if api is None:
        api = get_tweepy_api()
    return api.search(q=keyword, count=limit, result_type='recent')
| 30.675393 | 136 | 0.684246 |
32e9b88d53b19efa8b4443f5ae59a0167a05df30 | 1,098 | py | Python | cgtop/layout_creator.py | goyalankit/cgtop | 7ff137243cf32ff91a8da3823bd46396c38173ea | [
"MIT"
] | null | null | null | cgtop/layout_creator.py | goyalankit/cgtop | 7ff137243cf32ff91a8da3823bd46396c38173ea | [
"MIT"
] | null | null | null | cgtop/layout_creator.py | goyalankit/cgtop | 7ff137243cf32ff91a8da3823bd46396c38173ea | [
"MIT"
] | null | null | null | from constants import layout_grid
from models import Layout
from helpers import get_current_cgroups
class LayoutCreator:
    """Creates the blueprint for containers on screen."""

    def __init__(self, max_width, max_height):
        self.max_width = max_width
        self.max_height = max_height
        self.cgroup_names = get_current_cgroups()
        self.num_apps = len(self.cgroup_names)

    def create_layouts(self):
        """Create layout objects with dimensions for a cols x rows grid."""
        cols, rows = layout_grid[self.num_apps]
        # Cell dimensions are constant across the grid, so compute them once.
        # (Python 2 integer division, as in the original.)
        cell_w = self.max_width / cols
        cell_h = self.max_height / rows
        layouts = []
        for row in xrange(rows):
            for col in xrange(cols):
                layouts.append(
                    Layout(col * cell_w, row * cell_h,
                           cell_w, cell_h,
                           (col, row)))
        return layouts
| 28.153846 | 64 | 0.570128 |
5296639de59d8d92c9e20b96eb8a7bbe01c1d828 | 490 | py | Python | tests/test_text_spotting.py | omri374/openvino-textspotting-docker | 1f63e3fbe8a40acd0c4fb12c184646b4f1aa985f | [
"Unlicense"
] | null | null | null | tests/test_text_spotting.py | omri374/openvino-textspotting-docker | 1f63e3fbe8a40acd0c4fb12c184646b4f1aa985f | [
"Unlicense"
] | null | null | null | tests/test_text_spotting.py | omri374/openvino-textspotting-docker | 1f63e3fbe8a40acd0c4fb12c184646b4f1aa985f | [
"Unlicense"
] | null | null | null | import base64
import os
from pathlib import Path
import imageio
import numpy as np
from text_spotting.text_spotting_model import TextSpottingModel
def test_model_correct_text_results():
    """End-to-end check: the bundled sample image must yield exactly one '120' detection."""
    # Read the reference image shipped with the repo, relative to this test file.
    src_image = open(Path(os.path.dirname(__file__), "../data/out1.png").resolve(), "rb").read()
    model = TextSpottingModel()
    # Decode the raw bytes into an ndarray the model can consume.
    image = np.asarray(imageio.imread(src_image))
    texts, boxes, scores, _ = model.predict(image)
    # Exactly one recognized token must equal '120'.
    assert len([text for text in texts if '120' == text]) == 1
| 20.416667 | 96 | 0.720408 |
5af78cbcd0bf72c70692586a8d8e4cfc48185eb7 | 3,764 | py | Python | sqlitemodel/SQL.py | gravmatt/sqlitemodel | c4fcd5628520948054dad5b745071134f10889c2 | [
"MIT",
"Unlicense"
] | 15 | 2016-03-26T20:35:45.000Z | 2021-02-15T09:56:20.000Z | sqlitemodel/SQL.py | gravmatt/sqlitemodel | c4fcd5628520948054dad5b745071134f10889c2 | [
"MIT",
"Unlicense"
] | null | null | null | sqlitemodel/SQL.py | gravmatt/sqlitemodel | c4fcd5628520948054dad5b745071134f10889c2 | [
"MIT",
"Unlicense"
] | 3 | 2016-03-27T01:22:11.000Z | 2019-11-08T01:55:12.000Z | class SQL(object):
'''SQL builder to generate SQL statements'''
def __init__(self):
self.__command = None
self.__select = ''
self.__update = ''
self.__delete = ''
self.__insert = ''
self.__create = ''
self.__columns = []
self.__values = []
self.values = []
self.__from = ''
self.__where = []
self.__orderBy = ''
self.__limit = ''
def CREATE(self, table):
self.__command = 'create'
self.__create = 'CREATE TABLE IF NOT EXISTS %s ' % table
self.__create += '(%s);'
return self
def COLUMN(self, name, type):
self.__columns.append('%s %s' % (name, type))
return self
def SELECT(self, *fields):
self.__command = 'select'
self.__select = 'SELECT '
if(fields):
self.__select += ', '.join(fields)
else:
self.__select += 'rowid, *'
return self
def UPDATE(self, table):
self.__command = 'update'
self.__update = 'UPDATE %s SET ' % table
return self
def SET(self, field, value):
self.__values.append((field, value))
return self
def DELETE(self, table):
self.__command = 'delete'
self.__delete = 'DELETE FROM %s' % table
return self
def INSERT(self, table):
self.__command = 'insert'
self.__insert = 'INSERT INTO %s ' % table
return self
def VALUES(self, **values):
self.__values = list({(k, values[k]) for k in values})
return self
def FROM(self, table):
self.__from = ' FROM %s' % table
return self
def WHERE(self, field, operator, value, isRaw=False):
self.__where.append((field, operator, value, isRaw))
return self
def AND(self):
self.__where.append((None, 'AND', None, False))
return self
def OR(self):
self.__where.append((None, 'OR', None, False))
return self
def LIMIT(self, offset, max):
self.__limit = ' LIMIT %s,%s' % (offset, max)
return self
def ORDER_BY(self, field, direction):
self.__orderBy = ' ORDER BY %s %s' % (field, direction)
return self
def getValues(self):
self.toStr()
return tuple(self.values) if self.values else ();
def toStr(self):
sql = None
where = ''
if(self.__where):
where = ' WHERE '
wherebuild = []
for t in self.__where:
if(not t[0] and not t[2]):
wherebuild.append(t[1])
else:
wherebuild.append(('%s%s%s' % (t[0], t[1], t[2])) if t[3] else ('%s%s?' % (t[0], t[1])))
where += ' '.join(wherebuild)
if(self.__command == 'select'):
sql = self.__select + self.__from + where + self.__orderBy + self.__limit + ';'
elif (self.__command == 'insert'):
sql = self.__insert + '(' + ','.join(['%s' % t[0] for t in self.__values]) + ') VALUES (' + ('?,'*len(self.__values))[:-1] + ');'
self.values = [t[1] for t in self.__values]
elif (self.__command == 'update'):
sql = self.__update + ', '.join(['%s=%s' % (t[0], '?') for t in self.__values]) + where + ';'
self.values = [t[1] for t in self.__values]
elif (self.__command == 'delete'):
sql = self.__delete + where + ';'
self.values = [t[1] for t in self.__values]
elif(self.__command == 'create'):
sql = self.__create % ', '.join(self.__columns)
if(self.__where):
self.values = [t[2] for t in self.__where if (t[0] or t[2]) and not t[3]]
return sql
| 27.676471 | 141 | 0.513018 |
cbe144d9be17661cd58b8d2ec67a93223107250b | 14,663 | py | Python | local/dnsproxy.py | lbp0200/BeltRoad | 5f60fa497a0e1e1c13c2d71db56b35b382296c1b | [
"MIT"
] | 2 | 2019-07-15T01:08:12.000Z | 2020-02-19T04:09:25.000Z | local/dnsproxy.py | lbp0200/BeltRoad | 5f60fa497a0e1e1c13c2d71db56b35b382296c1b | [
"MIT"
] | null | null | null | local/dnsproxy.py | lbp0200/BeltRoad | 5f60fa497a0e1e1c13c2d71db56b35b382296c1b | [
"MIT"
] | 1 | 2019-07-14T05:39:25.000Z | 2019-07-14T05:39:25.000Z | #!/usr/bin/env python
# coding:utf-8
__version__ = '1.0'
import sys
import os
import sysconfig
sys.path += [os.path.abspath(os.path.join(__file__, '../packages.egg/%s' % x)) for x in ('noarch', sysconfig.get_platform().split('-')[0])]
import gevent
import gevent.server
import gevent.timeout
import gevent.monkey
gevent.monkey.patch_all(subprocess=True)
import re
import time
import logging
import heapq
import socket
import select
import struct
import errno
import thread
import dnslib
import Queue
import pygeoip
# Matches loopback (127/8) and RFC 1918 private addresses (10/8, 172.16/12,
# 192.168/16), optionally behind an ISATAP-style "...:0:5efe:" IPv6 prefix.
is_local_addr = re.compile(r'(?i)(?:[0-9a-f:]+0:5efe:)?(?:127(?:\.\d+){3}|10(?:\.\d+){3}|192\.168(?:\.\d+){2}|172\.(?:1[6-9]|2\d|3[01])(?:\.\d+){2})').match
def get_dnsserver_list():
    """Return the system-configured DNS server IPs, best effort.

    On Windows this queries dnsapi.dll (DnsQueryConfig); on POSIX it parses
    /etc/resolv.conf. Returns an empty list on unsupported platforms.
    """
    if os.name == 'nt':
        import ctypes, ctypes.wintypes, struct, socket
        DNS_CONFIG_DNS_SERVER_LIST = 6
        buf = ctypes.create_string_buffer(2048)
        ctypes.windll.dnsapi.DnsQueryConfig(DNS_CONFIG_DNS_SERVER_LIST, 0, None, None, ctypes.byref(buf), ctypes.byref(ctypes.wintypes.DWORD(len(buf))))
        # Buffer layout: a leading DWORD count followed by that many
        # packed IPv4 addresses (4 bytes each).
        ipcount = struct.unpack('I', buf[0:4])[0]
        iplist = [socket.inet_ntoa(buf[i:i+4]) for i in xrange(4, ipcount*4+4, 4)]
        return iplist
    elif os.path.isfile('/etc/resolv.conf'):
        with open('/etc/resolv.conf', 'rb') as fp:
            return re.findall(r'(?m)^nameserver\s+(\S+)', fp.read())
    else:
        logging.warning("get_dnsserver_list failed: unsupport platform '%s-%s'", sys.platform, os.name)
        return []
def parse_hostport(host, default_port=80):
    """Split a 'host#port' string into ``(host, port)``.

    When no '#port' suffix is present, *default_port* is used. Square
    brackets around the host (IPv6 literal notation) are stripped.
    """
    match = re.match(r'(.+)[#](\d+)$', host)
    if not match:
        return host.strip('[]'), default_port
    name, port = match.groups()
    return name.strip('[]'), int(port)
class ExpireCache(object):
    """Dictionary-like cache whose entries expire after a per-key TTL.

    A min-heap of (expire_time, key) pairs tracks expiry order; ``cleanup``
    purges expired entries and shrinks the cache down to ``max_size``.
    """

    def __init__(self, max_size=1024):
        self.__maxsize = max_size
        self.__values = {}          # key -> cached value
        self.__expire_times = {}    # key -> integer expiry timestamp
        self.__expire_heap = []     # min-heap of (expiry, key)

    def size(self):
        """Number of entries currently stored (expired-but-unpurged included)."""
        return len(self.__values)

    def clear(self):
        """Drop every entry."""
        self.__values.clear()
        self.__expire_times.clear()
        del self.__expire_heap[:]

    def exists(self, key):
        """Return True when *key* is present (expiry is not checked here)."""
        return key in self.__values

    def __drop_heap_entry(self, et, key):
        # Remove (et, key) from the heap and restore the heap invariant
        # at the vacated position.
        pos = self.__expire_heap.index((et, key))
        del self.__expire_heap[pos]
        if pos < len(self.__expire_heap):
            heapq._siftup(self.__expire_heap, pos)

    def set(self, key, value, expire):
        """Store *value* under *key*, expiring *expire* seconds from now."""
        if key in self.__expire_times:
            # Replace the key's old heap entry before pushing the new one.
            self.__drop_heap_entry(self.__expire_times[key], key)
        et = int(time.time() + expire)
        self.__expire_times[key] = et
        heapq.heappush(self.__expire_heap, (et, key))
        self.__values[key] = value
        self.cleanup()

    def get(self, key):
        """Return the cached value; raise KeyError when missing or expired."""
        if self.__expire_times[key] < time.time():
            self.cleanup()
            raise KeyError(key)
        return self.__values[key]

    def delete(self, key):
        """Remove *key*; raise KeyError when absent."""
        et = self.__expire_times.pop(key)
        self.__drop_heap_entry(et, key)
        del self.__values[key]

    def cleanup(self):
        """Purge expired entries and shrink down to the configured max size."""
        now = int(time.time())
        heap = self.__expire_heap
        while heap and (heap[0][0] <= now or len(self.__values) > self.__maxsize):
            _, key = heapq.heappop(heap)
            del self.__values[key]
            del self.__expire_times[key]
def dnslib_resolve_over_udp(query, dnsservers, timeout, **kwargs):
    """Resolve *query* over UDP, discarding replies that look poisoned.

    Background on DNS cache poisoning that motivates the filtering:
    http://gfwrev.blogspot.com/2009/11/gfwdns.html
    http://zh.wikipedia.org/wiki/%E5%9F%9F%E5%90%8D%E6%9C%8D%E5%8A%A1%E5%99%A8%E7%BC%93%E5%AD%98%E6%B1%A1%E6%9F%93
    http://support.microsoft.com/kb/241352
    https://gist.github.com/klzgrad/f124065c0616022b65e5

    Args:
        query: hostname string or prepared dnslib.DNSRecord question.
        dnsservers: iterable of server addresses, optionally "host#port".
        timeout: overall deadline in seconds across all retries.
        blacklist (kwarg): IPs whose presence marks a reply as poisoned.
        turstservers (kwarg): servers whose error (rcode) replies are trusted.

    Returns:
        dnslib.DNSRecord reply.

    Raises:
        socket.gaierror: when no acceptable reply arrives before the deadline.
    """
    if not isinstance(query, (basestring, dnslib.DNSRecord)):
        raise TypeError('query argument requires string/DNSRecord')
    blacklist = kwargs.get('blacklist', ())
    turstservers = kwargs.get('turstservers', ())
    # Split servers by address family so each gets a matching socket.
    dns_v4_servers = [x for x in dnsservers if ':' not in x]
    dns_v6_servers = [x for x in dnsservers if ':' in x]
    sock_v4 = sock_v6 = None
    socks = []
    if dns_v4_servers:
        sock_v4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        socks.append(sock_v4)
    if dns_v6_servers:
        sock_v6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        socks.append(sock_v6)
    timeout_at = time.time() + timeout
    try:
        for _ in xrange(4):
            try:
                for dnsserver in dns_v4_servers:
                    if isinstance(query, basestring):
                        if dnsserver in ('8.8.8.8', '8.8.4.4'):
                            # Mixed-case ("0x20-style") qname for Google DNS;
                            # case is echoed back, hardening against spoofing.
                            query = '.'.join(x[:-1] + x[-1].upper() for x in query.split('.')).title()
                        query = dnslib.DNSRecord(q=dnslib.DNSQuestion(query))
                    query_data = query.pack()
                    if query.q.qtype == 1 and dnsserver in ('8.8.8.8', '8.8.4.4'):
                        # Rewrite the packet tail with a compression pointer
                        # (0xc004) — presumably an anti-interference trick;
                        # TODO(review): confirm intent.
                        query_data = query_data[:-5] + '\xc0\x04' + query_data[-4:]
                    sock_v4.sendto(query_data, parse_hostport(dnsserver, 53))
                for dnsserver in dns_v6_servers:
                    if isinstance(query, basestring):
                        query = dnslib.DNSRecord(q=dnslib.DNSQuestion(query, qtype=dnslib.QTYPE.AAAA))
                    query_data = query.pack()
                    sock_v6.sendto(query_data, parse_hostport(dnsserver, 53))
                while time.time() < timeout_at:
                    ins, _, _ = select.select(socks, [], [], 0.1)
                    for sock in ins:
                        reply_data, reply_address = sock.recvfrom(512)
                        reply_server = reply_address[0]
                        record = dnslib.DNSRecord.parse(reply_data)
                        # Collect A (1), AAAA (28) and ANY (255) answers.
                        iplist = [str(x.rdata) for x in record.rr if x.rtype in (1, 28, 255)]
                        if any(x in blacklist for x in iplist):
                            # Blacklisted IP in the answer: treat as poisoned
                            # and keep waiting for a clean reply.
                            logging.warning('query=%r dnsservers=%r record bad iplist=%r', query, dnsservers, iplist)
                        elif record.header.rcode and not iplist and reply_server in turstservers:
                            # Trusted server reported an error (e.g. NXDOMAIN);
                            # accept the error record as authoritative.
                            logging.info('query=%r trust reply_server=%r record rcode=%s', query, reply_server, record.header.rcode)
                            return record
                        elif iplist:
                            logging.debug('query=%r reply_server=%r record iplist=%s', query, reply_server, iplist)
                            return record
                        else:
                            logging.debug('query=%r reply_server=%r record null iplist=%s', query, reply_server, iplist)
                            continue
            except socket.error as e:
                logging.warning('handle dns query=%s socket: %r', query, e)
        raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsservers))
    finally:
        for sock in socks:
            sock.close()
def dnslib_resolve_over_tcp(query, dnsservers, timeout, **kwargs):
    """Resolve *query* over TCP, racing all *dnsservers* in parallel threads.

    TCP framing (2-byte length prefix per RFC 1035) is handled manually.
    The first non-error reply whose answers avoid the blacklist wins.

    Raises:
        socket.gaierror: when every server fails or returns blacklisted IPs.
    """
    if not isinstance(query, (basestring, dnslib.DNSRecord)):
        raise TypeError('query argument requires string/DNSRecord')
    blacklist = kwargs.get('blacklist', ())
    def do_resolve(query, dnsserver, timeout, queobj):
        # Worker: query one server and put either a DNSRecord or an
        # exception onto the shared queue.
        if isinstance(query, basestring):
            qtype = dnslib.QTYPE.AAAA if ':' in dnsserver else dnslib.QTYPE.A
            query = dnslib.DNSRecord(q=dnslib.DNSQuestion(query, qtype=qtype))
        query_data = query.pack()
        sock_family = socket.AF_INET6 if ':' in dnsserver else socket.AF_INET
        sock = socket.socket(sock_family)
        rfile = None
        try:
            sock.settimeout(timeout or None)
            sock.connect(parse_hostport(dnsserver, 53))
            # TCP DNS: 2-byte big-endian length prefix, then the message.
            sock.send(struct.pack('>h', len(query_data)) + query_data)
            rfile = sock.makefile('r', 1024)
            reply_data_length = rfile.read(2)
            if len(reply_data_length) < 2:
                raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsserver))
            reply_data = rfile.read(struct.unpack('>h', reply_data_length)[0])
            record = dnslib.DNSRecord.parse(reply_data)
            iplist = [str(x.rdata) for x in record.rr if x.rtype in (1, 28, 255)]
            if any(x in blacklist for x in iplist):
                logging.debug('query=%r dnsserver=%r record bad iplist=%r', query, dnsserver, iplist)
                raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsserver))
            else:
                logging.debug('query=%r dnsserver=%r record iplist=%s', query, dnsserver, iplist)
                queobj.put(record)
        except socket.error as e:
            logging.debug('query=%r dnsserver=%r failed %r', query, dnsserver, e)
            queobj.put(e)
        finally:
            if rfile:
                rfile.close()
            sock.close()
    queobj = Queue.Queue()
    for dnsserver in dnsservers:
        thread.start_new_thread(do_resolve, (query, dnsserver, timeout, queobj))
    for i in range(len(dnsservers)):
        try:
            # NOTE(review): Queue.get's first positional arg is `block`, not
            # `timeout` — this likely meant queobj.get(timeout=timeout);
            # as written it blocks indefinitely. Verify before changing.
            result = queobj.get(timeout)
        except Queue.Empty:
            raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsservers))
        if result and not isinstance(result, Exception):
            return result
        elif i == len(dnsservers) - 1:
            logging.warning('dnslib_resolve_over_tcp %r with %s return %r', query, dnsservers, result)
    raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsservers))
class DNSServer(gevent.server.DatagramServer):
    """DNS Proxy based on gevent/dnslib.

    Serves UDP DNS requests, forwarding them to upstream resolvers while
    filtering blacklisted (poisoned) answers and caching replies by
    (qname, qtype) with a TTL-derived expiry.
    """

    def __init__(self, *args, **kwargs):
        # Proxy-specific options are popped before delegating to gevent.
        dns_blacklist = kwargs.pop('dns_blacklist')
        dns_servers = kwargs.pop('dns_servers')
        dns_tcpover = kwargs.pop('dns_tcpover', [])
        dns_timeout = kwargs.pop('dns_timeout', 2)
        # NOTE(review): super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; conventional form is super(DNSServer, self).
        super(self.__class__, self).__init__(*args, **kwargs)
        self.dns_servers = list(dns_servers)
        # Domains (suffix match) that must be resolved over TCP.
        self.dns_tcpover = tuple(dns_tcpover)
        self.dns_intranet_servers = [x for x in self.dns_servers if is_local_addr(x)]
        self.dns_blacklist = set(dns_blacklist)
        self.dns_timeout = int(dns_timeout)
        self.dns_cache = ExpireCache(max_size=65536)
        self.dns_trust_servers = set(['8.8.8.8', '8.8.4.4', '2001:4860:4860::8888', '2001:4860:4860::8844'])
        # If a GeoIP database is available, additionally trust any non-China
        # IPv4 upstream server.
        for dirname in ('.', '/usr/share/GeoIP/', '/usr/local/share/GeoIP/'):
            filename = os.path.join(dirname, 'GeoIP.dat')
            if os.path.isfile(filename):
                geoip = pygeoip.GeoIP(filename)
                for dnsserver in self.dns_servers:
                    if ':' not in dnsserver and geoip.country_name_by_addr(parse_hostport(dnsserver, 53)[0]) not in ('China',):
                        self.dns_trust_servers.add(dnsserver)
                break

    def do_read(self):
        # Swallow benign connection-teardown errors; re-raise the rest.
        try:
            return gevent.server.DatagramServer.do_read(self)
        except socket.error as e:
            # Python 2-style errno access via e[0].
            if e[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
                raise

    def get_reply_record(self, data):
        """Parse a raw DNS request and return a reply DNSRecord.

        Handles reverse (in-addr.arpa) lookups locally, routes plain
        hostnames to intranet servers, and caches successful answers.
        """
        request = dnslib.DNSRecord.parse(data)
        qname = str(request.q.qname).lower()
        qtype = request.q.qtype
        dnsservers = self.dns_servers
        if qname.endswith('.in-addr.arpa'):
            # Reverse lookup: answer directly with the embedded address.
            ipaddr = '.'.join(reversed(qname[:-13].split('.')))
            record = dnslib.DNSRecord(header=dnslib.DNSHeader(id=request.header.id, qr=1,aa=1,ra=1), a=dnslib.RR(qname, rdata=dnslib.A(ipaddr)))
            return record
        if 'USERDNSDOMAIN' in os.environ:
            # Windows intranet suffix handling: strip the user DNS domain.
            user_dnsdomain = '.' + os.environ['USERDNSDOMAIN'].lower()
            if qname.endswith(user_dnsdomain):
                qname = qname[:-len(user_dnsdomain)]
                if '.' not in qname:
                    if not self.dns_intranet_servers:
                        logging.warning('qname=%r is a plain hostname, need intranet dns server!!!', qname)
                        return dnslib.DNSRecord(header=dnslib.DNSHeader(id=request.header.id, rcode=3))
                    qname += user_dnsdomain
                    dnsservers = self.dns_intranet_servers
        try:
            return self.dns_cache.get((qname, qtype))
        except KeyError:
            pass
        try:
            # TCP for configured suffixes; otherwise UDP with poisoning filter.
            dns_resolve = dnslib_resolve_over_tcp if qname.endswith(self.dns_tcpover) else dnslib_resolve_over_udp
            kwargs = {'blacklist': self.dns_blacklist, 'turstservers': self.dns_trust_servers}
            record = dns_resolve(request, dnsservers, self.dns_timeout, **kwargs)
            ttl = max(x.ttl for x in record.rr) if record.rr else 600
            # Cache for twice the answer TTL.
            self.dns_cache.set((qname, qtype), record, ttl * 2)
            return record
        except socket.gaierror as e:
            logging.warning('resolve %r failed: %r', qname, e)
            # rcode=3 is NXDOMAIN.
            return dnslib.DNSRecord(header=dnslib.DNSHeader(id=request.header.id, rcode=3))

    def handle(self, data, address):
        """Per-datagram handler: resolve and echo back with the request id."""
        logging.debug('receive from %r data=%r', address, data)
        record = self.get_reply_record(data)
        # Reuse the original 2-byte transaction id from the request.
        return self.sendto(data[:2] + record.pack()[2:], address)
def test():
    """Start a DNS proxy on UDP port 53 with a hard-coded server/blacklist set."""
    logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
    # Upstream resolvers (Google, HiNet, AliDNS, 114DNS).
    dns_servers = '8.8.8.8|8.8.4.4|168.95.1.1|168.95.192.1|223.5.5.5|223.6.6.6|114.114.114.114|114.114.115.115'.split('|')
    # Known-poisoned answer IPs; replies containing any of these are dropped.
    dns_blacklist = '1.1.1.1|255.255.255.255|74.125.127.102|74.125.155.102|74.125.39.102|74.125.39.113|209.85.229.138|4.36.66.178|8.7.198.45|37.61.54.158|46.82.174.68|59.24.3.173|64.33.88.161|64.33.99.47|64.66.163.251|65.104.202.252|65.160.219.113|66.45.252.237|72.14.205.104|72.14.205.99|78.16.49.15|93.46.8.89|128.121.126.139|159.106.121.75|169.132.13.103|192.67.198.6|202.106.1.2|202.181.7.85|203.161.230.171|203.98.7.65|207.12.88.98|208.56.31.43|209.145.54.50|209.220.30.174|209.36.73.33|211.94.66.147|213.169.251.35|216.221.188.182|216.234.179.13|243.185.187.3|243.185.187.39|23.89.5.60|37.208.111.120|49.2.123.56|54.76.135.1|77.4.7.92|118.5.49.6|188.5.4.96|189.163.17.5|197.4.4.12|249.129.46.48|253.157.14.165|183.207.229.|183.207.232.'.split('|')
    # Domains whose lookups are forced over TCP.
    dns_tcpover = ['.youtube.com', '.googlevideo.com']
    logging.info('serving at port 53...')
    DNSServer(('', 53), dns_servers=dns_servers, dns_blacklist=dns_blacklist, dns_tcpover=dns_tcpover).serve_forever()
if __name__ == '__main__':
    test()
| 45.537267 | 753 | 0.598922 |
b35bfc105f51975efd30fea0b31ce06d50632e88 | 1,024 | py | Python | eve_cli/exceptions.py | SakiiR/eve-cli | dafce2572cb6995f21cd938142a2e6132e62a6b7 | [
"MIT"
] | null | null | null | eve_cli/exceptions.py | SakiiR/eve-cli | dafce2572cb6995f21cd938142a2e6132e62a6b7 | [
"MIT"
] | null | null | null | eve_cli/exceptions.py | SakiiR/eve-cli | dafce2572cb6995f21cd938142a2e6132e62a6b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
class EveException(Exception):
    """Base exception raised when the Eve API returns an error response.

    Attributes:
        errors: list of detailed error entries reported by the server.
    """

    def __init__(self, message, errors=None):
        # Bugfix: the conventional super target is this class, not Exception
        # (super(Exception, self) skipped Exception in the MRO).
        super(EveException, self).__init__(message)
        # Bugfix: the old default `errors=[]` was a single mutable list
        # shared across every instance; build a fresh list per instance.
        self.errors = errors if errors is not None else []
class NotfoundException(EveException):
    """Raised for HTTP 404 (Not Found) responses from Eve."""
    pass
class UnauthorizedException(EveException):
    """Raised for HTTP 401 (Unauthorized) responses from Eve."""
    pass
class BadRequestException(EveException):
    """Raised for HTTP 400 (Bad Request) responses from Eve."""
    pass
def _handle_400(response, json):
    """Raise BadRequestException with the server's error message (HTTP 400)."""
    raise BadRequestException(json.get("_error", {}).get("message", "Bad Request"))
def _handle_404(response, json):
    """Raise NotfoundException with the server's error message (HTTP 404)."""
    raise NotfoundException(json.get("_error", {}).get("message", "Notfound"))
def _handle_401(response, json):
    """Raise UnauthorizedException with the server's error message (HTTP 401)."""
    raise UnauthorizedException(json.get("_error", {}).get("message", "Unauthorized"))
def exception_handler(response, json):
    """Dispatch an Eve error response to the matching exception raiser.

    Args:
        response: HTTP response object (only ``status_code`` is read here).
        json: parsed JSON body of the response.

    Status codes without a registered handler are silently ignored.
    """
    errors = {
        400: _handle_400,
        401: _handle_401,
        404: _handle_404,
    }
    if response.status_code in errors:
        errors[response.status_code](response, json)
ed2006efcaa8e3c33537d14ba7e6a8015dd2772b | 164 | py | Python | python/12372.py | ThePeeps191/online-judge-solutions | 1cc7d26233c7bd2da23b82ac0fd1d4132cf8d0ad | [
"MIT"
] | 1 | 2022-03-14T22:53:44.000Z | 2022-03-14T22:53:44.000Z | python/12372.py | ThePeeps191/online-judge-solutions | 1cc7d26233c7bd2da23b82ac0fd1d4132cf8d0ad | [
"MIT"
] | null | null | null | python/12372.py | ThePeeps191/online-judge-solutions | 1cc7d26233c7bd2da23b82ac0fd1d4132cf8d0ad | [
"MIT"
] | null | null | null | t = 1
for _ in range(int(input())):
L, W, H = [int(a) for a in input().split()]
print(f"Case {t}: {'good' if L <= 20 and W <= 20 and H <= 20 else 'bad'}")
t += 1 | 32.8 | 75 | 0.518293 |
c68e8b1385712dd70037ac43b5d42d1fbc92f9aa | 363 | py | Python | docs_src/response_model/tutorial002.py | mbhavya/fastapi | 1876ebc77949a9a254909ec61ea0c09365169ec2 | [
"MIT"
] | 1 | 2022-01-08T16:39:28.000Z | 2022-01-08T16:39:28.000Z | docs_src/response_model/tutorial002.py | mbhavya/fastapi | 1876ebc77949a9a254909ec61ea0c09365169ec2 | [
"MIT"
] | 1 | 2022-01-07T21:04:04.000Z | 2022-01-07T21:04:04.000Z | docs_src/response_model/tutorial002.py | mbhavya/fastapi | 1876ebc77949a9a254909ec61ea0c09365169ec2 | [
"MIT"
] | null | null | null | from typing import Union
from fastapi import FastAPI
from pydantic import BaseModel, EmailStr
app = FastAPI()
class UserIn(BaseModel):
    """Inbound user payload (note: carries the plaintext password)."""
    username: str
    password: str  # plaintext password as received from the client
    email: EmailStr  # validated email address
    full_name: Union[str, None] = None  # optional display name
# Don't do this in production!
@app.post("/user/", response_model=UserIn)
async def create_user(user: UserIn):
    """Echo the validated user back; response_model=UserIn leaks the password."""
    return user
| 18.15 | 42 | 0.721763 |
ae28528a8cb7076ca28fcc2578998dabcc984209 | 3,457 | py | Python | bnpy/birthmove/zzzdeprecated/BProposals.py | jun2tong/bnp-anomaly | c7fa106b5bb29ed6688a3d91e3f302a0a130b896 | [
"BSD-3-Clause"
] | 184 | 2016-12-13T21:05:48.000Z | 2022-02-28T11:47:23.000Z | bnpy/birthmove/zzzdeprecated/BProposals.py | jun2tong/bnp-anomaly | c7fa106b5bb29ed6688a3d91e3f302a0a130b896 | [
"BSD-3-Clause"
] | 37 | 2016-12-18T14:07:53.000Z | 2022-03-13T10:58:14.000Z | bnpy/birthmove/zzzdeprecated/BProposals.py | jun2tong/bnp-anomaly | c7fa106b5bb29ed6688a3d91e3f302a0a130b896 | [
"BSD-3-Clause"
] | 50 | 2017-01-25T19:44:34.000Z | 2022-03-15T10:22:01.000Z | import numpy as np
def expandLP_truelabels(
        Data_t, curLP_t, tmpModel, curSS_nott,
        **Plan):
    ''' Create new states for the target data from its true labels.

    NOTE(review): despite the original summary ("single new state"), this
    adds one new state per unique value in Data_t.TrueParams['Z'], giving
    K + len(unique labels) states in total.

    Returns
    -------
    propLP_t : dict of local params, with K + 1 states
    xcurSS_nott : SuffStatBag
        first K states are equal to curSS_nott
        final few states are empty
    '''
    assert 'Z' in Data_t.TrueParams
    Z = Data_t.TrueParams['Z']
    uLabels = np.unique(Z)
    origK = curSS_nott.K
    propK = origK + len(uLabels)
    # Hard-assign each atom to the new state matching its true label.
    propResp = np.zeros((curLP_t['resp'].shape[0], propK))
    for uid, uval in enumerate(uLabels):
        mask_uid = Z == uval
        propResp[mask_uid, origK + uid] = 1.0
    propLP_t = dict(resp=propResp)
    if hasattr(tmpModel.allocModel, 'initLPFromResp'):
        propLP_t = tmpModel.allocModel.initLPFromResp(Data_t, propLP_t)
    # Make expanded xcurSS to match
    xcurSS_nott = curSS_nott.copy(includeELBOTerms=1, includeMergeTerms=0)
    xcurSS_nott.insertEmptyComps(propK - origK)
    return propLP_t, xcurSS_nott
def expandLP_singleNewState(
        Data_t, curLP_t, tmpModel, curSS_nott,
        **Plan):
    ''' Create single new state for all target data.

    Every target atom is hard-assigned (resp = 1.0) to one brand-new
    component appended after the existing K components.

    Returns
    -------
    propLP_t : dict of local params, with K + 1 states
    xcurSS_nott : SuffStatBag
        first K states are equal to curSS_nott
        final few states are empty
    '''
    xcurSS_nott = curSS_nott.copy(includeELBOTerms=1, includeMergeTerms=0)
    xcurSS_nott.insertEmptyComps(1)
    propK = curSS_nott.K + 1
    # All mass on the last (new) component; existing comps get zero resp.
    propResp = np.zeros((curLP_t['resp'].shape[0], propK))
    propResp[:, -1] = 1.0
    propLP_t = dict(resp=propResp)
    if hasattr(tmpModel.allocModel, 'initLPFromResp'):
        propLP_t = tmpModel.allocModel.initLPFromResp(Data_t, propLP_t)
    return propLP_t, xcurSS_nott
def expandLP_randomSplit(
        Data_t, curLP_t, tmpModel, curSS_nott,
        PRNG=np.random, **Plan):
    ''' Divide target data into two new states, completely at random.

    Parameters
    ----------
    Data_t : target dataset object
    curLP_t : dict of current local params for the target data
    tmpModel : model used to refresh local/global parameters
    curSS_nott : SuffStatBag of sufficient stats excluding the target
    PRNG : numpy random generator used for the random assignment

    Returns
    -------
    propLP_t : dict of local params, with K + 2 states
    xcurSS_nott : SuffStatBag
        first K states are equal to curSS_nott
        final few states are empty
    '''
    Kfresh = 2
    xcurSS_nott = curSS_nott.copy(includeELBOTerms=1, includeMergeTerms=0)
    xcurSS_nott.insertEmptyComps(Kfresh)
    origK = curSS_nott.K
    propK = curSS_nott.K + Kfresh
    propResp = np.zeros((curLP_t['resp'].shape[0], propK))
    propResp[:, :origK] = curLP_t['resp']
    if 'btargetCompID' in Plan:
        # Only reassign atoms substantially explained by the targeted comp.
        atomids = np.flatnonzero(
            curLP_t['resp'][:, Plan['btargetCompID']] > 0.01)
    else:
        atomids = np.arange(propResp.shape[0])
    # randomly permute atomids
    PRNG.shuffle(atomids)
    if atomids.size > 20:
        # Seed each new state with a small disjoint subsample of 10 atoms.
        Aids = atomids[:10]
        Bids = atomids[10:20]
    else:
        # Bugfix: use integer (floor) division. Under Python 3 the old
        # ``atomids.size / 2`` produced a float, which is invalid as a
        # slice index; ``//`` is identical under Python 2 int semantics.
        half = atomids.size // 2
        Aids = atomids[:half]
        Bids = atomids[half:]
    # Force all atomids to only be explained by new comps
    propResp[atomids, :] = 0.0
    propResp[Aids, -2] = 1.0
    propResp[Bids, -1] = 1.0
    propLP_t = dict(resp=propResp)
    if hasattr(tmpModel.allocModel, 'initLPFromResp'):
        propLP_t = tmpModel.allocModel.initLPFromResp(Data_t, propLP_t)
    # Refresh global params from the proposal, then recompute local params.
    propSS = tmpModel.get_global_suff_stats(Data_t, propLP_t)
    propSS += xcurSS_nott
    tmpModel.update_global_params(propSS)
    propLP_t = tmpModel.calc_local_params(Data_t, propLP_t)
    return propLP_t, xcurSS_nott
| 31.715596 | 74 | 0.654903 |
9accae2d5df1a692a857842e5e5720b703adef9e | 10,476 | py | Python | data/realsr_preprocess/utils.py | pcwuyu/PaddleGAN | b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268 | [
"Apache-2.0"
] | 6,852 | 2020-06-18T10:56:27.000Z | 2022-03-31T10:17:47.000Z | data/realsr_preprocess/utils.py | pcwuyu/PaddleGAN | b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268 | [
"Apache-2.0"
] | 283 | 2020-07-09T13:29:58.000Z | 2022-03-23T02:50:25.000Z | data/realsr_preprocess/utils.py | pcwuyu/PaddleGAN | b4ff90f0c92c4d8dcaa8e25267151b82fc7aa268 | [
"Apache-2.0"
] | 973 | 2020-06-18T11:15:35.000Z | 2022-03-30T10:50:16.000Z | import math
import numpy as np
from PIL import Image
import paddle
# set random seed for reproducibility
np.random.seed(0)
def is_image_file(filename):
    """Return True when *filename* ends with a recognized image extension.

    Matching is case-sensitive; only the exact lower- and upper-case
    variants listed below are accepted.
    """
    return filename.endswith(
        ('.png', '.jpg', '.jpeg', '.PNG', '.JPG', '.JPEG'))
def calculate_valid_crop_size(crop_size, upscale_factor):
    """Largest multiple of *upscale_factor* not exceeding *crop_size*."""
    return (crop_size // upscale_factor) * upscale_factor
def gaussian_noise(image, std_dev):
    """Add zero-mean Gaussian noise (rounded to integers) to *image*.

    Args:
        image: array-like image data; assumed uint8-range values — TODO confirm.
        std_dev: standard deviation of the noise in pixel-intensity units.

    Returns:
        PIL.Image: the noisy image, clipped to the valid [0, 255] uint8 range.
    """
    noise = np.rint(
        np.random.normal(loc=0.0, scale=std_dev, size=np.shape(image)))
    return Image.fromarray(np.clip(image + noise, 0, 255).astype(np.uint8))
#################################################################################
# MATLAB imresize taken from ESRGAN (https://github.com/xinntao/BasicSR)
#################################################################################
def cubic(x):
    """Bicubic interpolation kernel (Keys' cubic convolution with a = -0.5).

    Returns the piecewise-cubic weight: 1.5|x|^3 - 2.5|x|^2 + 1 for |x| <= 1,
    -0.5|x|^3 + 2.5|x|^2 - 4|x| + 2 for 1 < |x| <= 2, and 0 elsewhere
    (both masks are zero beyond |x| = 2).
    """
    absx = paddle.abs(x)
    absx2 = absx**2
    absx3 = absx**3
    # Indicator masks cast to the input dtype so they multiply cleanly.
    temp1 = paddle.cast((absx <= 1), absx.dtype)
    temp2 = paddle.cast((absx > 1), absx.dtype) * paddle.cast(
        (absx <= 2), absx.dtype)
    return (1.5 * absx3 - 2.5 * absx2 +
            1) * temp1 + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * temp2
def calculate_weights_indices(in_length, out_length, scale, kernel,
                              kernel_width, antialiasing):
    """Compute per-output-pixel resampling weights and source indices.

    Mirrors MATLAB imresize's contribution calculation: for each of the
    *out_length* output positions, returns the input-pixel indices involved
    and their (row-normalized) cubic weights, plus the symmetric-padding
    sizes needed at the start/end of the input.

    Returns:
        (weights, indices, sym_len_s, sym_len_e) where weights/indices are
        [out_length, P] tensors and sym_len_s/sym_len_e are ints.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = paddle.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = paddle.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.reshape([out_length, 1]).expand([
        out_length, P
    ]) + paddle.linspace(0, P - 1, P).reshape([1, P]).expand([out_length, P])
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.reshape([out_length, 1]).expand([out_length, P
                                                            ]) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = paddle.sum(weights, 1).reshape([out_length, 1])
    weights = weights / weights_sum.expand([out_length, P])
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = np.sum((weights.numpy() == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices[:, 1:1 + P - 2]
        weights = weights[:, 1:1 + P - 2]
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices[:, 0:P - 2]
        weights = weights[:, 0:P - 2]
    # How far indices reach past each end of the input determines the
    # symmetric padding needed before convolving.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True):
    """MATLAB-compatible bicubic resize of a CHW RGB image in [0, 1].

    Resamples height first, then width, using symmetric (mirror) padding
    at the borders and the weights/indices from calculate_weights_indices.
    Assumes exactly 3 channels (channel loops are unrolled for C = 0, 1, 2).
    """
    # Now the scale should be the same for H and W
    # input: img: CHW RGB [0,1]
    # output: CHW RGB [0,1] w/o round
    in_C, in_H, in_W = img.shape
    _, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize.  The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = paddle.zeros([in_C, in_H + sym_len_Hs + sym_len_He, in_W])
    img_aug[:, sym_len_Hs:sym_len_Hs + in_H, :] = img
    # Mirror the top rows into the leading pad region.
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = paddle.arange(sym_patch.shape[1] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 1)
    img_aug[:, :sym_len_Hs, :] = sym_patch_inv
    # Mirror the bottom rows into the trailing pad region.
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = paddle.arange(sym_patch.shape[1] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 1)
    img_aug[:,
            sym_len_Hs + in_H:sym_len_Hs + in_H + sym_len_He, :] = sym_patch_inv
    out_1 = paddle.zeros([in_C, out_H, in_W])
    kernel_width = weights_H.shape[1]
    for i in range(out_H):
        idx = int(indices_H[i][0])
        # Weighted sum over the kernel_width input rows, per channel.
        out_1[0, i, :] = paddle.mv(
            img_aug[0, idx:idx + kernel_width, :].transpose([1, 0]),
            (weights_H[i]))
        out_1[1, i, :] = paddle.mv(
            img_aug[1, idx:idx + kernel_width, :].transpose([1, 0]),
            (weights_H[i]))
        out_1[2, i, :] = paddle.mv(
            img_aug[2, idx:idx + kernel_width, :].transpose([1, 0]),
            (weights_H[i]))
    # process W dimension
    # symmetric copying
    out_1_aug = paddle.zeros([in_C, out_H, in_W + sym_len_Ws + sym_len_We])
    out_1_aug[:, :, sym_len_Ws:sym_len_Ws + in_W] = out_1
    # Mirror the left columns into the leading pad region.
    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = paddle.arange(sym_patch.shape[2] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 2)
    out_1_aug[:, :, 0:sym_len_Ws] = sym_patch_inv
    # Mirror the right columns into the trailing pad region.
    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = paddle.arange(sym_patch.shape[2] - 1, -1, -1)
    sym_patch_inv = paddle.index_select(sym_patch, inv_idx, 2)
    out_1_aug[:, :,
              sym_len_Ws + in_W:sym_len_Ws + in_W + sym_len_We] = sym_patch_inv
    out_2 = paddle.zeros([in_C, out_H, out_W])
    kernel_width = weights_W.shape[1]
    for i in range(out_W):
        idx = int(indices_W[i][0])
        out_2[0, :, i] = out_1_aug[0, :,
                                   idx:idx + kernel_width].mv(weights_W[i])
        out_2[1, :, i] = out_1_aug[1, :,
                                   idx:idx + kernel_width].mv(weights_W[i])
        out_2[2, :, i] = out_1_aug[2, :,
                                   idx:idx + kernel_width].mv(weights_W[i])
    return paddle.clip(out_2, 0, 1)
def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    Args:
        pic (paddle.Tensor or numpy.ndarray): Image to be converted to PIL Image.
            Tensors are assumed CHW; ndarrays HWC (see the 2D-expansion
            branches below).
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
    Returns:
        PIL Image: Image converted to PIL Image.

    Raises:
        TypeError: if *pic* is neither a paddle.Tensor nor an ndarray.
        ValueError: if *pic* is not 2- or 3-dimensional, or *mode* is
            incompatible with the channel count.
    """
    if not (isinstance(pic, paddle.Tensor) or isinstance(pic, np.ndarray)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(
            type(pic)))
    elif isinstance(pic, paddle.Tensor):
        if len(pic.shape) not in {2, 3}:
            raise ValueError(
                'pic should be 2/3 dimensional. Got {} dimensions.'.format(
                    pic.ndimension()))
        elif len(pic.shape) == 2:
            # if 2D image, add channel dimension (CHW)
            pic = pic.unsqueeze(0)
    elif isinstance(pic, np.ndarray):
        if pic.ndim not in {2, 3}:
            raise ValueError(
                'pic should be 2/3 dimensional. Got {} dimensions.'.format(
                    pic.ndim))
        elif pic.ndim == 2:
            # if 2D image, add channel dimension (HWC)
            pic = np.expand_dims(pic, 2)
    npimg = pic
    if isinstance(pic, paddle.Tensor) and mode != 'F':
        pic = pic.numpy()
    if pic.dtype == 'float32':
        # Scale [0,1] floats to uint8 and move CHW -> HWC.
        # NOTE(review): this assumes float32 input is CHW; a float32 ndarray
        # that is already HWC would be mis-transposed — verify callers.
        npimg = np.transpose((pic * 255.).astype('uint8'), (1, 2, 0))
    if not isinstance(npimg, np.ndarray):
        # NOTE(review): message says "torch.Tensor" but this module is
        # paddle-based — message text kept as-is.
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))
    # Derive the PIL mode from the channel count and dtype, validating any
    # user-supplied mode against what the data supports.
    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        elif npimg.dtype == np.int16:
            expected_mode = 'I;16'
        elif npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError(
                "Incorrect mode ({}) supplied for input type {}. Should be {}".
                format(mode, np.dtype, expected_mode))
        mode = expected_mode
    elif npimg.shape[2] == 2:
        permitted_2_channel_modes = ['LA']
        if mode is not None and mode not in permitted_2_channel_modes:
            raise ValueError("Only modes {} are supported for 2D inputs".format(
                permitted_2_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'LA'
    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(
                permitted_4_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(
                permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'
    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))
    return Image.fromarray(npimg, mode=mode)
| 37.819495 | 100 | 0.598797 |
93b6d767bc7b21ababcc0c428e2cbe3bf26e10bb | 1,216 | py | Python | functions/create_channel/lambda_function.py | prayaganeethu/socless-slack | 482b663f3c3cd1dc3e606dfa43123f720a668f86 | [
"Apache-2.0"
] | null | null | null | functions/create_channel/lambda_function.py | prayaganeethu/socless-slack | 482b663f3c3cd1dc3e606dfa43123f720a668f86 | [
"Apache-2.0"
] | null | null | null | functions/create_channel/lambda_function.py | prayaganeethu/socless-slack | 482b663f3c3cd1dc3e606dfa43123f720a668f86 | [
"Apache-2.0"
] | 1 | 2021-07-27T23:10:58.000Z | 2021-07-27T23:10:58.000Z | from socless import *
import os
import slack
def handle_state(channel_name, is_private):
    """Create a Slack channel (public or private).

    Args:
        channel_name (str): The name of the channel to be created.
        is_private (bool): True to create a private channel.

    Returns:
        dict: ``{"ok": True, "created_channel_id": ..., "channel_name": ...}``
            on success, or ``{"ok": False, "error": <message>}`` on failure.

    Token_Type: xoxp
    Note:
        - See https://api.slack.com/methods/conversations.create for more details on how to create private channel
    """
    SOCLESS_USER_TOKEN = os.environ['SOCLESS_USER_TOKEN']
    slack_api_client = slack.WebClient(SOCLESS_USER_TOKEN)
    try:
        res = slack_api_client.conversations_create(
            name=channel_name,
            is_private=is_private
        )
        created_channel_id = res["channel"]['id']
        return {
            "ok": True,
            "created_channel_id": created_channel_id,
            "channel_name": channel_name
        }
    except Exception as e:
        # Extract the human-readable detail from the stringified exception.
        # The previous code did split("'detail': ", 1)[1] unconditionally,
        # which raised IndexError (masking the real failure) whenever the
        # marker was absent; fall back to the full message in that case.
        s = str(e)
        marker = "'detail': "
        if marker in s:
            err_msg = s.split(marker, 1)[1]
            err_msg = err_msg[:len(err_msg) - 1]  # drop trailing brace/quote
        else:
            err_msg = s
        return {
            "ok": False,
            "error": err_msg
        }
def lambda_handler(event, context):
    # AWS Lambda entry point: delegate to the SOCless bootstrapper, which
    # unpacks the event payload and invokes handle_state with its fields.
    return socless_bootstrap(event, context, handle_state)
| 28.27907 | 124 | 0.592105 |
847abddd21e78e96b679d6da2c1f5f725fddf9c9 | 2,152 | py | Python | setup.py | antoinelb/aequitas | 5a912a3c1751b04c8688ad9e0c09ed87a6c48870 | [
"MIT"
] | 469 | 2018-04-24T23:11:45.000Z | 2022-03-29T07:54:07.000Z | setup.py | antoinelb/aequitas | 5a912a3c1751b04c8688ad9e0c09ed87a6c48870 | [
"MIT"
] | 62 | 2018-04-16T00:14:56.000Z | 2021-11-12T10:35:01.000Z | setup.py | antoinelb/aequitas | 5a912a3c1751b04c8688ad9e0c09ed87a6c48870 | [
"MIT"
] | 94 | 2018-05-21T16:13:57.000Z | 2022-03-25T20:07:25.000Z | import re
from pathlib import Path
from setuptools import find_packages, setup
ROOT_PATH = Path(__file__).parent
LICENSE_PATH = ROOT_PATH / 'LICENSE'
README_PATH = ROOT_PATH / 'README.md'
REQUIREMENTS_PATH = ROOT_PATH / 'requirement' / 'main.txt'
#with open(README_PATH, encoding='utf-8') as f:
# long_description = f.read()
long_description = """
Aequitas is an open-source bias audit toolkit for data scientists, machine learning researchers, and policymakers to audit machine learning models for discrimination and bias, and to make informed and equitable decisions around developing and deploying predictive tools."""
def stream_requirements(fd):
    """Yield distribution requirements from an open requirements file.

    Comment text (everything after a ``#``) is stripped from each line;
    blank lines and chained ``-r other.txt`` includes are skipped.
    """
    for raw_line in fd:
        requirement = re.sub(r'#.*$', '', raw_line).strip()
        if not requirement or requirement.startswith('-r'):
            continue
        yield requirement
# Materialize the pinned runtime requirements once at import time so the
# setup() call below can pass them to install_requires.
with REQUIREMENTS_PATH.open() as requirements_file:
    REQUIREMENTS = list(stream_requirements(requirements_file))
# Distribution metadata for setuptools. install_requires comes from
# requirement/main.txt (parsed above); package sources live under src/,
# and the aequitas-report console script is the CLI entry point.
setup(
    name='aequitas',
    version='0.41.0',
    description="The bias and fairness audit toolkit.",
    long_description=long_description,
    long_description_content_type='text/markdown',
    author="Center for Data Science and Public Policy",
    author_email='datascifellows@gmail.com',
    url='https://github.com/dssg/aequitas',
    packages=find_packages('src', exclude=['tests', 'tests.*']),
    package_dir={'': 'src'},
    include_package_data=True,
    install_requires=REQUIREMENTS,
    license='https://github.com/dssg/aequitas/blob/master/LICENSE',
    zip_safe=False,
    keywords='fairness bias aequitas',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],
    entry_points={
        'console_scripts': [
            'aequitas-report=aequitas_cli.aequitas_audit:main',
        ],
    }
)
| 32.606061 | 273 | 0.691914 |
db235d42910d9de1386bfc75bb5653a24c575f22 | 283 | py | Python | gcj/y2019/qual/A.py | mikhail-dvorkin/competitions | b414b33b75807bf75697cf372510626b551f2973 | [
"Unlicense"
] | 7 | 2017-06-05T22:15:53.000Z | 2021-06-24T10:52:44.000Z | gcj/y2019/qual/A.py | mikhail-dvorkin/competitions | b414b33b75807bf75697cf372510626b551f2973 | [
"Unlicense"
] | null | null | null | gcj/y2019/qual/A.py | mikhail-dvorkin/competitions | b414b33b75807bf75697cf372510626b551f2973 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
def solve(forbidden=4):
n = int(input())
a = n - 1
b = 1
t = 1
while t <= a:
if a // t % 10 == forbidden:
a -= t
b += t
t *= 10
return '{} {}'.format(a, b)
tests = int(input())
for t in range(tests):
print('Case #{}: {}'.format(t + 1, solve()))
| 15.722222 | 45 | 0.498233 |
cfab4cbd443bfb7dbe626124cc2b07cdfb9fb249 | 2,410 | py | Python | tests/common/test_run/ascend/triplet_loss_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | tests/common/test_run/ascend/triplet_loss_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | tests/common/test_run/ascend/triplet_loss_run.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | [
"Apache-2.0"
] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op.ascend import triplet_loss
def triplet_loss_run(shape, dtype, margin=12.0, kernel_name="triplet_loss", attrs=None):
    """Build the triplet-loss kernel, launch it, and compare with numpy.

    Args:
        shape (tuple): shape shared by the anchor/positive/negative inputs.
        dtype (str): element type, e.g. "float16" or "float32".
        margin (float): triplet-loss margin passed to the op as attribute.
        kernel_name (str): name used for the generated kernel.
        attrs (dict | None): build attributes; may carry "tuning" and
            "kernel_name" keys. Defaults to an empty dict. (The original
            signature used the mutable default ``attrs={}``, which is
            shared across calls; normalized to None here.)

    Returns:
        In tuning mode: ``(mod, expect, launch_args)`` or just ``mod``.
        Otherwise: ``(anchor, output, expect, compare_ok)``.
    """
    if attrs is None:
        attrs = {}
    op_attrs = [margin]
    if 'tuning' in attrs:
        t = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(triplet_loss.triplet_loss_naive, [shape, shape, shape], [dtype, dtype, dtype],
                                  op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=t)
        if t:
            anchor, expect, neg, output, pos = gen_data(dtype, margin, shape)
            return mod, expect, (anchor, pos, neg, output)
        else:
            return mod
    else:
        anchor, expect, neg, output, pos = gen_data(dtype, margin, shape)
        mod = utils.op_build_test(triplet_loss.triplet_loss_naive, [shape, shape, shape], [dtype, dtype, dtype],
                                  op_attrs=op_attrs, kernel_name=kernel_name, attrs=attrs)
        output = utils.mod_launch(mod, (anchor, pos, neg, output), expect=expect)
        return anchor, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, margin, shape):
    """Build deterministic triplet-loss inputs plus the numpy reference.

    The anchor ramps over 0..N-1, so positive (anchor + 1) and negative
    (anchor + 2) samples produce a non-zero loss for any positive margin
    larger than 3 * shape[-1].

    Returns:
        tuple: (anchor, expect, neg, output, pos) where ``output`` is a
        NaN-filled buffer for the kernel result and ``expect`` the
        expected loss per sample.
    """
    anchor = np.arange(np.prod(shape)).reshape(shape).astype(dtype)
    pos = anchor + 1.0
    neg = anchor + 2.0
    # squared euclidean distances along the last axis
    diff_pos = anchor - pos
    diff_neg = anchor - neg
    d_pos = np.sum(diff_pos * diff_pos, -1)
    d_neg = np.sum(diff_neg * diff_neg, -1)
    # hinge: max(margin + d_pos - d_neg, 0), i.e. relu of the raw loss
    expect = np.maximum(margin + d_pos - d_neg, 0)
    output = np.full(expect.shape, np.nan, dtype)
    return anchor, expect, neg, output, pos
| 43.818182 | 112 | 0.670124 |
c1221db918506e586e8e75eff2a2daa9a8d4d5ad | 40,279 | py | Python | discogstagger/taggerutils.py | shunte88/discogs-tagger | dc592692b37ea42b7a7b8343afa7e937d718e5c9 | [
"MIT"
] | null | null | null | discogstagger/taggerutils.py | shunte88/discogs-tagger | dc592692b37ea42b7a7b8343afa7e937d718e5c9 | [
"MIT"
] | null | null | null | discogstagger/taggerutils.py | shunte88/discogs-tagger | dc592692b37ea42b7a7b8343afa7e937d718e5c9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ext.mediafile import MediaFile
from discogstagger.stringformatting import StringFormatting
from discogstagger.album import Album, Disc, Track
from discogstagger.discogsalbum import DiscogsAlbum
from mako.lookup import TemplateLookup
from mako.template import Template
from unicodedata import normalize
import errno
import os
import re
import sys
import logging
import shutil
from shutil import copy2, copystat, Error, ignore_patterns
import imghdr
from datetime import datetime, timedelta
# import subprocess
import pprint
pp = pprint.PrettyPrinter(indent=4)
logger = logging
# class TagOpener(FancyURLopener, object):
#
# version = "discogstagger2"
#
# def __init__(self, user_agent):
# self.version = user_agent
# FancyURLopener.__init__(self)
#
class TaggerError(Exception):
    """Central exception for all errors happening during the tagging.

    The triggering value stays reachable via ``value``; the string form is
    ``repr(value)``, matching the original behaviour.
    """

    def __init__(self, value):
        # Also initialize the Exception base so args/pickling are set up
        # through the standard path.
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class TagHandler(object):
    """Writes the metadata collected on an Album onto the audio files.

    Walks all discs/tracks of the given album and rewrites each file's
    tag set from scratch, preserving only the configured ``keep_tags``.
    """

    def __init__(self, album, tagger_config):
        self.album = album
        self.config = tagger_config

        # comma separated list of tag names to carry over from the old file
        self.keep_tags = self.config.get("details", "keep_tags")
        self.user_agent = self.config.get("common", "user_agent")
        self.variousartists = self.config.get("details", "variousartists")
        self.releasecountry_formatted = self.config.get(
            "details", "releasecountry_formatted")

    def tag_album(self):
        """ tags all tracks in an album, the filenames are determined using
            the given properties on the tracks
        """
        logger.debug(f'tag_album discs :: {len(self.album.discs)}')
        for disc in self.album.discs:
            logger.debug(f'tag_album tracks :: {len(disc.tracks)}')
            for track in disc.tracks:
                path, file = os.path.split(track.full_path)
                self.tag_single_track(path, track)

    def tag_single_track(self, target_folder, track):
        """Rebuild the full tag set of one track file in target_folder."""
        # load metadata information
        logger.debug("target_folder: %s" % target_folder)
        metadata = MediaFile(os.path.join(target_folder, track.orig_file))

        # read already existing (and still wanted) properties
        # NOTE(review): falsy tag values (empty string, 0) are not kept here
        keepTags = {}
        if self.keep_tags is not None:
            for name in self.keep_tags.split(","):
                logger.debug("name %s" % name)
                if getattr(metadata, name):
                    keepTags[name] = getattr(metadata, name)

        # remove current metadata
        metadata.delete()

        self.album.codec = metadata.type

        # set album metadata
        metadata.album = self.album.title  # add formatting methods
        if self.releasecountry_formatted:
            metadata.album += f" [{self.album.countryiso}]"

        metadata.album += f" [{self.album.catnumbers[0]}]"
        # FIX: str.replace returns a new string; the original statement
        # discarded the result, so the "none" placeholders were never
        # actually stripped from the album tag. Assign it back.
        metadata.album = metadata.album.replace(
            ', none]', ']', 1).replace('[none]', '[]', 2)

        if "various" not in self.album.artist.lower():
            metadata.composer = self.album.artist

        # use list of albumartists
        if 'Various' in self.album.artists and self.album.is_compilation == True:
            metadata.albumartist = [self.variousartists]
        else:
            metadata.albumartist = self.album.artists

        # !TODO really, or should we generate this using a specific method?
        metadata.albumartist_sort = self.album.sort_artist

        # !TODO should be joined
        metadata.label = self.album.labels[0]

        metadata.source = self.album.sourcemedia
        metadata.sourcemedia = self.album.sourcemedia
        metadata.year = self.album.year
        metadata.country = self.album.country
        metadata.countryiso = self.album.countryiso

        metadata.catalognum = self.album.catnumbers[0]

        # add styles to the grouping tag
        metadata.groupings = self.album.styles

        # use genres to allow multiple genres in muliple fields
        metadata.genres = self.album.genres + self.album.styles

        # this assumes, that there is a metadata-tag with the
        # id_tag_name in the metadata object
        setattr(metadata, self.config.id_tag_name, self.album.id)

        metadata.discogs_release_url = self.album.url

        metadata.disctitle = track.discsubtitle
        metadata.disc = track.discnumber
        metadata.disctotal = len(self.album.discs)
        logger.debug(f"metadata.disctotal is {metadata.disctotal}")
        metadata.media = self.album.media

        if self.album.is_compilation:
            metadata.comp = True

        if track.notes:
            metadata.comments = '\r\n'.join((track.notes, self.album.notes))
        else:
            metadata.comments = self.album.notes

        # apply any statically configured tag overrides from the [tags] section
        tags = self.config.get_configured_tags
        logger.debug("tags: %s" % tags)
        for name in tags:
            value = self.config.get("tags", name)
            if value is not None:
                setattr(metadata, name, value)

        # set track metadata
        metadata.title = track.title
        metadata.artists = track.artists
        metadata.artist = track.artists

        # !TODO take care about sortartist ;-)
        metadata.artist_sort = track.sort_artist

        # prefer the vinyl-style position (e.g. "A1") when present
        if track.real_tracknumber is not None:
            metadata.track = track.real_tracknumber
        else:
            metadata.track = track.tracknumber

        metadata.tracktotal = len(self.album.disc(track.discnumber).tracks)

        # restore the preserved tags last so they win over everything above
        if keepTags is not None:
            for name in keepTags:
                setattr(metadata, name, keepTags[name])

        metadata.save()
class FileHandler(object):
    """ this class contains all file handling tasks for the tagger,
        it loops over the album and discs (see copy_files) to copy
        the files for each album. This could be done in the TagHandler
        class, but this would mean a too strong relationship between
        FileHandling and Tagging, which is not as nice for testing and
        for future extensability.
    """

    def __init__(self, album, tagger_config):
        self.config = tagger_config
        self.album = album
        # directory name under which processed cue files are stashed
        self.cue_done_dir = self.config.get('cue', 'cue_done_dir')
        # replaygain settings: whether to tag and which tool to shell out to
        self.rg_process = self.config.getboolean('replaygain', 'add_tags')
        self.rg_application = self.config.get('replaygain', 'application')

    def mkdir_p(self, path):
        """Create path recursively; an already-existing directory is fine."""
        try:
            os.makedirs(path)
        except OSError as exc:  # Python >2.5
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                raise

    def create_done_file(self):
        """Drop the configured marker file into the source directory."""
        # could be, that the directory does not exist anymore ;-)
        if os.path.exists(self.album.sourcedir):
            done_file = os.path.join(
                self.album.sourcedir, self.config.get("details", "done_file"))
            open(done_file, "w")

    def create_album_dir(self):
        """Create the album target directory when it is missing."""
        if not os.path.exists(self.album.target_dir):
            self.mkdir_p(self.album.target_dir)

    def copy_files(self):
        """
            copy an album and all its files to the new location, rename those
            files if necessary
        """
        logger.debug("album sourcedir: %s" % self.album.sourcedir)
        logger.debug("album targetdir: %s" % self.album.target_dir)

        for disc in self.album.discs:
            try:
                logger.debug("disc.target_dir: %s" % disc.target_dir)
                if hasattr(disc, 'sourcedir') and disc.sourcedir is not None:
                    logger.debug("disc.sourcedir: %s" % disc.sourcedir)
                    source_folder = os.path.join(
                        self.album.sourcedir, disc.sourcedir)
                else:
                    source_folder = self.album.sourcedir
            except Exception as e:
                source_folder = self.album.sourcedir

            if disc.target_dir is not None:
                target_folder = os.path.join(
                    self.album.target_dir, disc.target_dir)
            else:
                target_folder = self.album.target_dir

            # strip leftover "none" placeholders from template expansion
            target_folder = target_folder.replace(
                ', none]', ']', 1).replace('[none]', '[]', 2)

            logger.info(f"album folder ....: {self.album.target_dir}")

            copy_needed = False
            if not source_folder == target_folder:
                if not os.path.exists(target_folder):
                    self.mkdir_p(target_folder)
                copy_needed = True

            for track in disc.tracks:
                logger.debug("source_folder: %s" % source_folder)
                logger.debug("target_folder: %s" % target_folder)

                track.new_file = track.new_file.replace(
                    ', none]', ']', 1).replace('[none]', '[]', 2)

                logger.debug("orig_file: %s" % track.orig_file)
                logger.debug("new_file: %s" % track.new_file)

                source_file = os.path.join(source_folder, track.orig_file)
                target_file = os.path.join(target_folder, track.new_file)

                if copy_needed and not os.path.exists(target_file):
                    if not os.path.exists(source_file):
                        logger.error("Source does not exists")
                        # throw error

                    logger.debug("copying files (%s/%s)",
                                 source_folder, track.orig_file)
                    shutil.copyfile(os.path.join(source_folder, track.orig_file),
                                    os.path.join(target_folder, track.new_file))

    def remove_source_dir(self):
        """
            remove source directory, if configured as such (see config option
            details:keep_original)
        """
        keep_original = self.config.getboolean("details", "keep_original")
        source_dir = self.album.sourcedir

        logger.debug("keep_original: %s" % keep_original)
        logger.debug("going to remove directory....")
        if not keep_original:
            # FIX: logger.warn is the deprecated alias of warning
            logger.warning("Deleting source directory '%s'" % source_dir)
            shutil.rmtree(source_dir)

    def copy_other_files(self):
        """Copy non-audio companion files (scans, logs, ...) on request."""
        # copy "other files" on request
        copy_other_files = self.config.getboolean(
            "details", "copy_other_files")

        if copy_other_files:
            logger.info("copying files from source directory")

            if not os.path.exists(self.album.target_dir):
                self.mkdir_p(self.album.target_dir)

            copy_files = self.album.copy_files
            if copy_files != None:
                # FIX: "(x)" is just x, not a 1-tuple. With a plain string,
                # "f not in extf" was a substring test, silently skipping any
                # filename that happened to be a substring of cue_done_dir.
                extf = (self.cue_done_dir,)
                copy_files[:] = [f for f in copy_files if f not in extf]
                for fname in copy_files:
                    # NOTE(review): copytree_multi is not defined in this
                    # portion of the file -- presumably a module-level helper;
                    # verify it exists.
                    if os.path.isdir(os.path.join(self.album.sourcedir, fname)):
                        copytree_multi(os.path.join(self.album.sourcedir, fname), os.path.join(
                            self.album.target_dir, fname))
                    else:
                        shutil.copyfile(os.path.join(self.album.sourcedir, fname), os.path.join(
                            self.album.target_dir, fname))

            for disc in self.album.discs:
                copy_files = disc.copy_files
                # FIX: same 1-tuple fix as above
                extf = (self.cue_done_dir,)
                copy_files[:] = [f for f in copy_files if f not in extf]
                for fname in copy_files:
                    if not fname.endswith(".m3u"):
                        source_path = self.album.sourcedir
                        target_path = self.album.target_dir

                        try:  # safe
                            if hasattr(disc, 'sourcedir') and \
                                    disc.sourcedir is not None:
                                source_path = os.path.join(
                                    self.album.sourcedir, disc.sourcedir)
                        except Exception as e:
                            pass

                        try:  # safe
                            if hasattr(disc, 'target_dir') and \
                                    disc.target_dir is not None:
                                target_path = os.path.join(
                                    self.album.target_dir, disc.target_dir)
                        except Exception as e:
                            pass

                        if not os.path.exists(target_path):
                            self.mkdir_p(target_path)

                        if os.path.isdir(os.path.join(source_path, fname)):
                            copytree_multi(os.path.join(
                                source_path, fname),
                                os.path.join(target_path, fname))
                        else:
                            shutil.copyfile(os.path.join(
                                source_path, fname),
                                os.path.join(target_path, fname))

    def get_images(self, conn_mgr):
        """
            Download and store any available images

            The images are all copied into the album directory, on multi-disc
            albums the first image (mostly folder.jpg) is copied into the
            disc directory also to make it available to mp3 players (e.g. deadbeef)

            we need http access here as well (see discogsalbum), and therefore the
            user-agent
        """
        if self.album.images:
            images = self.album.images

            logger.debug("images: %s" % images)

            image_format = self.config.get("file-formatting", "image")
            use_folder_jpg = self.config.getboolean(
                "details", "use_folder_jpg")
            download_only_cover = self.config.getboolean(
                "details", "download_only_cover")

            logger.debug("image-format: %s" % image_format)
            logger.debug("use_folder_jpg: %s" % use_folder_jpg)

            self.create_album_dir()

            no = 0
            for i, image_url in enumerate(images, 0):
                logger.debug("Downloading image '%s'" % image_url)
                try:
                    picture_name = ""
                    # the very first image can optionally be "folder.jpg"
                    if i == 0 and use_folder_jpg:
                        picture_name = "folder.jpg"
                    else:
                        no = no + 1
                        picture_name = image_format + "-%.2d.jpg" % no

                    conn_mgr.fetch_image(os.path.join(
                        self.album.target_dir, picture_name), image_url)
                    if i == 0 and download_only_cover:
                        break
                except Exception as e:
                    logger.error(
                        "Unable to download image '%s', skipping." % image_url)
                    print(e)

    def embed_coverart_album(self):
        """
            Embed cover art into all album files
        """
        embed_coverart = self.config.getboolean("details", "embed_coverart")
        image_format = self.config.get("file-formatting", "image")
        use_folder_jpg = self.config.getboolean("details", "use_folder_jpg")

        if use_folder_jpg:
            first_image_name = "folder.jpg"
        else:
            first_image_name = image_format + "-01.jpg"

        image_file = os.path.join(self.album.target_dir, first_image_name)

        logger.debug("Start to embed coverart (on request)...")
        if embed_coverart and os.path.exists(image_file):
            logger.debug("embed_coverart and image_file")
            with open(image_file, 'rb') as f:
                imgdata = f.read()
            imgtype = imghdr.what(image_file)

            if imgtype in ("jpeg", "png"):
                logger.info("Embedding album art...")
                for disc in self.album.discs:
                    for track in disc.tracks:
                        self.embed_coverart_track(disc, track, imgdata)

    def embed_coverart_track(self, disc, track, imgdata):
        """
            Embed cover art into a single file
        """
        if disc.target_dir != None:
            track_dir = os.path.join(self.album.target_dir, disc.target_dir)
        else:
            track_dir = self.album.target_dir

        track_file = os.path.join(track_dir, track.new_file)
        metadata = MediaFile(track_file)
        try:
            metadata.art = imgdata
            metadata.save()
        except Exception as e:
            logger.error("Unable to embed image '{}'".format(track_file))
            print(e)

    def add_replay_gain_tags(self):
        """
            Add replay gain tags to all flac files in the given directory.
            Uses the default metaflac command, therefor this has to be installed
            on your system, to be able to use this method.
        """
        if self.rg_process == False:
            return

        codecs = ['.flac', '.ogg', '.mp3', '.ape']
        # per-extension command line switches for loudgain
        lg_options = {
            '.flac': '-a -k -s e',
            '.mp3': '-I 4 -S -L -a -k -s e'
        }
        albumdir = self.album.target_dir

        # work out if this is a multidisc set. Note that not all
        # subdirectories have music files, e.g. scans, covers, etc.
        root_dir, subdirs, files = next(os.walk(albumdir))
        multidisc = 0
        singledisc = 0
        matched = set()
        files.sort()
        for f in files:
            if list(filter(f.endswith, codecs)) != []:
                singledisc += 1
                matched.add(list(filter(f.endswith, codecs))[0])
        for dir in subdirs:
            subfiles = next(os.walk(os.path.join(albumdir, dir)))[2]
            for f in subfiles:
                if list(filter(f.endswith, codecs)) != []:
                    multidisc += 1
                    matched.add(list(filter(f.endswith, codecs))[0])

        # run one tool invocation per matched extension; a glob pattern with
        # "**" covers the per-disc subdirectories for multidisc sets
        for match in list(matched):
            pattern = os.path.join(
                albumdir, '**', '*' + match) if multidisc > 0 else os.path.join(albumdir, '*' + match)
            return_code = None
            logger.debug('Adding replaygain to files: {}'.format(pattern))
            if self.rg_application == 'metaflac':
                cmd = 'metaflac --add-replay-gain {}'.format(
                    self._escape_string(pattern))
                return_code = os.system(cmd)
            elif self.rg_application == 'loudgain':
                options = lg_options[match] if match in lg_options.keys(
                ) else ''
                cmd = 'loudgain {} {}'.format(
                    options, self._escape_string(pattern))
                return_code = os.system(cmd)
            else:
                return_code = -1
            logging.debug("Replaygain return code %s" % str(return_code))

    def _escape_string(self, string):
        """Backslash-escape shell metacharacters for the os.system calls above."""
        return '%s' % (
            string
            .replace('\\', '\\\\')
            .replace(' ', '\\ ')
            .replace('(', '\(')
            .replace(')', '\)')
            .replace(',', '\,')
            .replace('"', '\"')
            .replace('$', '\$')
            .replace('&', '\&')
            .replace('!', '\!')
            .replace('`', '\`')
            .replace("'", "\\'")
            .replace('[', '\[')
            .replace(']', '\]')
            .replace('-', '\-')
        )
class TaggerUtils(object):
""" Accepts a destination directory name and discogs release id.
TaggerUtils returns a the corresponding metadata information, in which
we can write to disk. The assumption here is that the destination
direcory contains a single album in a support format (mp3 or flac).
The class also provides a few methods that create supplimental files,
relvant to a given album (m3u, nfo file and album art grabber.)"""
# supported file types.
FILE_TYPE = (".mp3", ".flac",)
def fixNone(self, value):
return value.replace(', none]', ']', 1).replace('none]', ']', 2)
def __init__(self, sourcedir, destdir, tagger_config, album=None):
self.config = tagger_config
# ignore directory where old cue files are stashed
self.cue_done_dir = self.config.get('cue', 'cue_done_dir')
# !TODO should we define those in here or in each method (where needed) or in a separate method
# doing the "mapping"?
self.dir_format = self.config.get("file-formatting", "dir")
self.song_format = self.config.get("file-formatting", "song")
self.va_song_format = self.config.get("file-formatting", "va_song")
self.images_format = self.config.get("file-formatting", "image")
self.m3u_format = self.config.get("file-formatting", "m3u")
self.nfo_format = self.config.get("file-formatting", "nfo")
self.disc_folder_name = self.config.get("file-formatting", "discs")
self.normalize = self.config.get("file-formatting", "normalize")
self.use_lower = self.config.getboolean(
"details", "use_lower_filenames")
self.join_artists = self.config.get("details", "join_artists")
self.join_artists_filenames = self.config.get(
"details", "join_artists_filenames")
self.copy_other_files = self.config.getboolean(
"details", "copy_other_files")
self.char_exceptions = self.config.get_character_exceptions
self.sourcedir = sourcedir
self.destdir = self.fixNone(destdir)
if not album == None:
self.album = album
else:
raise RuntimeException('Cannot tag, no album given')
self.map_format_description()
self.album.sourcedir = sourcedir
# the album is stored in a directory beneath the destination directory
# and following the given dir_format
self.album.target_dir = self.dest_dir_name
logging.debug(f"album.target_dir: {self.dest_dir_name}")
# add template functionality ;-)
self.template_lookup = TemplateLookup(directories=["templates"])
    def map_format_description(self):
        """ Gets format description, and maps to user defined variations,
            e.g. Limited Edition -> ltd

            Reads the [media_description] config section into
            self.format_mapping (empty config values become None) and
            rewrites album.format_description entries in place.
        """
        self.format_mapping = {}
        self.media_desc_formatting = self.config.items('media_description')

        # get the mapping from config and convert to dict
        for i in self.media_desc_formatting:
            self.format_mapping[i[0]] = i[1] if i[1] != '' else None

        # replace each description that has a non-None mapping (lookup is
        # case-insensitive via lower())
        for i, desc in enumerate(self.album.format_description):
            if desc.lower() in self.format_mapping.keys():
                if self.format_mapping[desc.lower()] is not None:
                    self.album.format_description[i] = self.format_mapping[desc.lower(
                    )]
    def _value_from_tag_format(self, format, discno=1, trackno=1, filetype=".mp3"):
        """ Fill in the used variables using the track information
            Transform all variables and use them in the given format string, make this
            slightly more flexible to be able to add variables easier
            Transfer this via a map.

            Every %placeholder% found in `format` is replaced by the
            corresponding album/disc/track value; replacement values are
            passed through re.escape() before insertion (presumably so the
            later template parsing step treats them literally -- confirm).
        """
        # map of supported placeholders to their concrete values; the
        # lower-case names follow foobar2000-style title formatting, the
        # UPPER-CASE ones are the legacy discogstagger placeholders
        property_map = {
            '%album artist%': self.join_artists_filenames.join(self.album.artists),
            '%albumartist%': self.join_artists_filenames.join(self.album.artists),
            '%album%': self.album.title,
            '%catno%': ', '.join(self.album.catnumbers),
            '%country%': self.album.country,
            '%isocountry%': self.album.countryiso,
            "%year%": self.album.year,
            '%artist%': self.album.disc(discno).track(trackno).artist,
            '%totaldiscs%': self.album.disctotal,
            '%discnumber%': discno,
            '%mediatype%': self.album.disc(discno).mediatype,
            '%disctitle%': self.album.disc(discno).discsubtitle,
            '%track artist%': self.album.disc(discno).track(trackno).artist,
            '%title%': self.album.disc(discno).track(trackno).title,
            '%tracknumber%': self.get_real_track_number(format, discno, trackno),
            '%track number%': trackno,
            '%format%': self.album.format,
            '%format_description%': self.album.format_description,
            '%fileext%': self.album.disc(discno).filetype,
            '%bitdepth%': self.album.disc(discno).track(trackno).bitdepth,
            '%bitrate%': self.album.disc(discno).track(trackno).bitrate,
            # NOTE(review): '%channels%' appears twice in this literal; the
            # second occurrence below silently wins (identical value here)
            '%channels%': self.album.disc(discno).track(trackno).channels,
            '%codec%': self.album.disc(discno).track(trackno).codec,
            # the three placeholders below are defined but not populated yet
            '%filesize%': '',
            '%filesize_natural%': '',
            '%length_samples%': '',
            '%encoding%': self.album.disc(discno).track(trackno).encoding,
            '%samplerate%': self.album.disc(discno).track(trackno).samplerate,
            '%channels%': self.album.disc(discno).track(trackno).channels,
            '%length_seconds_fp%': self.album.disc(discno).track(trackno).length_seconds_fp,
            '%length%': self.album.disc(discno).track(trackno).length,
            '%length_ex%': self.album.disc(discno).track(trackno).length_ex,
            '%length_seconds%': self.album.disc(discno).track(trackno).length_seconds,
            "%ALBTITLE%": self.album.title,
            "%ALBARTIST%": self.album.artist,
            "%YEAR%": self.album.year,
            "%CATNO%": self.album.catnumbers[0],
            '%COUNTRY%': self.album.country,
            '%ISOCOUNTRY%': self.album.countryiso,
            "%GENRE%": self.album.genre,
            "%STYLE%": self.album.style,
            "%ARTIST%": self.album.disc(discno).track(trackno).artist,
            "%TITLE%": self.album.disc(discno).track(trackno).title,
            "%DISCNO%": discno,
            "%TRACKNO%": "%.2d" % trackno,
            "%TYPE%": filetype,
            "%LABEL%": self.album.labels[0],
            "%CODEC%": self.album.codec,
        }

        for hashtag in property_map.keys():
            format = format.replace(
                hashtag, re.escape(str(property_map[hashtag])))

        return format
def get_real_track_number(self, format, discno=1, trackno=1):
if self.album.disc(discno).track(trackno).real_tracknumber is not None:
return self.album.disc(discno).track(trackno).real_tracknumber
else:
return "%.2d" % trackno
    def _value_from_tag(self, format, discno=1, trackno=1, filetype=".mp3"):
        """ Generates the filename tagging map
            avoid usage of file extension here already, could lead to problems

            Pipeline: substitute %placeholders% (_value_from_tag_format),
            run the project's StringFormatting parser over the result, then
            sanitize for filesystem use via get_clean_filename (defined
            elsewhere in this class).
        """
        stringFormatting = StringFormatting()
        format = self._value_from_tag_format(format, discno, trackno, filetype)
        format = stringFormatting.parseString(format)
        format = self.get_clean_filename(format)

        logger.debug(f"output: {format}")

        return format
    def _set_target_discs_and_tracks(self, filetype):
        """
            set the target names of the disc and tracks in the discnumber
            based on the configuration settings and the name of the disc
            or track

            these can be calculated without knowing the source (well, the
            filetype seems to be a different calibre)
        """
        # dn is unused; per-disc numbering comes from disc.discnumber
        for dn, disc in enumerate(self.album.discs):
            # single-disc albums get no per-disc subdirectory
            if not self.album.has_multi_disc:
                disc.target_dir = None
            else:
                target_dir = self._value_from_tag(
                    self.disc_folder_name, disc.discnumber)
                disc.target_dir = target_dir

            # tn is unused; per-track numbering comes from track.tracknumber
            for tn, track in enumerate(disc.tracks):
                # special handling for Various Artists discs
                if self.album.artist == "Various":
                    newfile = self._value_from_tag(self.va_song_format, disc.discnumber,
                                                   track.tracknumber, filetype)
                else:
                    newfile = self._value_from_tag(self.song_format, disc.discnumber,
                                                   track.tracknumber, filetype)

                track.new_file = self.get_clean_filename(newfile)
    def gather_addional_properties(self):
        ''' Fetches additional technical information about the tracks

            Reads each source file with MediaFile and copies codec,
            encoding (lossless/lossy), samplerate, bitrate, bitdepth,
            channel layout and several length representations onto the
            corresponding album track objects.
        '''
        for disc in self.album.discs:
            dn = disc.discnumber
            for track in disc.tracks:
                tn = track.tracknumber
                metadata = MediaFile(track.full_path)
                # for field in metadata.readable_fields():
                #     print('fieldname: {}: '.format(field)) #, getattr(metadata, field)
                self.album.disc(dn).track(tn).codec = metadata.type
                codec = metadata.type
                # classify by container type; anything else counts as lossy
                lossless = ('flac', 'alac', 'wma', 'ape', 'wav')
                encod = 'lossless' if codec.lower() in lossless else 'lossy'
                self.album.disc(dn).track(tn).encoding = encod
                self.album.disc(dn).track(tn).samplerate = metadata.samplerate
                self.album.disc(dn).track(tn).bitrate = metadata.bitrate
                self.album.disc(dn).track(tn).bitdepth = metadata.bitdepth
                chans = metadata.channels
                # human-readable channel layout; >2 channels become "Nch"
                ch_opts = {1: 'mono', 2: 'stereo'}
                self.album.disc(dn).track(
                    tn).channels = ch_opts[chans] if chans in ch_opts else '{}ch'.format(chans)
                self.album.disc(dn).track(
                    tn).length_seconds_fp = metadata.length
                length_seconds_fp = metadata.length
                self.album.disc(dn).track(
                    tn).length_seconds = int(length_seconds_fp)
                # "H:MM:SS" string, truncated to whole seconds
                self.album.disc(dn).track(tn).length = str(
                    timedelta(seconds=int(length_seconds_fp)))
                # extended form with fractional seconds; [:-2] trims the
                # last two digits of timedelta's microsecond rendering
                length_ex_str = str(
                    timedelta(seconds=round(length_seconds_fp, 4)))
                self.album.disc(dn).track(tn).length_ex = length_ex_str[:-2]
def _directory_has_audio_files(self, dir):
codecs = ('.flac', '.ogg', '.mp3')
files = next(os.walk(dir))[2]
found = 0
for f in files:
if list(filter(f.endswith, codecs)) != []:
found += 1
return False if found == 0 else True
def _directory_prune_unwanted(self, dir_list):
""" Remove directories without audio files / in ignore list
"""
extf = (self.cue_done_dir)
dir_list[:] = [d for d in dir_list if d not in extf]
# return dir_list
    def _audio_files_in_subdirs(self, dir_list):
        """ Are files in subdirectories rather than root dirs?

            Scans `dir_list` (entries of album.sourcedir): returns False as
            soon as an audio file is seen at the top level, True on the
            first subdirectory that itself contains audio files, and False
            when neither is found. Order of entries therefore matters.
        """
        codecs = ('.flac', '.ogg', '.mp3')
        sourcedir = self.album.sourcedir
        for x in dir_list:
            # a top-level audio file means this is not a per-disc layout
            if x.endswith(codecs):
                return False
            elif os.path.isdir(os.path.join(sourcedir, x)) and \
                    self._directory_has_audio_files(os.path.join(sourcedir, x)):
                return True
        return False
def _get_target_list(self):
    """
    fetches a list of files with the defined file_type
    in the self.sourcedir location as target_list, other
    files in the sourcedir are returned in the copy_files list.

    Side effects: sets sourcedir/copy_files on the album and its discs,
    sets orig_file/full_path on every track, then delegates to
    _set_target_discs_and_tracks.  Raises TaggerError on I/O failure.
    """
    copy_files = []
    target_list = []
    disc_source_dir = None
    sourcedir = self.album.sourcedir
    logger.debug("target_dir: %s" % self.album.target_dir)
    logger.debug("sourcedir: %s" % sourcedir)
    try:
        dir_list = os.listdir(sourcedir)
        dir_list.sort()
        self._directory_prune_unwanted(dir_list)
        filetype = ""
        self.album.copy_files = []
        logger.debug(f"flagged mult-disc: {self.album.has_multi_disc}")
        if self.album.has_multi_disc or self._audio_files_in_subdirs(dir_list) is True:
            logger.debug(">>> is multi disc album, looping discs")
            dirno = 0
            for y in dir_list:
                logger.debug("is it a dir? %s" % y)
                if os.path.isdir(os.path.join(sourcedir, y)):
                    if self._directory_has_audio_files(os.path.join(sourcedir, y)):
                        logger.debug(
                            "Setting disc(%s) sourcedir to: %s" % (dirno, y))
                        self.album.discs[dirno].sourcedir = y
                        dirno = dirno + 1
                else:
                    logger.debug("Setting copy_files instead of sourcedir")
                    self.album.copy_files.append(y)
        else:
            logger.debug("Setting disc sourcedir to none")
            self.album.discs[0].sourcedir = None
        total_tracks = sum(
            1 for disc in self.album.discs for t in disc.tracks)
        pos = 0
        for dn, disc in enumerate(self.album.discs):
            if hasattr(disc, 'sourcedir') and disc.sourcedir is not None:
                disc_source_dir = os.path.join(
                    self.album.sourcedir, disc.sourcedir)
            else:
                disc_source_dir = self.album.sourcedir
            logger.debug("discn inst ..: %d" % dn)
            logger.debug("discno ......: %d" % disc.discnumber)
            logger.debug("sourcedir ...: %s" % disc_source_dir)
            # strip unwanted files
            disc_list = os.listdir(disc_source_dir)
            disc_list.sort()
            disc.copy_files = [x for x in disc_list
                               if not x.lower().endswith(TaggerUtils.FILE_TYPE)]
            target_list = [os.path.join(disc_source_dir, x) for x in disc_list
                           if x.lower().endswith(TaggerUtils.FILE_TYPE)]
            # target_list holds either all tracks (flat layout) or one
            # disc's tracks (per-disc dirs); disc.tracks is always per
            # disc, so test both counts.
            target_tracks = len(target_list)
            disc_tracks = len(disc.tracks)
            if not (target_tracks == disc_tracks or target_tracks == total_tracks):
                logger.debug(f"target_list: {target_list}")
                logger.error(
                    f"not matching number of files... want {target_tracks} have {disc_tracks}")
                # we should throw an error in here
            for idx, track in enumerate(disc.tracks):
                # BUG FIX (multi-disc): the global ``pos`` only indexes
                # target_list correctly when one flat directory holds every
                # disc's files; with per-disc directories use the per-disc
                # index, otherwise disc 2+ raised IndexError.
                index = pos if target_tracks == total_tracks else idx
                filename = target_list[index]
                logger.debug(f"track position: {pos}")
                logger.debug(
                    f"mapping file (unknown) --to--> {track.artists[0]} - {track.title}")
                track.orig_file = os.path.basename(filename)
                # NOTE(review): filename already includes disc_source_dir;
                # joining with album.sourcedir again looks suspicious when
                # sourcedir is relative -- confirm against callers.
                track.full_path = os.path.join(
                    self.album.sourcedir, filename)
                filetype = os.path.splitext(filename)[1]
                disc.filetype = filetype
                pos += 1
        self._set_target_discs_and_tracks(filetype)
    except OSError as e:
        # BUG FIXES: ENOENT (not EEXIST) means "no such file or directory";
        # ``self.sourcedir`` does not exist on this object (use the local
        # ``sourcedir``); and ``errno[e]`` raised TypeError -- report the
        # exception itself instead.
        if e.errno == errno.ENOENT:
            logger.error("No such directory '{}'".format(sourcedir))
            raise TaggerError(
                "No such directory '{}'".format(sourcedir))
        else:
            raise TaggerError(
                "General IO system error '{}'".format(e))
@property
def dest_dir_name(self):
    """Build the destination album directory name from self.dir_format."""
    logger.debug("self.destdir: {}".format(self.destdir))
    # Normalise the configured base path (absolute or relative).
    path_name = os.path.normpath(self.destdir)
    logger.debug("path_name: {}".format(path_name))
    dest_dir = ""
    for segment in self.dir_format.split("/"):
        cleaned = self.get_clean_filename(self._value_from_tag(segment))
        dest_dir = cleaned if dest_dir == "" else os.path.join(dest_dir, cleaned)
        logger.debug("d_dir: {}".format(dest_dir))
    return self.fixNone(os.path.join(path_name, dest_dir))
@property
def m3u_filename(self):
    """Build the playlist (.m3u) file name from the configured format."""
    return self.get_clean_filename(self._value_from_tag(self.m3u_format))
@property
def nfo_filename(self):
    """Build the album-info (.nfo) file name from the configured format."""
    return self.get_clean_filename(self._value_from_tag(self.nfo_format))
def get_clean_filename(self, f):
    """ Removes unwanted characters from file names

    The extension is preserved only for known audio types plus .m3u and
    .nfo; any other "extension" is treated as part of the base name.
    Applies configured character substitutions, optional NFKD
    normalisation and optional lower-casing.
    """
    filename, fileext = os.path.splitext(f)
    if fileext not in TaggerUtils.FILE_TYPE and fileext not in [".m3u", ".nfo"]:
        logger.debug("fileext: {}".format(fileext))
        filename = f
        fileext = ""
    a = str(filename)
    # windows doesn't like folders ending with '.'
    a = re.sub(r'\.$', '', a)
    a = re.sub(r'\$', 'S', a)  # Replace $ with S
    for k, v in self.char_exceptions.items():
        a = a.replace(k, v)
    # Idiomatic truth test (the original compared '== True').
    if self.normalize:
        a = normalize("NFKD", a)
    cf = re.compile(r"[^-\w.,()\[\]\s#@&!']")  # allowed characters
    cf = cf.sub("", str(a))
    cf = "".join([cf, fileext])
    if self.use_lower:
        cf = cf.lower()
    return cf
def create_file_from_template(self, template_name, file_name):
    """Render *template_name* with the current album and write the result
    to *file_name* inside album.target_dir.

    Propagates write_file's return value.  NOTE(review): template_lookup
    looks like a Mako TemplateLookup (get_template/render) -- confirm.
    """
    file_template = self.template_lookup.get_template(template_name)
    return write_file(file_template.render(album=self.album),
                      os.path.join(self.album.target_dir, file_name))
def create_nfo(self, dest_dir):
    """ Writes the .nfo file to disk. """
    # NOTE(review): dest_dir is unused -- the file always goes to
    # self.album.target_dir via create_file_from_template.
    return self.create_file_from_template("info.txt", self.nfo_filename)
def create_m3u(self, dest_dir):
    """ Generates the playlist for the given album.
        Adhering to the following m3u format.
        ---
        # EXTM3U
        # EXTINF:233,Artist - Song
        directory\file_name.mp3.mp3
        # EXTINF:-1,My Cool Stream
        http://www.site.com:8000/listen.pls
        ---
        Taken from http://forums.winamp.com/showthread.php?s=&threadid=65772"""
    # NOTE(review): dest_dir is unused -- the playlist is written to
    # self.album.target_dir via create_file_from_template.
    return self.create_file_from_template("m3u.txt", self.m3u_filename)
def write_file(filecontents, filename):
    """ writes a string of data to disk

    Creates missing parent directories first.  Returns True on success
    and False on failure (the original returned True unconditionally,
    masking write errors from callers).
    """
    parent = os.path.dirname(filename)
    # A bare filename has no parent component; os.makedirs('') would raise.
    if parent and not os.path.exists(parent):
        os.makedirs(parent)
    logger.debug("Writing file '%s' to disk" % filename)
    try:
        with open(filename, "w") as fh:
            fh.write(filecontents)
    except IOError:
        logger.error("Unable to write file '%s'" % filename)
        return False
    return True
def copytree_multi(src, dst, symlinks=False, ignore=None):
    """Recursively copy *src* into *dst*, merging into existing directories.

    Unlike shutil.copytree, *dst* may already exist.  *ignore* follows the
    shutil.copytree callback convention.  Raises shutil.Error with a list
    of (src, dst, reason) tuples when any entry fails to copy.
    """
    names = os.listdir(src)
    ignored_names = ignore(src, names) if ignore is not None else set()
    if not os.path.isdir(dst):
        os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree_multi(srcname, dstname, symlinks, ignore)
            else:
                copy2(srcname, dstname)
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        except Error as err:
            # A nested copytree_multi already collected per-file tuples.
            errors.extend(err.args[0])
    try:
        copystat(src, dst)
    except OSError as why:
        # BUG FIX: the original caught WindowsError first, which is
        # undefined on POSIX Pythons (NameError) and shadows OSError on
        # Windows.  Mirror shutil.copytree: ignore Windows-specific
        # copystat failures, record the rest -- and append() one 3-tuple
        # (the original extend() spliced its three elements).
        if getattr(why, 'winerror', None) is None:
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
| 38.65547 | 102 | 0.562526 |
257bd525fd49ad4dc0e02ad1d0543a6d8badb505 | 7,700 | py | Python | download_split_videos.py | bryant1410/vlog_action_reason | ac4e1ba437b9d544218bab48b1d788f54a62e794 | [
"MIT"
] | null | null | null | download_split_videos.py | bryant1410/vlog_action_reason | ac4e1ba437b9d544218bab48b1d788f54a62e794 | [
"MIT"
] | null | null | null | download_split_videos.py | bryant1410/vlog_action_reason | ac4e1ba437b9d544218bab48b1d788f54a62e794 | [
"MIT"
] | null | null | null | import glob
import json
import os
import subprocess
from subprocess import PIPE
import datetime
import time
import cv2
import shutil
import numpy as np
import pandas as pd
import tqdm
def split_video_by_time(video_id, time_start, time_end, verb, github_path):
    """Cut a clip out of data/videos/<video_id>.mp4 with ffmpeg.

    *time_start*/*time_end* are offsets in seconds; the output file is
    written under *github_path* as <id>+<H:MM:SS>+<H:MM:SS>.mp4 and is
    capped at 25 MB via ffmpeg's -fs option.
    NOTE(review): *verb* is unused in this function.
    """
    duration = time_end - time_start
    print(time_start)
    print(time_end)
    print(duration)
    # str(timedelta(...)) renders seconds as H:MM:SS, the form ffmpeg's
    # -ss/-to flags expect.
    time_start = str(datetime.timedelta(seconds=time_start))
    time_end = str(datetime.timedelta(seconds=time_end))
    duration = str(datetime.timedelta(seconds=duration))
    print(time_start)
    print(time_end)
    path_video = 'data/videos/' + video_id + '.mp4 '
    # command_split_video = 'ffmpeg -ss ' + time_start + ' -i ' + 'data/videos/' + video_id + '.mp4 ' + "-fs 25M " + '-to ' + duration + \
    #                       ' -c copy data/videos/splits/' + verb + "/" + video_id + '+' + time_start + '+' + time_end + '.mp4'
    # HACK: shell command built by string concatenation -- breaks on paths
    # containing spaces; subprocess.run with an argument list would be safer.
    command_split_video = 'ffmpeg -ss ' + time_start + ' -i ' + path_video + "-fs 25M " + '-to ' + duration + \
                          ' -c copy ' + github_path + video_id + '+' + time_start + '+' + time_end + '.mp4'
    print(command_split_video)
    os.system(command_split_video)
def download_video(video_id):
    """Fetch YouTube video *video_id* as mp4 into data/videos/ via youtube-dl."""
    url = "https://www.youtube.com/watch?v=" + video_id
    command_save_video = (
        "youtube-dl --no-check-certificate "
        "-f bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4 -v "
        "-o data/videos/" + video_id + " " + url
    )
    os.system(command_save_video)
def filter_split_by_motion(PATH_miniclips, PATH_problematic_videos, PARAM_CORR2D_COEFF=0.8):
    """Move near-static miniclips out of *PATH_miniclips*.

    For each .mp4, grayscale frames sampled every 100 frames are compared
    via the Pearson correlation of consecutive samples; when the median
    correlation is >= PARAM_CORR2D_COEFF the clip is considered
    motionless and is moved to *PATH_problematic_videos*.

    Returns the list of moved file names.
    """
    print("filtering videos by motion")
    if not os.path.exists(PATH_problematic_videos):
        os.makedirs(PATH_problematic_videos)
    list_video_names_removed = []
    list_videos = sorted(glob.glob(PATH_miniclips + "*.mp4"), key=os.path.getmtime)
    for video in tqdm.tqdm(list_videos):
        vidcap = cv2.VideoCapture(video)
        # Idiomatic truth tests below (the original compared '== False').
        if not vidcap.isOpened():
            continue
        corr_list = []
        # NOTE(review): '/'-based split assumes POSIX paths; os.path.basename
        # would be portable.
        video_name = video.split("/")[-1]
        length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        for frame_nb_1 in range(0, length - 100, 100):
            vidcap.set(1, frame_nb_1)
            success, image = vidcap.read()
            if not success:
                continue
            gray_image_1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            frame_nb_2 = frame_nb_1 + 100
            vidcap.set(1, frame_nb_2)
            success, image = vidcap.read()
            if not success:
                continue
            gray_image_2 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            corr2_matrix = np.corrcoef(gray_image_1.reshape(-1), gray_image_2.reshape(-1))
            corr2 = corr2_matrix[0][1]
            corr_list.append(corr2)
        # np.median([]) is nan and 'nan >= threshold' is False, so clips
        # with no comparable frame pairs are kept.
        if np.median(corr_list) >= PARAM_CORR2D_COEFF:
            # move video in another folder
            list_video_names_removed.append(video_name)
            shutil.move(video, PATH_problematic_videos + video_name)
    return list_video_names_removed
def download_from_dict(file_in):
    """Download and split miniclips described by the JSON file *file_in*.

    The JSON maps an action verb to a list of segments, each with a
    "video" id and "time_s"/"time_e" H:MM:SS timestamps.  Existing
    miniclips are skipped; static clips are filtered out afterwards and
    their names written to data/videos_to_remove.csv.
    """
    with open(file_in) as json_file:
        data = json.load(json_file)
    github_path = '../miniclips/'
    # ftr = [3600, 60, 1]
    list_all_video_names_removed = []
    list_actions = data.keys()
    for verb in list_actions:
        for s in data[verb][:10]:  # TODO: change index (only first 10 segments)
            # print(verb.split())
            # Parse H:MM:SS (fractional part dropped) into total seconds.
            x = time.strptime(s["time_s"].split('.')[0], '%H:%M:%S')
            time_s = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds()
            x = time.strptime(s["time_e"].split('.')[0], '%H:%M:%S')
            time_e = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds()
            video_id = s["video"]
            verb = "_".join(verb.split())
            # check if miniclip already exists
            time_start = str(datetime.timedelta(seconds=time_s))
            time_end = str(datetime.timedelta(seconds=time_e))
            if os.path.exists(github_path + video_id + '+' + time_start + '+' + time_end + '.mp4'):
                print(github_path + video_id + '+' + time_start + '+' + time_end + '.mp4' + " EXISTS! MOVING ON ..")
                continue
            # Download the full video once, then cut the segment out of it.
            if not os.path.exists('data/videos/' + video_id + ".mp4"):
                download_video(video_id)
            split_video_by_time(video_id, time_s, time_e, verb, github_path)
    list_video_names_removed = filter_split_by_motion(PATH_miniclips=github_path,
                                                      PATH_problematic_videos="data/videos/filtered_out/",
                                                      PARAM_CORR2D_COEFF=0.9)
    # 0.8 by default
    for video_name in list_video_names_removed:
        list_all_video_names_removed.append(video_name)
    df = pd.DataFrame({'videos_to_remove': list_all_video_names_removed})
    df.to_csv('data/videos_to_remove.csv')
def download_from_AMT_input(file_in):
    """Download and split miniclips listed in an AMT input CSV *file_in*.

    Each row's video_url encodes "<id>+<start>+<end>.mp4" after the
    GitHub miniclips prefix; timestamps are parsed back to seconds and
    the corresponding clip is cut unless it already exists.
    NOTE(review): *reasons* is read but unused.
    """
    list_all_video_names_removed = []
    github_path = '../miniclips/'
    df = pd.read_csv(file_in)
    for video_url, verb, reasons in zip(df["video_url"], df["action"], df["reasons"]):
        video_id, time_s, time_e = video_url.split("https://github.com/OanaIgnat/miniclips/blob/master/")[1].split("+")
        time_e = time_e.split(".mp4?raw=true")[0]
        # Parse H:MM:SS (fractional part dropped) into total seconds.
        x = time.strptime(time_s.split('.')[0], '%H:%M:%S')
        time_s = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds()
        x = time.strptime(time_e.split('.')[0], '%H:%M:%S')
        time_e = datetime.timedelta(hours=x.tm_hour, minutes=x.tm_min, seconds=x.tm_sec).total_seconds()
        # check if miniclip already exists
        time_start = str(datetime.timedelta(seconds=time_s))
        time_end = str(datetime.timedelta(seconds=time_e))
        if os.path.exists(github_path + video_id + '+' + time_start + '+' + time_end + '.mp4'):
            print(github_path + video_id + '+' + time_start + '+' + time_end + '.mp4' + " EXISTS! MOVING ON ..")
            continue
        if not os.path.exists('data/videos/' + video_id + ".mp4"):
            download_video(video_id)
        # if not os.path.exists('data/videos/splits/' + verb):
        #     os.makedirs(github_path + '/data/videos/splits/' + verb)
        split_video_by_time(video_id, time_s, time_e, verb, github_path)
    # list_video_names_removed = filter_split_by_motion(PATH_miniclips="github_path,
    #                                                   PATH_problematic_videos="data/videos/filtered_out/",
    #                                                   PARAM_CORR2D_COEFF=0.9)
    # # 0.8 by default
    # for video_name in list_video_names_removed:
    #     list_all_video_names_removed.append(video_name)
    #
    # df = pd.DataFrame({'videos_to_remove': list_all_video_names_removed})
    # df.to_csv('data/AMT/videos_to_remove.csv')
def main():
    """Entry point: currently only re-runs the motion filter over
    ../miniclips2/ (download steps are commented out)."""
    # download_from_AMT_input(file_in="data/AMT/input/trial2.csv")
    # download_from_dict(file_in="data/dict_sentences_per_verb_MARKERS_for_annotation.json")
    # list_video_names_removed = filter_split_by_motion(PATH_miniclips="../miniclips/no_check/",
    list_video_names_removed = filter_split_by_motion(PATH_miniclips="../miniclips2/",
                                                      PATH_problematic_videos="../miniclips3/filtered_out/",
                                                      PARAM_CORR2D_COEFF=0.9)


if __name__ == '__main__':
    main()
| 45.02924 | 138 | 0.617403 |
f6c9c99851d7dc4dd1ad13a02d56fe174ca831e9 | 16,426 | py | Python | bin/attic/seqcap_align.py | milag6/phyluce | ff0d351eab2e38916f028fafdef5ea4fddcd2657 | [
"BSD-3-Clause"
] | null | null | null | bin/attic/seqcap_align.py | milag6/phyluce | ff0d351eab2e38916f028fafdef5ea4fddcd2657 | [
"BSD-3-Clause"
] | null | null | null | bin/attic/seqcap_align.py | milag6/phyluce | ff0d351eab2e38916f028fafdef5ea4fddcd2657 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
seqcap_align.py
Created by Nicholas Crawford and Brant C. Faircloth
Copyright (c) 2010 Nicholas Crawford and Brant C. Faircloth. All rights reserved.
This script aligns fasta sequence groups on a per locus basis (where the locus
name is in fasta header). It takes a fasta file of reads, in arbitrary order,
groups reads by locus and uses MUSCLE (http://www.drive5.com/muscle/) to do
align reads by locus. We use the Biopython Alignment class to hold and
output reads in various formats (fasta, nexus, etc). We've also implemented
a class (ConcatenatedAlignment) which concatenates alignments by locus, and
outputs one large alignment in various formats.
We've also implemented options to trim aligned reads at specified distances
from an internal probe sequence and also remove the specified sequence
from within reads.
python seqcapAlign.py --input=all.fa --output=output/ \
--probe-file=../Simulation/SureSelectProbes.fsa --trim-flank=100 \
--multiprocessing --processors=6 --concatenate
"""
import pdb
import sys
import os
import shutil
import optparse
import tempfile
import multiprocessing
import phyluce.muscle
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC, Gapped
from Bio.Align.Generic import Alignment
class ConcatenatedAlignment(Alignment):
    '''A child class of the biopython alignment class, created so that we could
    add the concat method to concatenate multiple alignments'''

    def __init__(self):
        # Gapped DNA alphabet: concatenated loci are padded with '-'.
        Alignment.__init__(self, Gapped(IUPAC.unambiguous_dna, '-'))

    def concat(self, alignment):
        '''concatenate alignment objects (on a per SeqRecord basis)

        Records are matched on the trailing '_'-separated token of their
        id (presumably the taxon name -- confirm against upstream naming);
        matching sequences are joined end-to-end.  The first alignment
        seeds the record list.
        '''
        #pdb.set_trace()
        if not self._records:
            for seq in alignment:
                seq.id = seq.id.split('_')[-1]
                seq.name = seq.id
                self._records.append(seq)
        else:
            for seq in alignment:
                for pos, record in enumerate(self._records):
                    # this assumes that we will be using id as
                    # the joining attribute...
                    if seq.id.split('_')[-1] == record.id:
                        c_seq = SeqRecord(record.seq + seq.seq)
                        c_seq.name = record.id
                        c_seq.id = record.id
                        self._records[pos] = c_seq
class Locus(list):
    '''A list subclass holding the sequence reads of a single locus,
    plus the fasta-formatted text and temp file fed to muscle.'''

    def __init__(self, *arg):
        list.__init__(self)
        if arg:
            self.append(arg[0])
        self.contents = ''
        self.formatted = False
        self.tempFile = None

    def formatSequences(self):
        '''Append every read's fasta text to self.contents.'''
        self.formatted = True
        self.contents += ''.join(read.format('fasta') for read in self)

    def createTempFile(self):
        '''Dump self.contents into a temp file for muscle to consume.'''
        if self.contents:
            handle, self.tempFile = tempfile.mkstemp(suffix='.muscle')
            os.write(handle, self.contents)
            os.close(handle)
def interfaceWarnings(p, message):
    '''generic warning function for interface options'''
    # Print the specific problem (may be None), show the parser's usage,
    # then exit with the conventional "command line usage error" code.
    print message
    p.print_help()
    sys.exit(2)
def interface():
    '''Get the starting parameters from a configuration file

    Parses command-line options, validates them (exiting with code 2 via
    interfaceWarnings on errors), attaches format_extension to the
    options and returns (options, args).
    '''
    usage = "usage: %prog [options]"
    p = optparse.OptionParser(usage)
    p.add_option('--input', dest='path', action='store',
                 type='string', default=None,
                 help='The path to the input file.', metavar='FILE')
    p.add_option('--output', dest='output', action='store',
                 type='string', default=None,
                 help='The path to the output directory.', metavar='FILE')
    p.add_option('--format', dest='format', action='store',
                 type='string', default='nexus',
                 help='Output format: clustal, emboss, fasta, nexus, phylip,'
                 + 'stockholm')
    p.add_option('--concatenate', dest='concatenate', action='store_true',
                 default=False, help='Concatenate alignments by locus')
    p.add_option('--probe-file', dest='probefile', action='store',
                 type='string', default=None,
                 help='Path to a fasta file containing the probe sequences.',
                 metavar='FILE')
    p.add_option('--trim-flank', dest='trimflank', action='store',
                 type='int', default=None,
                 help='The length to trim the flank.')
    p.add_option('--species', dest='species', action='store',
                 type='int', default=None,
                 help='Number of species expected in each alignment.')
    p.add_option('--no-ambiguous', dest='no_ambiguous', action='store_true',
                 default=False, help='Don\'t allow reads with ambiguous (N) bases')
    p.add_option('--trim-ambiguous', dest='trim_ambiguous', action='store_true',
                 default=False, help='Allow reads with ambiguous (N) bases, but trim'
                 + ' them out following alignment')
    p.add_option('--trim-running', dest='trimrunning', action='store_true',
                 default=False, help='Trim alignments using a running average')
    p.add_option('--remove-probe', dest='removeprobe', action='store_true',
                 default=False, help='Remove the probe sequence from the alignment.')
    p.add_option('--keep-probe', dest='keepprobe', action='store_true',
                 default=False, help='Add the probe sequence to the alignment.')
    p.add_option('--multiprocessing', dest='multiprocessing', action='store_true',
                 default=False, help='Use mutiple cores/processors.')
    p.add_option('--processors', dest='processors', action='store',
                 type='int', default=multiprocessing.cpu_count() - 2,
                 help='Number of cores to use.')
    p.add_option('--notstrict', dest='notstrict', action='store_true',
                 default=False, help='Allow alignments containing not all species.')
    p.add_option('--faircloth', dest='faircloth', action='store_true',
                 default=False, help='Take faircloth+stephens probe names')
    p.add_option('--verbose', dest='verbose', action='store_true',
                 default=False, help='List locus names while processing.')
    (options, arg) = p.parse_args()
    if not options.species:
        interfaceWarnings(p, "You must provide an expected species count per alignment")
    if options.processors < 1:
        options.processors = 1
    if not options.path:
        interfaceWarnings(p, None)
    if not os.path.isfile(options.path):
        interfaceWarnings(p, "You must provide a valid path to the input file or directory")
    if options.removeprobe and not options.probefile:
        interfaceWarnings(p, "You must provide a file of probe sequences if you are removing a probe")
    # BUG FIXES: the original called interfaceWarnings() without the parser
    # (TypeError when this branch was hit) and passed options.output --
    # possibly None -- straight to os.path.isfile.
    if options.output and os.path.isfile(options.output):
        interfaceWarnings(p, "You must provide an output *directory*.")
    formats = {'clustal': '.clw',
               'emboss': '.emboss',
               'fasta': '.fa',
               'nexus': '.nex',
               'phylip': '.phylip',
               'stockholm': '.stockholm'}
    # Membership test on the dict itself, not .keys().
    if options.format not in formats:
        interfaceWarnings(p, "This is an unsupported output format")
    options.format_extension = formats[options.format]
    return options, arg
def outPFilename(path, fnn, extension):
    '''Join *path* and *fnn*, then tack on *extension* to build an
    output filename.'''
    return os.path.join(path, fnn) + extension
def multiAlign(input, output):
    """Worker loop: pull (locus, sequences, probe, options) jobs from the
    *input* queue, align each with singleAlign, and push the results onto
    the *output* queue.  Terminates on the 'STOP' sentinel.
    """
    for locus_name, sequences, probe, options in iter(input.get, 'STOP'):
        results = singleAlign(locus_name, sequences, probe, options)
        output.put(results)
    return
def singleAlign(locus_name, sequences, probe, options):
    """Align one locus's reads with muscle and return (locus_name, alignment).

    Four mutually exclusive trimming modes, chosen from *options*:
    probe-anchored trim with probe removed, probe kept with running-average
    trim, running-average trim without a probe, or no trimming at all.
    """
    #pdb.set_trace()
    if options.verbose:
        print '\tLocus: %s' % locus_name
    if options.trimflank or options.removeprobe:
        # give the probe a generic name so we can easily find it later
        probe.name = 'probe'
        probe.id = 'probe'
        probe.description = 'probe'
        # add the probe sequence to the locus
        sequences.append(probe)
        # format the locus reads to ('fasta')
        sequences.formatSequences()
        # create a tempfile to feed to muscle and feed it
        sequences.createTempFile()
        muscle = phyluce.muscle.Align(sequences.tempFile)
        muscle.run_alignment(consensus=False)
        muscle.get_probe_location()
        muscle.trim_alignment(method='trim', probe='remove')
    elif options.keepprobe:
        # give the probe a generic name so we can easily find it later
        probe.name = 'probe'
        probe.id = 'probe'
        probe.description = 'probe'
        # add the probe sequence to the locus
        sequences.append(probe)
        # format the locus reads to ('fasta')
        sequences.formatSequences()
        # create a tempfile to feed to muscle and feed it
        sequences.createTempFile()
        muscle = phyluce.muscle.Align(sequences.tempFile)
        muscle.run_alignment(consensus=False)
        muscle.trim_alignment(method='running-probe', window_size=20, threshold=0.5)
    elif options.trimrunning:
        sequences.formatSequences()
        sequences.createTempFile()
        muscle = phyluce.muscle.Align(sequences.tempFile)
        muscle.run_alignment(consensus=False)
        muscle.trim_alignment(method='running', window_size=20, threshold=0.5)
    else:
        sequences.formatSequences()
        sequences.createTempFile()
        muscle = phyluce.muscle.Align(sequences.tempFile)
        muscle.run_alignment(consensus=False)
        muscle.trim_alignment(method='notrim')
    # Optionally strip columns containing ambiguous (N) bases.
    if options.trim_ambiguous:
        muscle.trim_ambiguous_bases()
        return locus_name, muscle.perfect_trimmed_alignment
    else:
        return locus_name, muscle.trimmed_alignment
def q_runner(n_procs, loci, probes, options, function, *args):
    '''generic function used to start worker processes

    Feeds one job per locus into a task queue, spawns up to *n_procs*
    workers running *function*, collects one result per locus, then
    sends 'STOP' sentinels and tears the queues down.
    '''
    task_queue = multiprocessing.Queue()
    results_queue = multiprocessing.JoinableQueue()
    if args:
        arguments = (task_queue, results_queue,) + args
    else:
        arguments = (task_queue, results_queue,)
    results = []
    # reduce processer count if proc count > files
    if len(loci) < n_procs:
        n_procs = len(loci)
    for count, locus in enumerate(loci):
        if probes:
            task_queue.put([locus, loci[locus], probes[locus], options])
        else:
            task_queue.put([locus, loci[locus], None, options])
    for _ in range(n_procs):
        # NOTE(review): Process(...).start() returns None, so ``p`` is
        # always None -- the worker handles are never kept (no join()).
        p = multiprocessing.Process(target=function, args=arguments).start()
        #print 'Starting %s' % function
    for _ in range(len(loci)):
        # indicated done results processing
        results.append(results_queue.get())
        results_queue.task_done()
    #tell child processes to stop
    for _ in range(n_procs):
        task_queue.put('STOP')
    # join the queue until we're finished processing results
    print 'Waiting for jobs to finish...'
    results_queue.join()
    # not closing the Queues caused me untold heartache and suffering
    task_queue.close()
    results_queue.close()
    return results
def makeOutPutDir(output):
    """create directory to store output

    Creates *output* when missing; when it exists, asks before wiping
    and recreating it.  Returns *output*.
    """
    if os.path.exists(output):
        overwrite = raw_input('Path exists, overwrite? [Y/n]:')
        # BUG FIX: the original tested `overwrite.lower() == 'y' or 'yes'`,
        # which is always truthy ('yes' is a non-empty string), so ANY
        # answer -- including 'n' -- wiped the directory.  Empty input
        # takes the default ('Y') shown in the prompt.
        if overwrite.lower() in ('', 'y', 'yes'):
            shutil.rmtree(output)
            os.mkdir(output)
    else:
        # The original never created a missing directory, so later
        # open() calls into it failed.
        os.mkdir(output)
    return output
def ambiguousBaseChecker(loci, locus, record):
    """Add *record* to *loci* only when its sequence contains no 'N'
    (ambiguous) bases; otherwise report the skip.  Returns *loci*."""
    if not 'N' in record.seq:
        loci = buildLocusDict(loci, locus, record)
    else:
        print 'Skipping {0} because it contains ambiguous bases'.format(record.id)
    return loci
def buildLocusDict(loci, locus, record):
    """Append *record* to the Locus for *locus*, creating the Locus on
    first sight.  Returns the (mutated) *loci* dict for chaining."""
    # Membership test on the dict itself; '.keys()' built a throwaway
    # list on every call in Python 2.
    if locus not in loci:
        loci[locus] = Locus(record)
    else:
        loci[locus].append(record)
    return loci
def main():
    """Pipeline driver: group reads by locus, align each locus with
    muscle (optionally in parallel), write per-locus alignments and,
    when requested, a concatenated alignment."""
    options, arg = interface()
    # create an instance of the ConcatenatedAlignment, if necc:
    if options.concatenate:
        concatenatedAlignment = ConcatenatedAlignment()
    else:
        alignmentBlocks = None
    if not options.output:
        options.output = options.path
    if options.trimflank or options.removeprobe or options.keepprobe:
        # create a dict of probe sequences
        #TODO: clean this up a bit
        probes = SeqIO.to_dict(SeqIO.parse(open(options.probefile, 'rU'), 'fasta'))
    else:
        probes = None
    # create a holder for all loci
    loci = {}
    # basically, cram all of the sequence records from a file into a
    # dictionary, indexed by the locus name. This will allow us (soon)
    # to read from a single file of fastas, putting reads with their
    # correct loci then sending that group of locus records off for
    # processing.
    print 'Making output directory...'
    makeOutPutDir(options.output)
    print 'Building the locus dictionary...'
    if options.no_ambiguous:
        print 'Removing ALL sequences with ambiguous bases...'
    else:
        print 'NOT removing sequences with ambiguous bases...'
    for record in SeqIO.parse(open(options.path, 'rU'), 'fasta'):
        #pdb.set_trace()
        # The locus name is embedded in the '|'-delimited fasta header;
        # faircloth-style headers also carry a probe-name prefix.
        if not options.faircloth:
            locus = record.description.split('|')[1]
        else:
            locus = '_'.join([record.description.split('|')[0], \
                record.description.split('|')[1].split('_')[0]])
        #record.id = record.description.split('_')[0] # added by NGC to fix seq IDs
        # skip records containing ambiguous bases
        if options.no_ambiguous:
            loci = ambiguousBaseChecker(loci, locus, record)
        else:
            loci = buildLocusDict(loci, locus, record)
    #pdb.set_trace()
    # iterate over loci to check for all species at a locus
    good_loci = {}
    for locus in loci:
        if options.notstrict:
            good_loci[locus] = loci[locus]
        elif len(loci[locus]) < options.species:
            #for critter in loci[locus]:
            #    #pdb.set_trace()
            #    print critter.id.split('_')[2]
            print 'Dropping Locus {0} because of missing species'.format(locus)
            #pdb.set_trace()
        else:
            good_loci[locus] = loci[locus]
    #pdb.set_trace()
    if options.multiprocessing:
        print 'Using %s cores to align DNA sequences...' % options.processors
        results = q_runner(options.processors, good_loci, probes, options, multiAlign)
    else:
        print 'Using 1 core to align DNA sequences...'
        results = []
        for locus in good_loci:
            if probes:
                probe = probes[locus]
            else:
                probe = None
            results.append(singleAlign(locus, loci[locus], probe, options))
    print 'Writing output files...'
    for locus, alignment in results:
        # Alignments shorter than 50 columns are treated as failed trims.
        if alignment and alignment.get_alignment_length() > 50:
            # renamed from nexus to (generic) outp because function is generic now...
            print outPFilename(options.output, locus, options.format_extension)
            outp = open(outPFilename(options.output, locus, options.format_extension), 'w')
            #
            # NOTE: this is a stop-gap measure to deal with different probe names
            #
            if options.format == "phylip":
                for r in alignment:
                    r.id = r.id.split('_')[-1]
            outp.write(alignment.format(options.format))
            outp.close()
        else:
            print '\tLocus %s not successfully aligned due to trimming errors - skipped from writing' % locus
            pass
    if options.concatenate:
        for locus, alignment in results:
            if alignment and alignment.get_alignment_length() > 50:
                concatenatedAlignment.concat(alignment)
            else:
                print '\tLocus %s not successfully aligned due to trimming errors - skipped from writing' % locus
                pass
        outp = open(outPFilename(options.output, 'concat', options.format_extension), 'w')
        try:
            outp.write(concatenatedAlignment.format(options.format))
        except:
            pdb.set_trace()
        outp.close()
    #pdb.set_trace()


if __name__ == '__main__':
    main()
| 39.772397 | 113 | 0.636856 |
5c60957fda1127526c8940116ecf5807188cda2e | 62,414 | py | Python | Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py | cyberluminar/content | 45ef86b7fd2cc0382844086c4c323e2151d598f3 | [
"MIT"
] | 1 | 2022-03-15T13:16:24.000Z | 2022-03-15T13:16:24.000Z | Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py | cyberluminar/content | 45ef86b7fd2cc0382844086c4c323e2151d598f3 | [
"MIT"
] | 53 | 2022-03-08T14:50:03.000Z | 2022-03-31T17:36:57.000Z | Packs/SplunkPy/Integrations/SplunkPy/SplunkPy_test.py | haldunerbay/content | 7cdabbf87138eb9efb6b27c6a8e81ecc15d81379 | [
"MIT"
] | null | null | null | from copy import deepcopy
import pytest
from splunklib.binding import AuthenticationError
import SplunkPy as splunk
import splunklib.client as client
import demistomock as demisto
from CommonServerPython import *
from datetime import timedelta, datetime
from collections import namedtuple
RETURN_ERROR_TARGET = 'SplunkPy.return_error'
DICT_RAW_RESPONSE = '"1528755951, search_name="NG_SIEM_UC25- High number of hits against ' \
'unknown website from same subnet", action="allowed", dest="bb.bbb.bb.bbb , cc.ccc.ccc.cc , ' \
'xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", distinct_hosts="5", ' \
'first_3_octets="1.1.1", first_time="06/11/18 17:34:07 , 06/11/18 17:37:55 , 06/11/18 17:41:28 , ' \
'06/11/18 17:42:05 , 06/11/18 17:42:38", info_max_time="+Infinity", info_min_time="0.000", ' \
'src="xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa", u_category="unknown", ' \
'user="xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown", website="2.2.2.2""'
LIST_RAW = 'Feb 13 09:02:55 1,2020/02/13 09:02:55,001606001116,THREAT,url,' \
'1,2020/02/13 09:02:55,10.1.1.1,1.2.3.4,0.0.0.0,0.0.0.0,rule1,jordy,,web-browsing,vsys1,trust,untrust,' \
'ethernet1/2,ethernet1/1,forwardAll,2020/02/13 09:02:55,59460,1,62889,80,0,0,0x208000,tcp,alert,' \
'"ushship.com/xed/config.bin",(9999),not-resolved,informational,client-to-server,' \
'0,0x0,1.1.22.22-5.6.7.8,United States,0,text/html'
RAW_WITH_MESSAGE = '{"@timestamp":"2019-10-15T13:30:08.578-04:00","message":"{"TimeStamp":"2019-10-15 13:30:08",' \
'"CATEGORY_1":"CONTACT","ASSOCIATEOID":"G2N2TJETBRAAX68V","HOST":' \
'"step-up-authentication-api.gslb.es.oneadp.com","SCOPE[4]":"PiSvcsProvider\/payroll","SCOPE[19]":' \
'"\/api\/events\/core\/v1\/user-status","CONTEXT":"\/smsstepup","FLOW":"API","X-REAL-IP":' \
'"2.2.2.2","PRODUCT_CODE":"WFNPortal","X-FORWARDED-PROTO":"http","ERROR_ID":"4008",' \
'"SCOPE[23]":"\/security\/notification-communication-response-value.accept","REQ_URL":' \
'"http:\/\/step-up-authentication-api.gslb.es.blabla.com\/smsstepup\/events\/core\/v1\/step-up-' \
'user-authorization-request.evaluate","SCOPE[35]":"autopay\/payroll\/v1\/cafeteria-plan-' \
'configurations\/{configurationItemID}","SCOPE_MATCHED":"Y","SCOPE[43]":"communication\/n' \
'otification-message-template.add","SCOPE[11]":"\/ISIJWSUserSecurity","SCOPE[27]":"autopay\/events' \
'\/payroll\/v1\/earning-configuration.add","ORGOID":"G2SY6MR3ATKA232T","SCOPE[8]":"\/' \
'ISIJWSAssociatesService","SCOPE[39]":"autopay\/payroll\/v1\/earning-configurations",' \
'"SETUP_SELF":"N","SCOPE[47]":"communication\/notification.publish","SCOPE[15]":"' \
'\/OrganizationSoftPurge","X-FORWARDED-HOST":"step-up-authentication-api.gslb.es.blabla.com",' \
'"ADP-MESSAGEID":"a1d57ed2-1fe6-4800-be7a-26cd89bhello","CNAME":"JRJG INC","CONTENT-LENGTH":' \
'"584","SCOPE[31]":"autopay\/events\/payroll\/v1\/earning-configuration.remove","CID":"BSTAR00044"' \
',"ACTOR_UID":"ABinters@BSTAR00044","SECURE_API_MODE":"HTTPS_SECURE","X-REQUEST-ID":' \
'"2473a981bef27bc8444e510adc12234a","SCOPE[1]":"AVSSCP\/Docstash\/Download","SCOPE[18]":' \
'"\/api\/events\/core\/v1\/product-role.assign","BLOCK_SESSION":"Y","CONSUMER_ID":' \
'"ab2e715e-41c4-43d6-bff7-fc2d713hello","SCOPE[34]":"autopay\/payroll\/v1\/cafeteria-plan-' \
'configurations","SCOPE[46]":"communication\/notification-message-template.remove","MODULE":' \
'"STEPUP_API","SCOPE[9]":"\/ISIJWSClientService","SCOPE[10]":"\/ISIJWSJobsService","SCOPE[22]":' \
'"\/api\/person-account-registration","SCOPE[38]":"autopay\/payroll\/v1\/deposit-configurations",' \
'"SUBJECT_ORGOID":"G2SY6MR3ATKA232T","SCOPE[5]":"\/Associate","SCOPE[14]":"\/Organization",' \
'"SCOPE[26]":"WFNSvcsProvider\/payrollPi","EVENT_ID":"9ea87118-5679-5b0e-a67f-1abd8ccabcde",' \
'"SCOPE[30]":"autopay\/events\/payroll\/v1\/earning-configuration.payroll-accumulators.modify",' \
'"X-FORWARDED-PORT":"80","SCOPE[42]":"autopay\/payroll\/v1\/worker-employment-records","JTI":' \
'"867b6d06-47cf-40ab-8dd7-bd0d57babcde","X-DOMAIN":"secure.api.es.abc.com","SOR_CODE":' \
'"WFNPortal","SCOPE[29]":"autopay\/events\/payroll\/v1\/earning-configuration.configuration' \
'-tags.modify","SCOPE[2]":"AVSSCP\/Docstash\/Get","OUTPUT_TYPE":"FAIL","ERR_MSG":"BLOCK_SESSION",' \
'"TRANS_ID":"3AF-D30-7CTTCQ","SCOPE[45]":"communication\/notification-message-template.read",' \
'"USE_HISTORY":"Y","SCHEME":"http","SCOPE[13]":"\/ISIJWSUsersService","SCOPE[21]":"\/api\/person",' \
'"SCOPE[33]":"autopay\/events\/payroll\/v1\/worker-insurable-payments.modify","X-FORWARDED-FOR":' \
'"8.8.8.8, 10.10.10.10, 1.2.3.4, 5.6.7.8","SCOPE[17]":"\/api\/core\/v1\/organization",' \
'"SCOPE[25]":"\/step-up-user-authorization.initiate","SCOPE[6]":"\/Associate\/PIC","SCOPE[37]":' \
'"autopay\/payroll\/v1\/cafeteria-plan-configurations\/{configurationItemID}\/' \
'payroll-item-configurations\/{payrollItemID}","FLOW_TYPE":"REST","SCOPE[41]":' \
'"autopay\/payroll\/v1\/payroll-output","CONSUMERAPPOID":"WFNPortal","RESOURCE":' \
'"\/events\/core\/v1\/step-up-user-authorization-request.evaluate","USER-AGENT":' \
'"Apache-HttpClient\/4.5.5 (Java\/10.0.1)","SCOPE[3]":"AVSSCP\/Docstash\/List",' \
'"SUB_CATEGORY_1":"worker.businessCommunication.email.change","TIME":"9","X-SCHEME":' \
'"http","ADP-CONVERSATIONID":"stY46PpweABoT5JX04CZGCeBbX8=","SCOPE[12]":' \
'"\/ISIJWSUserSecurityService","SCOPE[24]":"\/step-up-user-authorization-request.evaluate",' \
'"SCOPE[32]":"autopay\/events\/payroll\/v1\/retro-pay-request.add","SCOPE[44]":' \
'"communication\/notification-message-template.change","ACTION":"POST","SCOPE[7]":' \
'"\/AssociateSoftPurge","SCOPE[16]":"\/api\/authentication","X-ORIGINAL-URI":' \
'"\/smsstepup\/events\/core\/v1\/step-up-user-authorization-request.evaluate","SCOPE[28]":' \
'"autopay\/events\/payroll\/v1\/earning-configuration.change","SCOPE[36]":' \
'"autopay\/payroll\/v1\/cafeteria-plan-configurations\/{configurationItemID}\/payroll-item' \
'-configurations","SESSION_ID":"f50be909-9e4f-408d-bf77-68499012bc35","SCOPE[20]":' \
'"\/api\/events\/core\/v1\/user.provision","SUBJECT_AOID":"G370XX6XYCABCDE",' \
'"X-ORIGINAL-FORWARDED-FOR":"1.1.1.1, 3.3.3.3, 4.4.4.4","SCOPE[40]":' \
'"autopay\/payroll\/v1\/employer-details"}","TXID":"3AF-D30-ABCDEF","ADP-MessageID":' \
'"a1d57ed2-1fe6-4800-be7a-26cd89bf686d","SESSIONID":"stY46PpweFToT5JX04CZGMeCvP8=","ORGOID":' \
'"G2SY6MR3ATKA232T","AOID":"G2N2TJETBRAAXAAA","MSGID":"a1d57ed2-1fe6-0000-be7a-26cd89bf686d"}'
SAMPLE_RESPONSE = [{
'_bkt': 'notable~668~66D21DF4-F4FD-4886-A986-82E72ADCBFE9',
'_cd': '668:17198',
'_indextime': '1596545116',
'_raw': '1596545116, search_name="Endpoint - Recurring Malware Infection - Rule", count="17", '
'day_count="8", dest="ACME-workstation-012", info_max_time="1596545100.000000000", '
'info_min_time="1595939700.000000000", info_search_time="1596545113.965466000", '
'signature="Trojan.Gen.2"',
'_serial': '50',
'_si': ['ip-172-31-44-193', 'notable'],
'_sourcetype': 'stash',
'_time': '2020-08-04T05:45:16.000-07:00',
'dest': 'ACME-workstation-012',
'dest_asset_id': '028877d3c80cb9d87900eb4f9c9601ea993d9b63',
'dest_asset_tag': ['cardholder', 'pci', 'americas'],
'dest_bunit': 'americas',
'dest_category': ['cardholder', 'pci'],
'dest_city': 'Pleasanton',
'dest_country': 'USA',
'dest_ip': '192.168.3.12',
'dest_is_expected': 'TRUE',
'dest_lat': '37.694452',
'dest_long': '-121.894461',
'dest_nt_host': 'ACME-workstation-012',
'dest_pci_domain': ['trust', 'cardholder'],
'dest_priority': 'medium',
'dest_requires_av': 'TRUE',
'dest_risk_object_type': 'system',
'dest_risk_score': '15680',
'dest_should_timesync': 'TRUE',
'dest_should_update': 'TRUE',
'host': 'ip-172-31-44-193',
'host_risk_object_type': 'system',
'host_risk_score': '0',
'index': 'notable',
'linecount': '1',
'priorities': 'medium',
'priority': 'medium',
'risk_score': '15680',
'rule_description': 'Endpoint - Recurring Malware Infection - Rule',
'rule_name': 'Endpoint - Recurring Malware Infection - Rule',
'rule_title': 'Endpoint - Recurring Malware Infection - Rule',
'security_domain': 'Endpoint - Recurring Malware Infection - Rule',
'severity': 'unknown',
'signature': 'Trojan.Gen.2',
'source': 'Endpoint - Recurring Malware Infection - Rule',
'sourcetype': 'stash',
'splunk_server': 'ip-172-31-44-193',
'urgency': 'low'
}]
EXPECTED = {
"action": "allowed",
"dest": "bb.bbb.bb.bbb , cc.ccc.ccc.cc , xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa",
"distinct_hosts": '5',
"first_3_octets": "1.1.1",
"first_time": "06/11/18 17:34:07 , 06/11/18 17:37:55 , 06/11/18 17:41:28 , 06/11/18 17:42:05 , 06/11/18 17:42:38",
"info_max_time": "+Infinity",
"info_min_time": '0.000',
"search_name": "NG_SIEM_UC25- High number of hits against unknown website from same subnet",
"src": "xx.xx.xxx.xx , yyy.yy.yyy.yy , zz.zzz.zz.zzz , aa.aa.aaa.aaa",
"u_category": "unknown",
"user": "xyz\\a1234 , xyz\\b5678 , xyz\\c91011 , xyz\\d121314 , unknown",
"website": "2.2.2.2"
}
URL_TESTING_IN = '"url="https://test.com?key=val"'
URL_TESTING_OUT = {'url': 'https://test.com?key=val'}
# checking a case where the last character for each value was cut
RESPONSE = 'NAS-IP-Address=2.2.2.2, NAS-Port=50222, NAS-Identifier=de-wilm-251littl-idf3b-s2, NAS-Port-Type=' \
'Ethernet, NAS-Port-Id=GigabitEthernet2/0/05'
POSITIVE = {
"NAS-IP-Address": "2.2.2.2",
"NAS-Identifier": "de-wilm-251littl-idf3b-s2",
"NAS-Port": "50222",
"NAS-Port-Id": "GigabitEthernet2/0/05",
"NAS-Port-Type": "Ethernet"
}
# testing the ValueError and json sections
RAW_JSON = '{"Test": "success"}'
RAW_STANDARD = '"Test="success"'
RAW_JSON_AND_STANDARD_OUTPUT = {"Test": "success"}
class Jobs:
    """Minimal stand-in for splunklib's jobs collection, used to fake a Splunk service."""
    def __init__(self, status):
        # No oneshot search behaviour is required by default.
        self.oneshot = None
        # Mimic the real job state: an attribute holder exposing a `content` dict
        # whose dispatchState is the stringified status given to the stub.
        state = namedtuple('state', 'content')
        self.state = state(content={'dispatchState': str(status)})
    def __getitem__(self, arg):
        # Any indexed lookup (e.g. a job counter) resolves to zero.
        return 0
    def create(self, query, latest_time, app, earliest_time, exec_mode):
        # Pretend a search job was dispatched and hand back fixed metadata.
        return {'sid': '123456', 'resultCount': 0}
class Service:
    # Minimal stand-in for splunklib.client.Service: exposes a Jobs stub and
    # returns a fresh Jobs stub (carrying the same status) for any sid lookup.
    def __init__(self, status):
        self.jobs = Jobs(status)
        self.status = status
    def job(self, sid):
        # `sid` is ignored; every job reports the service-wide status.
        return Jobs(self.status)
def test_raw_to_dict():
    """Exercise splunk.rawToDict over the fixture raws defined above: a comma-separated
    key=value raw, a pure CSV-like list (no keys -> empty dict), a raw embedding a JSON
    `message`, the empty string, URL values, value truncation, JSON input, and nested quotes."""
    actual_raw = DICT_RAW_RESPONSE
    response = splunk.rawToDict(actual_raw)
    list_response = splunk.rawToDict(LIST_RAW)
    raw_message = splunk.rawToDict(RAW_WITH_MESSAGE)
    empty = splunk.rawToDict('')
    url_test = splunk.rawToDict(URL_TESTING_IN)
    character_check = splunk.rawToDict(RESPONSE)
    assert EXPECTED == response
    # A raw with no key=value pairs yields an empty dict.
    assert {} == list_response
    assert raw_message.get('SCOPE[29]') == 'autopay\/events\/payroll\/v1\/earning-configuration.configuration-tags' \
                                           '.modify'
    assert isinstance(raw_message, dict)
    assert empty == {}
    assert URL_TESTING_OUT == url_test
    assert POSITIVE == character_check
    # JSON raws and standard key=value raws should parse to the same dict.
    assert splunk.rawToDict(RAW_JSON) == RAW_JSON_AND_STANDARD_OUTPUT
    assert splunk.rawToDict(RAW_STANDARD) == RAW_JSON_AND_STANDARD_OUTPUT
    # Nested quotes inside a value are flattened.
    assert splunk.rawToDict('drilldown_search="key IN ("test1","test2")') == {'drilldown_search': 'key IN (test1,test2)'}
@pytest.mark.parametrize('text, output', [
    ('', ['']),
    ('"",', ['"",']),
    # a value shouldn't do anything special
    ('woopwoop', ['woopwoop']),
    # a normal key value without quotes
    ('abc=123', ['abc="123"']),
    # add a comma at the end
    ('abc=123,', ['abc="123"']),
    # a normal key value with quotes
    ('cbd="123"', ['cbd="123"']),
    # check all wrapped with quotes removed
    ('"abc="123""', ['abc="123"']),
    # we need to remove 111 at the start.
    ('111, cbd="123"', ['cbd="123"']),
    # Testing with/without quotes and/or spaces:
    ('abc=123,cbd=123', ['abc="123"', 'cbd="123"']),
    ('abc=123,cbd="123"', ['abc="123"', 'cbd="123"']),
    ('abc="123",cbd=123', ['abc="123"', 'cbd="123"']),
    ('abc="123",cbd="123"', ['abc="123"', 'cbd="123"']),
    ('abc=123, cbd=123', ['abc="123"', 'cbd="123"']),
    ('abc=123, cbd="123"', ['abc="123"', 'cbd="123"']),
    ('cbd="123", abc=123', ['abc="123"', 'cbd="123"']),
    ('cbd="123",abc=123', ['abc="123"', 'cbd="123"']),
    # Continue testing quotes with more values:
    ('xyz=321,cbd=123,abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']),
    ('xyz=321,cbd="123",abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']),
    ('xyz="321",cbd="123",abc=123', ['xyz="321"', 'abc="123"', 'cbd="123"']),
    ('xyz="321",cbd="123",abc="123"', ['xyz="321"', 'abc="123"', 'cbd="123"']),
    # Testing nested quotes (the main reason for quote_group):
    # Try to remove the start 111.
    ('111, cbd="a="123""', ['cbd="a="123""']),
    ('cbd="a="123""', ['cbd="a="123""']),
    ('cbd="a="123", b=321"', ['cbd="a="123", b="321""']),
    # Note: the duplicate ('cbd="a=123, b=321"', ...) case that appeared twice
    # in this list was deduplicated -- it ran the exact same assertion twice.
    ('cbd="a=123, b=321"', ['cbd="a="123", b="321""']),
    ('cbd="a=123, b="321""', ['cbd="a="123", b="321""']),
    ('cbd="a="123", b="321""', ['cbd="a="123", b="321""']),
    ('xyz=123, cbd="a="123", b=321"', ['xyz="123"', 'cbd="a="123", b="321""']),
    ('xyz="123", cbd="a="123", b="321""', ['xyz="123"', 'cbd="a="123", b="321""']),
    ('xyz="123", cbd="a="123", b="321"", qqq=2', ['xyz="123"', 'cbd="a="123", b="321""', 'qqq="2"']),
    ('xyz="123", cbd="a="123", b="321"", qqq="2"', ['xyz="123"', 'cbd="a="123", b="321""', 'qqq="2"']),
])
def test_quote_group(text, output):
    """splunk.quote_group should split a raw key=value string into normalized
    `key="value"` groups; order is not guaranteed, hence the sorted comparison."""
    assert sorted(splunk.quote_group(text)) == sorted(output)
# (input dict, expected dict) pairs: keys containing . [ ] ( ) are sanitized to '_',
# values are left untouched; non-dict inputs pass through unchanged.
data_test_replace_keys = [
    ({}, {}),
    ({'test': 'test'}, {'test': 'test'}),
    ({'test.': 'test.'}, {'test_': 'test.'}),
    ({'te.st': 'te.st'}, {'te_st': 'te.st'}),
    ({'te[st': 'te[st'}, {'te_st': 'te[st'}),
    ({'te]st': 'te]st'}, {'te_st': 'te]st'}),
    ({'te)st': 'te)st'}, {'te_st': 'te)st'}),
    ({'te(st': 'te(st'}, {'te_st': 'te(st'}),
    ('', ''),
    (None, None)
]
@pytest.mark.parametrize('dict_in, dict_out', data_test_replace_keys)
def test_replace_keys(dict_in, dict_out):
    """splunk.replace_keys should sanitize special characters in dict keys.
    A deepcopy is passed so the fixture list is never mutated between cases."""
    out = splunk.replace_keys(deepcopy(dict_in))
    assert out == dict_out, 'replace_keys({}) got: {} instead: {}'.format(dict_in, out, dict_out)
def test_parse_time_to_minutes_no_error():
    """A valid '<int> <unit>' fetch time is converted to minutes (3 hours -> 180)."""
    splunk.FETCH_TIME = '3 hours'
    res = splunk.parse_time_to_minutes()
    assert res == 180
def test_parse_time_to_minutes_invalid_time_integer(mocker):
    """A non-numeric amount in FETCH_TIME should route through return_error
    with the invalid-fetch-time message (return_error is mocked, not raised)."""
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    splunk.FETCH_TIME = 'abc hours'
    splunk.parse_time_to_minutes()
    # First positional argument of the return_error call is the error message.
    err_msg = return_error_mock.call_args[0][0]
    assert err_msg == "Error: Invalid fetch time, need to be a positive integer with the time unit afterwards " \
                      "e.g '2 months, 4 days'."
def test_parse_time_to_minutes_invalid_time_unit(mocker):
    """An unrecognized time unit in FETCH_TIME should route through return_error
    with the invalid-time-unit message."""
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    splunk.FETCH_TIME = '3 hoursss'
    splunk.parse_time_to_minutes()
    err_msg = return_error_mock.call_args[0][0]
    assert err_msg == 'Error: Invalid time unit.'
# Fixture search results and field selections for update_headers_from_field_names.
SEARCH_RESULT = [
    {
        "But": {
            "This": "is"
        },
        "Very": "Unique"
    },
    {
        "Something": "regular",
        "But": {
            "This": "is"
        },
        "Very": "Unique"
    },
    {
        "Something": "natural",
        "But": {
            "This": "is a very very"
        },
        "Very": "Unique and awesome"
    }
]
REGULAR_ALL_CHOSEN_FIELDS = [
    "Something",
    "But",
    "Very"
]
REGULAR_CHOSEN_FIELDS_SUBSET = [
    "Something",
    "Very"
]
# 'Some*' exercises wildcard/regex matching against the field names.
REGEX_CHOSEN_FIELDS_SUBSET = [
    "Some*",
    "Very"
]
NON_EXISTING_FIELDS = [
    "SDFAFSD",
    "ASBLFKDJK"
]
@pytest.mark.parametrize('search_result, chosen_fields, expected_result', [
    (SEARCH_RESULT, REGULAR_ALL_CHOSEN_FIELDS, REGULAR_ALL_CHOSEN_FIELDS),
    (SEARCH_RESULT, REGULAR_CHOSEN_FIELDS_SUBSET, REGULAR_CHOSEN_FIELDS_SUBSET),
    (SEARCH_RESULT, REGEX_CHOSEN_FIELDS_SUBSET, REGULAR_CHOSEN_FIELDS_SUBSET),
    (SEARCH_RESULT, NON_EXISTING_FIELDS, []),
])
def test_commands(search_result, chosen_fields, expected_result):
    """Headers are built only from chosen fields that actually exist in the
    search results; wildcard patterns resolve to matching field names."""
    from SplunkPy import update_headers_from_field_names
    headers = update_headers_from_field_names(search_result, chosen_fields)
    assert expected_result == headers
# Known app / KV store names exposed by the stub service below.
APPS = ['app']
STORES = ['store']
# Argument permutations for check_error and the error they should surface.
EMPTY_CASE = {}
STORE_WITHOUT_APP = {"kv_store_collection_name": "test"}
JUST_APP_NAME = {'app_name': 'app'}  # happens in splunk-kv-store-collections-list command
CREATE_COMMAND = {'app_name': 'app', 'kv_store_name': 'not_store'}
CORRECT = {'app_name': 'app', 'kv_store_collection_name': 'store'}
INCORRECT_STORE_NAME = {'app_name': 'app', 'kv_store_collection_name': 'not_store'}
data_test_check_error = [
    (EMPTY_CASE, 'app not found'),
    (STORE_WITHOUT_APP, 'app not found'),
    (JUST_APP_NAME, 'empty'),
    (CREATE_COMMAND, 'empty'),
    (CORRECT, 'empty'),
    (INCORRECT_STORE_NAME, 'KV Store not found'),
]
@pytest.mark.parametrize('args, out_error', data_test_check_error)
def test_check_error(args, out_error):
    """check_error should raise DemistoException for unknown app / KV store names.
    When it does not raise, a sentinel 'empty' exception is raised so every
    branch is asserted through the same except clause."""
    # NOTE: this local Service deliberately shadows the module-level Service stub.
    class Service:
        def __init__(self):
            self.apps = APPS
            self.kvstore = STORES
    try:
        splunk.check_error(Service(), args)
        raise splunk.DemistoException('empty')
    except splunk.DemistoException as error:
        output = str(error)
    assert output == out_error, 'check_error(service, {})\n\treturns: {}\n\tinstead: {}'.format(args,
                                                                                                output, out_error)
# Argument permutations for build_kv_store_query. Precedence demonstrated by the
# expected values: key+value beats limit, which beats query.
EMPTY_CASE = {}
JUST_KEY = {"key": "key"}
WITH_ALL_PARAMS = {"key": "demisto", "value": "is awesome", "limit": 1, "query": "test"}
STANDARD_KEY_VAL = {"key": "demisto", "value": "is awesome"}
KEY_AND_LIMIT = {"key": "key", "limit": 1}
KEY_AND_QUERY = {"key": "key", "query": 'test_query'}
QUERY = {"query": 'test_query'}
QUERY_AND_VALUE = {"query": 'test_query', "value": "awesome"}
data_test_build_kv_store_query = [
    (EMPTY_CASE, str(EMPTY_CASE)),
    (JUST_KEY, str(EMPTY_CASE)),
    (STANDARD_KEY_VAL, '{"demisto": "is awesome"}'),
    (WITH_ALL_PARAMS, '{"demisto": "is awesome"}'),
    (KEY_AND_LIMIT, {"limit": 1}),
    (KEY_AND_QUERY, 'test_query'),
    (QUERY, 'test_query'),
    (QUERY_AND_VALUE, 'test_query'),
]
@pytest.mark.parametrize('args, expected_query', data_test_build_kv_store_query)
def test_build_kv_store_query(args, expected_query, mocker):
    """With no key type information (get_key_type mocked to None), the query is
    built from key/value, limit or raw query according to argument precedence."""
    mocker.patch('SplunkPy.get_key_type', return_value=None)
    output = splunk.build_kv_store_query(None, args)
    assert output == expected_query, 'build_kv_store_query({})\n\treturns: {}\n\tinstead: {}'.format(args, output,
                                                                                                    expected_query)
# (args, key type reported by get_key_type, expected JSON query): the string value
# should be coerced to the store's declared type before serialization.
data_test_build_kv_store_query_with_key_val = [
    ({"key": "demisto", "value": "is awesome"}, str, '{"demisto": "is awesome"}'),
    ({"key": "demisto", "value": "1"}, int, '{"demisto": 1}'),
    ({"key": "demisto", "value": "True"}, bool, '{"demisto": true}'),
]
@pytest.mark.parametrize('args, _type, expected_query', data_test_build_kv_store_query_with_key_val)
def test_build_kv_store_query_with_key_val(args, _type, expected_query, mocker):
    """build_kv_store_query should cast the value to the type reported for the key."""
    mocker.patch('SplunkPy.get_key_type', return_value=_type)
    output = splunk.build_kv_store_query(None, args)
    assert output == expected_query, 'build_kv_store_query({})\n\treturns: {}\n\tinstead: {}'.format(args, output,
                                                                                                    expected_query)
# (KV store field->type mapping, expected Python type for the 'key' field).
# Renamed from `test_test_get_key_type` to follow this file's `data_test_*`
# convention for parametrize fixtures (and to avoid a test-like name on data).
data_test_get_key_type = [
    ({'field.key': 'number'}, float),
    ({'field.key': 'string'}, str),
    ({'field.key': 'cidr'}, str),
    ({'field.key': 'boolean'}, bool),
    ({'field.key': 'empty'}, None),
    ({'field.key': 'time'}, str),
]
@pytest.mark.parametrize('keys_and_types, expected_type', data_test_get_key_type)
def test_get_key_type(keys_and_types, expected_type, mocker):
    """get_key_type should translate the Splunk KV type names into Python types
    (number->float, string/cidr/time->str, boolean->bool, unknown->None)."""
    mocker.patch('SplunkPy.get_keys_and_types', return_value=keys_and_types)
    output = splunk.get_key_type(None, 'key')
    assert output == expected_type, 'get_key_type(kv_store, key)\n\treturns: {}\n\tinstead: {}'.format(output,
                                                                                                       expected_type)
# Raw KV store content permutations: only 'field.*' and 'index.*' entries count
# as typed keys; anything else is dropped.
EMPTY_CASE = {}
WITHOUT_FIELD = {'empty': 'number'}
STRING_FIELD = {'field.test': 'string'}
NUMBER_FIELD = {'field.test': 'number'}
INDEX = {'index.test': 'string'}
MIXED = {'field.test': 'string', 'empty': 'field'}
data_test_get_keys_and_types = [
    (EMPTY_CASE, EMPTY_CASE),
    (WITHOUT_FIELD, EMPTY_CASE),
    (STRING_FIELD, {'field.test': 'string'}),
    (NUMBER_FIELD, {'field.test': 'number'}),
    (INDEX, {'index.test': 'string'}),
    (MIXED, {'field.test': 'string'}),
]
@pytest.mark.parametrize('raw_keys, expected_keys', data_test_get_keys_and_types)
def test_get_keys_and_types(raw_keys, expected_keys):
    """get_keys_and_types should keep only field./index. entries from the store content."""
    # Minimal stub of a KV store object exposing content().
    class KVMock:
        def __init__(self):
            pass
        def content(self):
            return raw_keys
    output = splunk.get_keys_and_types(KVMock())
    assert output == expected_keys, 'get_keys_and_types(kv_store)\n\treturns: {}\n\tinstead: {}'.format(output,
                                                                                                       expected_keys)
# Markdown table header produced for a store named 'name', and the row expected
# for a single configured field.
START_OUTPUT = '#### configuration for {} store\n| field name | type |\n| --- | --- |'.format('name')
EMPTY_OUTPUT = ''
STANDARD_CASE = {'field.test': 'number'}
STANDARD_OUTPUT = '\n| field.test | number |'
data_test_get_kv_store_config = [
    ({}, EMPTY_OUTPUT),
    (STANDARD_CASE, STANDARD_OUTPUT)
]
@pytest.mark.parametrize('fields, expected_output', data_test_get_kv_store_config)
def test_get_kv_store_config(fields, expected_output, mocker):
    """get_kv_store_config should render the store's fields as a markdown table."""
    # Stub exposing only the .name attribute read by get_kv_store_config.
    class Name:
        def __init__(self):
            self.name = 'name'
    mocker.patch('SplunkPy.get_keys_and_types', return_value=fields)
    output = splunk.get_kv_store_config(Name())
    expected_output = '{}{}'.format(START_OUTPUT, expected_output)
    assert output == expected_output
def test_fetch_incidents(mocker):
    """fetch_incidents over one mocked Splunk notable (SAMPLE_RESPONSE) should
    create exactly one incident named '<rule_name> : <rule_title>'."""
    mocker.patch.object(demisto, 'incidents')
    mocker.patch.object(demisto, 'setLastRun')
    mock_last_run = {'time': '2018-10-24T14:13:20'}
    mock_params = {'fetchQuery': "something"}
    mocker.patch('demistomock.getLastRun', return_value=mock_last_run)
    mocker.patch('demistomock.params', return_value=mock_params)
    # No real connection: the service object itself is a mock.
    service = mocker.patch('splunklib.client.connect', return_value=None)
    mocker.patch('splunklib.results.ResultsReader', return_value=SAMPLE_RESPONSE)
    splunk.fetch_incidents(service)
    incidents = demisto.incidents.call_args[0][0]
    assert demisto.incidents.call_count == 1
    assert len(incidents) == 1
    assert incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " \
                                   "Recurring Malware Infection - Rule"
# One fetched incident whose rawJSON carries the notable fields; the mapping dict
# should be keyed by the value of the chosen type field ('source' here).
SPLUNK_RESULTS = [
    {
        "rawJSON":
            '{"source": "This is the alert type", "field_name1": "field_val1", "field_name2": "field_val2"}',
        "details": "Endpoint - High Or Critical Priority Host With Malware - Rule",
        "labels": [
            {
                "type": "security_domain",
                "value": "Endpoint - High Or Critical Priority Host With Malware - Rule"
            }
        ],
    }
]
EXPECTED_OUTPUT = {
    'This is the alert type': {
        "source": "This is the alert type",
        "field_name1": "field_val1",
        "field_name2": "field_val2"
    }
}
def test_create_mapping_dict():
    """create_mapping_dict should group parsed rawJSON payloads by the type field."""
    mapping_dict = splunk.create_mapping_dict(SPLUNK_RESULTS, type_field='source')
    assert mapping_dict == EXPECTED_OUTPUT
def test_fetch_notables(mocker):
    """Same scenario as test_fetch_incidents but through fetch_notables with the
    enrichment mechanism disabled; one incident is expected."""
    mocker.patch.object(demisto, 'incidents')
    mocker.patch.object(demisto, 'setLastRun')
    mock_last_run = {'time': '2018-10-24T14:13:20'}
    mock_params = {'fetchQuery': "something"}
    mocker.patch('demistomock.getLastRun', return_value=mock_last_run)
    mocker.patch('demistomock.params', return_value=mock_params)
    service = mocker.patch('splunklib.client.connect', return_value=None)
    mocker.patch('splunklib.results.ResultsReader', return_value=SAMPLE_RESPONSE)
    splunk.fetch_notables(service, enrich_notables=False)
    incidents = demisto.incidents.call_args[0][0]
    assert demisto.incidents.call_count == 1
    assert len(incidents) == 1
    assert incidents[0]["name"] == "Endpoint - Recurring Malware Infection - Rule : Endpoint - " \
                                   "Recurring Malware Infection - Rule"
""" ========== Enriching Fetch Mechanism Tests ========== """
@pytest.mark.parametrize('integration_context, output', [
    ({splunk.INCIDENTS: ['incident']}, ['incident']),
    ({splunk.INCIDENTS: []}, []),
    ({}, [])
])
def test_fetch_incidents_for_mapping(integration_context, output, mocker):
    """
    Scenario: When a user configures a mapper using Fetch from Instance when the enrichment mechanism is working,
    we save the ready incidents in the integration context.

    Given:
    - List of ready incidents
    - An empty list of incidents
    - An empty integration context object

    When:
    - fetch_incidents_for_mapping is called

    Then:
    - Return the expected result
    """
    mocker.patch.object(demisto, 'info')
    mocker.patch.object(demisto, 'incidents')
    splunk.fetch_incidents_for_mapping(integration_context)
    # The cached incidents (or an empty list) must be handed to demisto.incidents exactly once.
    assert demisto.incidents.call_count == 1
    assert demisto.incidents.call_args[0][0] == output
def test_reset_enriching_fetch_mechanism(mocker):
    """
    Scenario: When a user is willing to reset the enriching fetch mechanism and start over.

    Given:
    - An integration context object with not empty Cache and incidents

    When:
    - reset_enriching_fetch_mechanism is called

    Then:
    - Check that the integration context does not contain this fields
    """
    integration_context = {
        splunk.CACHE: "cache_string",
        splunk.INCIDENTS: ['i1', 'i2'],
        'wow': 'wow'
    }
    mocker.patch('SplunkPy.get_integration_context', return_value=integration_context)
    mocker.patch('SplunkPy.set_integration_context')
    splunk.reset_enriching_fetch_mechanism()
    # Only the enrichment-related keys are removed; unrelated keys survive.
    assert integration_context == {'wow': 'wow'}
@pytest.mark.parametrize('drilldown_creation_time, asset_creation_time, enrichment_timeout, output', [
    (datetime.utcnow().isoformat(), datetime.utcnow().isoformat(), 5, False),
    ((datetime.utcnow() - timedelta(minutes=6)).isoformat(), datetime.utcnow().isoformat(), 5, True)
])
def test_is_enrichment_exceeding_timeout(mocker, drilldown_creation_time, asset_creation_time, enrichment_timeout, output):
    """
    Scenario: When one of the notable's enrichments is exceeding the timeout, we want to create an incident we all
    the data gathered so far.

    Given:
    - Two enrichments that none of them exceeds the timeout.
    - An enrichment exceeding the timeout and one that does not exceeds the timeout.

    When:
    - is_enrichment_process_exceeding_timeout is called

    Then:
    - Return the expected result
    """
    # NOTE(review): patching with return_value= replaces ENABLED_ENRICHMENTS with a
    # MagicMock rather than a list -- likely `new=[...]` was intended; confirm this
    # still exercises the intended code path in SplunkPy.
    mocker.patch.object(splunk, 'ENABLED_ENRICHMENTS', return_value=[splunk.DRILLDOWN_ENRICHMENT, splunk.ASSET_ENRICHMENT])
    notable = splunk.Notable({splunk.EVENT_ID: 'id'})
    notable.enrichments.append(splunk.Enrichment(splunk.DRILLDOWN_ENRICHMENT, creation_time=drilldown_creation_time))
    notable.enrichments.append(splunk.Enrichment(splunk.ASSET_ENRICHMENT, creation_time=asset_creation_time))
    assert notable.is_enrichment_process_exceeding_timeout(enrichment_timeout) is output
# Minimal incident fixtures (empty rawJSON payloads) for the mapping-cache test.
INCIDENT_1 = {'name': 'incident1', 'rawJSON': json.dumps({})}
INCIDENT_2 = {'name': 'incident2', 'rawJSON': json.dumps({})}
@pytest.mark.parametrize('integration_context, incidents, output', [
    ({}, [], []),
    ({}, [INCIDENT_1, INCIDENT_2], [INCIDENT_1, INCIDENT_2])
])
def test_store_incidents_for_mapping(integration_context, incidents, output):
    """
    Scenario: Store ready incidents in integration context, to be retrieved by a user configuring a mapper
    and selecting "Fetch from instance" when the enrichment mechanism is working.

    Given:
    - An empty list of incidents
    - A list of two incidents

    When:
    - store_incidents_for_mapping is called

    Then:
    - Return the expected result
    """
    splunk.store_incidents_for_mapping(incidents, integration_context)
    # The incidents are stored (mutated in place) under the INCIDENTS key.
    assert integration_context.get(splunk.INCIDENTS, []) == output
@pytest.mark.parametrize('notable_data, raw, status, earliest, latest', [
    ({}, {}, False, "", ""),
    ({"drilldown_earliest": "${}$".format(splunk.INFO_MIN_TIME),
      "drilldown_latest": "${}$".format(splunk.INFO_MAX_TIME)},
     {splunk.INFO_MIN_TIME: '1', splunk.INFO_MAX_TIME: '2'}, True, '1', '2'),
    ({"drilldown_earliest": '1', "drilldown_latest": '2', }, {}, True, '1', '2')
])
def test_get_drilldown_timeframe(notable_data, raw, status, earliest, latest, mocker):
    """
    Scenario: Trying to get the drilldown's timeframe from the notable's data

    Given:
    - An empty notable's data
    - An notable's data that the info of the timeframe is in the raw field
    - An notable's data that the info is in the data dict

    When:
    - get_drilldown_timeframe is called

    Then:
    - Return the expected result
    """
    mocker.patch.object(demisto, 'info')
    # Returns (success flag, earliest offset, latest offset).
    task_status, earliest_offset, latest_offset = splunk.get_drilldown_timeframe(notable_data, raw)
    assert task_status == status
    assert earliest_offset == earliest
    assert latest_offset == latest
@pytest.mark.parametrize('raw_field, notable_data, expected_field, expected_value', [
    ('field|s', {'field': '1'}, 'field', '1'),
    ('field', {'field': '1'}, 'field', '1'),
    ('field|s', {'_raw': 'field=1, value=2'}, 'field', '1'),
    ('x', {'y': '2'}, '', '')
])
def test_get_notable_field_and_value(raw_field, notable_data, expected_field, expected_value, mocker):
    """
    Scenario: When building the drilldown search query, we search for the field in the raw search query
    and search for its real name in the notable's data or in the notable's raw data.
    We also ignore Splunk advanced syntax such as "|s, |h, ..."

    Given:
    - A raw field that has the same name in the notable's data
    - A raw field that has "|s" as a suffix in the raw search query and its value is in the notable's data
    - A raw field that has "|s" as a suffix in the raw search query and its value is in the notable's raw data
    - A raw field that is not is the notable's data or in the notable's raw data

    When:
    - get_notable_field_and_value is called

    Then:
    - Return the expected result
    """
    mocker.patch.object(demisto, 'error')
    # Missing fields yield empty strings for both the field name and its value.
    field, value = splunk.get_notable_field_and_value(raw_field, notable_data)
    assert field == expected_field
    assert value == expected_value
@pytest.mark.parametrize('notable_data, search, raw, expected_search', [
    ({'a': '1', '_raw': 'c=3'}, 'search a=$a|s$ c=$c$ suffix', {'c': '3'}, 'search a="1" c="3" suffix'),
    ({'a': ['1', '2'], 'b': '3'}, 'search a=$a|s$ b=$b|s$ suffix', {}, 'search (a="1" OR a="2") b="3" suffix'),
    ({'a': '1', '_raw': 'b=3', 'event_id': '123'}, 'search a=$a|s$ c=$c$ suffix', {'b': '3'}, ''),
])
def test_build_drilldown_search(notable_data, search, raw, expected_search, mocker):
    """
    Scenario: When building the drilldown search query, we replace every field in between "$" sign with its
    corresponding query part (key & value).

    Given:
    - A raw search query with fields both in the notable's data and in the notable's raw data
    - A raw search query with fields in the notable's data that has more than one value
    - A raw search query with fields that does not exist in the notable's data or in the notable's raw data

    When:
    - build_drilldown_search is called

    Then:
    - Return the expected result
    """
    mocker.patch.object(demisto, 'error')
    # An unresolvable field aborts the build and yields an empty search string.
    assert splunk.build_drilldown_search(notable_data, search, raw) == expected_search
@pytest.mark.parametrize('notable_data, prefix, fields, query_part', [
    ({'user': ['u1', 'u2']}, 'identity', ['user'], '(identity="u1" OR identity="u2")'),
    ({'_raw': '1233, user=u1'}, 'user', ['user'], 'user="u1"'),
    ({'user': ['u1', 'u2'], '_raw': '1321, src_user=u3'}, 'user', ['user', 'src_user'],
     '(user="u1" OR user="u2" OR user="u3")'),
    ({}, 'prefix', ['field'], '')
])
def test_get_fields_query_part(notable_data, prefix, fields, query_part):
    """
    Scenario: When building an enrichment search query, we search for values in the notable's data / notable's raw data
    and fill them in the raw search query to create a searchable query.

    Given:
    - One field with multiple values, values in the data
    - One field, value is in the raw data
    - Two fields with multiple values, values in both the data and the raw data
    - An empty notable data, field does not exists

    When:
    - get_fields_query_part is called

    Then:
    - Return the expected result
    """
    assert splunk.get_fields_query_part(notable_data, prefix, fields) == query_part
""" ========== Mirroring Mechanism Tests ========== """
@pytest.mark.parametrize('last_update, demisto_params, splunk_time_timestamp', [
    ('2021-02-22T18:39:47.753+00:00', {'timezone': '0'}, 1614019187.753),
    ('2021-02-22T18:39:47.753+02:00', {'timezone': '+120'}, 1614019187.753),
    ('2021-02-22T20:39:47.753+02:00', {'timezone': '0'}, 1614019187.753),
    ('2021-02-09T16:41:30.589575+02:00', {}, '')
])
def test_get_last_update_in_splunk_time(last_update, demisto_params, splunk_time_timestamp, mocker):
    """ Tests the conversion of the Demisto server time into timestamp in Splunk Server time

    Given:
    - The last update time in the Demisto server
    - The timezone in the Splunk Server

    When:
    Converting the time in the Demisto server into timestamp in Splunk Server time

    Then:
    - Conversion is correct
    - An Exception is raised in case that Splunk Server timezone is not specified in Demisto params
    """
    mocker.patch.object(demisto, 'params', return_value=demisto_params)
    if demisto_params:
        assert splunk.get_last_update_in_splunk_time(last_update) == splunk_time_timestamp
    else:
        # BUGFIX: the two message halves were previously two separate statements --
        # the second string literal was dead code and never part of the matched
        # message. Parenthesize so implicit concatenation builds the full message.
        # (pytest.raises `match` applies re.search over this pattern.)
        error_msg = ('Cannot mirror incidents when timezone is not configured. Please enter the '
                     'timezone of the Splunk server being used in the integration configuration.')
        with pytest.raises(Exception, match=error_msg):
            splunk.get_last_update_in_splunk_time(last_update)
def test_get_remote_data_command(mocker):
    """Mirroring-in without close: the updated notable (status '1') is returned
    as-is, with no close entry appended."""
    updated_notable = {'status': '1', 'event_id': 'id'}
    # Stub service whose jobs.oneshot yields the updated notable.
    class Jobs:
        def __init__(self):
            self.oneshot = lambda x: updated_notable
    class Service:
        def __init__(self):
            self.jobs = Jobs()
    args = {'lastUpdate': '2021-02-09T16:41:30.589575+02:00', 'id': 'id'}
    mocker.patch.object(demisto, 'params', return_value={'timezone': '0'})
    mocker.patch.object(demisto, 'debug')
    mocker.patch.object(demisto, 'info')
    mocker.patch('SplunkPy.results.ResultsReader', return_value=[updated_notable])
    mocker.patch.object(demisto, 'results')
    splunk.get_remote_data_command(Service(), args, close_incident=False)
    results = demisto.results.call_args[0][0]
    assert demisto.results.call_count == 1
    assert results == [{'event_id': 'id', 'status': '1'}]
def test_get_remote_data_command_close_incident(mocker):
    """get-remote-data: a notable in status '5' (closed on Splunk) produces the
    updated notable plus a dbotIncidentClose note entry."""
    closed_notable = {'status': '5', 'event_id': 'id'}

    class FakeJobs:
        def __init__(self):
            self.oneshot = lambda query: closed_notable

    class FakeService:
        def __init__(self):
            self.jobs = FakeJobs()

    mocker.patch.object(demisto, 'params', return_value={'timezone': '0'})
    for method in ('debug', 'info', 'results'):
        mocker.patch.object(demisto, method)
    mocker.patch('SplunkPy.results.ResultsReader', return_value=[closed_notable])

    splunk.get_remote_data_command(
        FakeService(),
        {'lastUpdate': '2021-02-09T16:41:30.589575+02:00', 'id': 'id'},
        close_incident=True)

    assert demisto.results.call_count == 1
    close_entry = {
        'Type': EntryType.NOTE,
        'Contents': {
            'dbotIncidentClose': True,
            'closeReason': 'Notable event was closed on Splunk.'
        },
        'ContentsFormat': EntryFormat.JSON
    }
    assert demisto.results.call_args[0][0] == [{'event_id': 'id', 'status': '5'}, close_entry]
def test_get_modified_remote_data_command(mocker):
    """get-modified-remote-data: returns the rule ids of incident reviews that
    changed since lastUpdate."""
    incident_review = {'rule_id': 'id'}

    class FakeJobs:
        def __init__(self):
            self.oneshot = lambda query: [incident_review]

    class FakeService:
        def __init__(self):
            self.jobs = FakeJobs()

    mocker.patch.object(demisto, 'params', return_value={'timezone': '0'})
    mocker.patch.object(demisto, 'debug')
    mocker.patch.object(demisto, 'results')
    mocker.patch('SplunkPy.results.ResultsReader', return_value=[incident_review])

    splunk.get_modified_remote_data_command(
        FakeService(), {'lastUpdate': '2021-02-09T16:41:30.589575+02:00'})

    assert demisto.results.call_count == 1
    assert demisto.results.call_args[0][0]['Contents'] == [incident_review['rule_id']]
def test_edit_notable_event__failed_to_update(mocker, requests_mock):
    """
    Given
    - a notable event with id ID100
    When
    - the update uses the invalid owner 'dbot' and the service answers with the
      error string 'ValueError: Invalid owner value.'
    Then
    - the error message is parsed and surfaced to the user verbatim
    """
    base_url = 'https://test.url.com:8089/'
    mocker.patch.object(demisto, 'results')
    requests_mock.post('{}services/notable_update'.format(base_url),
                       json='ValueError: Invalid owner value.')

    splunk.splunk_edit_notable_event_command(
        base_url=base_url,
        token='token12345',
        auth_token=None,
        args={'eventIDs': 'ID100', 'owner': 'dbot'}
    )

    assert demisto.results.call_count == 1
    assert demisto.results.call_args[0][0]['Contents'] == \
        'Could not update notable events: ID100 : ValueError: Invalid owner value.'
@pytest.mark.parametrize('args, params, call_count, success', [
    ({'delta': {'status': '2'}, 'remoteId': '12345', 'status': 2, 'incidentChanged': True},
     {'host': 'ec.com', 'port': '8089', 'authentication': {'identifier': 'i', 'password': 'p'}}, 3, True),
    ({'delta': {'status': '2'}, 'remoteId': '12345', 'status': 2, 'incidentChanged': True},
     {'host': 'ec.com', 'port': '8089', 'authentication': {'identifier': 'i', 'password': 'p'}}, 2, False),
    ({'delta': {'status': '2'}, 'remoteId': '12345', 'status': 2, 'incidentChanged': True},
     {'host': 'ec.com', 'port': '8089', 'authentication': {'identifier': 'i', 'password': 'p'}, 'close_notable': True},
     4, True)
])
def test_update_remote_system(args, params, call_count, success, mocker, requests_mock):
    """update-remote-system: the notable update is pushed back to Splunk, the
    remote id is returned, and a failed update is reported via demisto.error."""
    class FakeService:
        def __init__(self):
            self.token = 'fake_token'

    mocker.patch.object(demisto, 'info')
    mocker.patch.object(demisto, 'debug')
    if not success:
        mocker.patch.object(demisto, 'error')

    base_url = 'https://{}:{}/'.format(params['host'], params['port'])
    requests_mock.post(base_url + 'services/auth/login', json={'sessionKey': 'session_key'})
    requests_mock.post(base_url + 'services/notable_update',
                       json={'success': success, 'message': 'wow'})

    assert splunk.update_remote_system_command(args, params, FakeService(), None) == args['remoteId']
    assert demisto.debug.call_count == call_count
    if not success:
        assert demisto.error.call_count == 1
# ---------------------------------------------------------------------------
# Expected CIM mapping fixtures for test_get_cim_mapping_field_command below.
# splunk.get_cim_mapping_field_command() must return exactly these four
# field -> type maps; every field is declared with the literal type 'string'.
# ---------------------------------------------------------------------------

# Top-level notable-event fields.
NOTABLE = {
    'rule_name': 'string', 'rule_title': 'string', 'security_domain': 'string', 'index': 'string',
    'rule_description': 'string', 'risk_score': 'string', 'host': 'string',
    'host_risk_object_type': 'string', 'dest_risk_object_type': 'string', 'dest_risk_score': 'string',
    'splunk_server': 'string', '_sourcetype': 'string', '_indextime': 'string', '_time': 'string',
    'src_risk_object_type': 'string', 'src_risk_score': 'string', '_raw': 'string', 'urgency': 'string',
    'owner': 'string', 'info_min_time': 'string', 'info_max_time': 'string', 'comment': 'string',
    'reviewer': 'string', 'rule_id': 'string', 'action': 'string', 'app': 'string',
    'authentication_method': 'string', 'authentication_service': 'string', 'bugtraq': 'string',
    'bytes': 'string', 'bytes_in': 'string', 'bytes_out': 'string', 'category': 'string', 'cert': 'string',
    'change': 'string', 'change_type': 'string', 'command': 'string', 'comments': 'string',
    'cookie': 'string', 'creation_time': 'string', 'cve': 'string', 'cvss': 'string', 'date': 'string',
    'description': 'string', 'dest': 'string', 'dest_bunit': 'string', 'dest_category': 'string',
    'dest_dns': 'string', 'dest_interface': 'string', 'dest_ip': 'string', 'dest_ip_range': 'string',
    'dest_mac': 'string', 'dest_nt_domain': 'string', 'dest_nt_host': 'string', 'dest_port': 'string',
    'dest_priority': 'string', 'dest_translated_ip': 'string', 'dest_translated_port': 'string',
    'dest_type': 'string', 'dest_zone': 'string', 'direction': 'string', 'dlp_type': 'string',
    'dns': 'string', 'duration': 'string', 'dvc': 'string', 'dvc_bunit': 'string', 'dvc_category': 'string',
    'dvc_ip': 'string', 'dvc_mac': 'string', 'dvc_priority': 'string', 'dvc_zone': 'string',
    'file_hash': 'string', 'file_name': 'string', 'file_path': 'string', 'file_size': 'string',
    'http_content_type': 'string', 'http_method': 'string', 'http_referrer': 'string',
    'http_referrer_domain': 'string', 'http_user_agent': 'string', 'icmp_code': 'string',
    'icmp_type': 'string', 'id': 'string', 'ids_type': 'string', 'incident': 'string', 'ip': 'string',
    'mac': 'string', 'message_id': 'string', 'message_info': 'string', 'message_priority': 'string',
    'message_type': 'string', 'mitre_technique_id': 'string', 'msft': 'string', 'mskb': 'string',
    'name': 'string', 'orig_dest': 'string', 'orig_recipient': 'string', 'orig_src': 'string',
    'os': 'string', 'packets': 'string', 'packets_in': 'string', 'packets_out': 'string',
    'parent_process': 'string', 'parent_process_id': 'string', 'parent_process_name': 'string',
    'parent_process_path': 'string', 'password': 'string', 'payload': 'string', 'payload_type': 'string',
    'priority': 'string', 'problem': 'string', 'process': 'string', 'process_hash': 'string',
    'process_id': 'string', 'process_name': 'string', 'process_path': 'string', 'product_version': 'string',
    'protocol': 'string', 'protocol_version': 'string', 'query': 'string', 'query_count': 'string',
    'query_type': 'string', 'reason': 'string', 'recipient': 'string', 'recipient_count': 'string',
    'recipient_domain': 'string', 'recipient_status': 'string', 'record_type': 'string',
    'registry_hive': 'string', 'registry_key_name': 'string', 'registry_path': 'string',
    'registry_value_data': 'string', 'registry_value_name': 'string', 'registry_value_text': 'string',
    'registry_value_type': 'string', 'request_sent_time': 'string', 'request_payload': 'string',
    'request_payload_type': 'string', 'response_code': 'string', 'response_payload_type': 'string',
    'response_received_time': 'string', 'response_time': 'string', 'result': 'string',
    'return_addr': 'string', 'rule': 'string', 'rule_action': 'string', 'sender': 'string',
    'service': 'string', 'service_hash': 'string', 'service_id': 'string', 'service_name': 'string',
    'service_path': 'string', 'session_id': 'string', 'sessions': 'string', 'severity': 'string',
    'severity_id': 'string', 'sid': 'string', 'signature': 'string', 'signature_id': 'string',
    'signature_version': 'string', 'site': 'string', 'size': 'string', 'source': 'string',
    'sourcetype': 'string', 'src': 'string', 'src_bunit': 'string', 'src_category': 'string',
    'src_dns': 'string', 'src_interface': 'string', 'src_ip': 'string', 'src_ip_range': 'string',
    'src_mac': 'string', 'src_nt_domain': 'string', 'src_nt_host': 'string', 'src_port': 'string',
    'src_priority': 'string', 'src_translated_ip': 'string', 'src_translated_port': 'string',
    'src_type': 'string', 'src_user': 'string', 'src_user_bunit': 'string', 'src_user_category': 'string',
    'src_user_domain': 'string', 'src_user_id': 'string', 'src_user_priority': 'string',
    'src_user_role': 'string', 'src_user_type': 'string', 'src_zone': 'string', 'state': 'string',
    'status': 'string', 'status_code': 'string', 'status_description': 'string', 'subject': 'string',
    'tag': 'string', 'ticket_id': 'string', 'time': 'string', 'time_submitted': 'string',
    'transport': 'string', 'transport_dest_port': 'string', 'type': 'string', 'uri': 'string',
    'uri_path': 'string', 'uri_query': 'string', 'url': 'string', 'url_domain': 'string',
    'url_length': 'string', 'user': 'string', 'user_agent': 'string', 'user_bunit': 'string',
    'user_category': 'string', 'user_id': 'string', 'user_priority': 'string', 'user_role': 'string',
    'user_type': 'string', 'vendor_account': 'string', 'vendor_product': 'string', 'vlan': 'string',
    'xdelay': 'string', 'xref': 'string'
}
# Fields of the drilldown search results nested under the 'Drilldown' key.
DRILLDOWN = {
    'Drilldown': {
        'action': 'string', 'app': 'string', 'authentication_method': 'string',
        'authentication_service': 'string', 'bugtraq': 'string', 'bytes': 'string',
        'bytes_in': 'string', 'bytes_out': 'string', 'category': 'string', 'cert': 'string',
        'change': 'string', 'change_type': 'string', 'command': 'string', 'comments': 'string',
        'cookie': 'string', 'creation_time': 'string', 'cve': 'string', 'cvss': 'string',
        'date': 'string', 'description': 'string', 'dest': 'string', 'dest_bunit': 'string',
        'dest_category': 'string', 'dest_dns': 'string', 'dest_interface': 'string',
        'dest_ip': 'string', 'dest_ip_range': 'string', 'dest_mac': 'string',
        'dest_nt_domain': 'string', 'dest_nt_host': 'string', 'dest_port': 'string',
        'dest_priority': 'string', 'dest_translated_ip': 'string',
        'dest_translated_port': 'string', 'dest_type': 'string', 'dest_zone': 'string',
        'direction': 'string', 'dlp_type': 'string', 'dns': 'string', 'duration': 'string',
        'dvc': 'string', 'dvc_bunit': 'string', 'dvc_category': 'string', 'dvc_ip': 'string',
        'dvc_mac': 'string', 'dvc_priority': 'string', 'dvc_zone': 'string',
        'file_hash': 'string', 'file_name': 'string', 'file_path': 'string',
        'file_size': 'string', 'http_content_type': 'string', 'http_method': 'string',
        'http_referrer': 'string', 'http_referrer_domain': 'string', 'http_user_agent': 'string',
        'icmp_code': 'string', 'icmp_type': 'string', 'id': 'string', 'ids_type': 'string',
        'incident': 'string', 'ip': 'string', 'mac': 'string', 'message_id': 'string',
        'message_info': 'string', 'message_priority': 'string', 'message_type': 'string',
        'mitre_technique_id': 'string', 'msft': 'string', 'mskb': 'string', 'name': 'string',
        'orig_dest': 'string', 'orig_recipient': 'string', 'orig_src': 'string', 'os': 'string',
        'packets': 'string', 'packets_in': 'string', 'packets_out': 'string',
        'parent_process': 'string', 'parent_process_id': 'string',
        'parent_process_name': 'string', 'parent_process_path': 'string', 'password': 'string',
        'payload': 'string', 'payload_type': 'string', 'priority': 'string', 'problem': 'string',
        'process': 'string', 'process_hash': 'string', 'process_id': 'string',
        'process_name': 'string', 'process_path': 'string', 'product_version': 'string',
        'protocol': 'string', 'protocol_version': 'string', 'query': 'string',
        'query_count': 'string', 'query_type': 'string', 'reason': 'string',
        'recipient': 'string', 'recipient_count': 'string', 'recipient_domain': 'string',
        'recipient_status': 'string', 'record_type': 'string', 'registry_hive': 'string',
        'registry_key_name': 'string', 'registry_path': 'string',
        'registry_value_data': 'string', 'registry_value_name': 'string',
        'registry_value_text': 'string', 'registry_value_type': 'string',
        'request_payload': 'string', 'request_payload_type': 'string',
        'request_sent_time': 'string', 'response_code': 'string',
        'response_payload_type': 'string', 'response_received_time': 'string',
        'response_time': 'string', 'result': 'string', 'return_addr': 'string', 'rule': 'string',
        'rule_action': 'string', 'sender': 'string', 'service': 'string',
        'service_hash': 'string', 'service_id': 'string', 'service_name': 'string',
        'service_path': 'string', 'session_id': 'string', 'sessions': 'string',
        'severity': 'string', 'severity_id': 'string', 'sid': 'string', 'signature': 'string',
        'signature_id': 'string', 'signature_version': 'string', 'site': 'string',
        'size': 'string', 'source': 'string', 'sourcetype': 'string', 'src': 'string',
        'src_bunit': 'string', 'src_category': 'string', 'src_dns': 'string',
        'src_interface': 'string', 'src_ip': 'string', 'src_ip_range': 'string',
        'src_mac': 'string', 'src_nt_domain': 'string', 'src_nt_host': 'string',
        'src_port': 'string', 'src_priority': 'string', 'src_translated_ip': 'string',
        'src_translated_port': 'string', 'src_type': 'string', 'src_user': 'string',
        'src_user_bunit': 'string', 'src_user_category': 'string', 'src_user_domain': 'string',
        'src_user_id': 'string', 'src_user_priority': 'string', 'src_user_role': 'string',
        'src_user_type': 'string', 'src_zone': 'string', 'state': 'string', 'status': 'string',
        'status_code': 'string', 'subject': 'string', 'tag': 'string', 'ticket_id': 'string',
        'time': 'string', 'time_submitted': 'string', 'transport': 'string',
        'transport_dest_port': 'string', 'type': 'string', 'uri': 'string', 'uri_path': 'string',
        'uri_query': 'string', 'url': 'string', 'url_domain': 'string', 'url_length': 'string',
        'user': 'string', 'user_agent': 'string', 'user_bunit': 'string',
        'user_category': 'string', 'user_id': 'string', 'user_priority': 'string',
        'user_role': 'string', 'user_type': 'string', 'vendor_account': 'string',
        'vendor_product': 'string', 'vlan': 'string', 'xdelay': 'string', 'xref': 'string'
    }
}
# Asset-enrichment fields nested under the 'Asset' key.
ASSET = {
    'Asset': {
        'asset': 'string', 'asset_id': 'string', 'asset_tag': 'string', 'bunit': 'string',
        'category': 'string', 'city': 'string', 'country': 'string', 'dns': 'string',
        'ip': 'string', 'is_expected': 'string', 'lat': 'string', 'long': 'string', 'mac': 'string',
        'nt_host': 'string', 'owner': 'string', 'pci_domain': 'string', 'priority': 'string',
        'requires_av': 'string'
    }
}
# Identity-enrichment fields nested under the 'Identity' key.
IDENTITY = {
    'Identity': {
        'bunit': 'string', 'category': 'string', 'email': 'string', 'endDate': 'string', 'first': 'string',
        'identity': 'string', 'identity_tag': 'string', 'last': 'string', 'managedBy': 'string',
        'nick': 'string', 'phone': 'string', 'prefix': 'string', 'priority': 'string',
        'startDate': 'string', 'suffix': 'string', 'watchlist': 'string', 'work_city': 'string',
        'work_lat': 'string', 'work_long': 'string'
    }
}
def test_get_cim_mapping_field_command(mocker):
    """Scenario: the mapping is based on Splunk CIM — the command must return
    the four fixture maps defined above, keyed by their display names."""
    mocker.patch.object(demisto, 'results')

    splunk.get_cim_mapping_field_command()

    assert demisto.results.call_count == 1
    returned_mapping = demisto.results.call_args[0][0]
    assert returned_mapping == {
        'Notable Data': NOTABLE,
        'Drilldown Data': DRILLDOWN,
        'Asset Data': ASSET,
        'Identity Data': IDENTITY,
    }
def test_build_search_human_readable(mocker):
    """
    Given:
        a splunk-search query containing a | table clause with headers
    When:
        building the human readable table
    Then:
        headers are parsed correctly:
        * both comma- and space-separated lists are supported
        * quoted values may contain commas and spaces
        * | rename ... AS ... aliases are applied
    """
    header_spy = mocker.patch('SplunkPy.update_headers_from_field_names')
    search_results = [
        {'ID': 1, 'Header with space': 'h1', 'header3': 1, 'header_without_space': '1234',
         'old_header_1': '1', 'old_header_2': '2'},
        {'ID': 2, 'Header with space': 'h2', 'header3': 2, 'header_without_space': '1234',
         'old_header_1': '1', 'old_header_2': '2'},
    ]
    query = ('something | table ID "Header with space" header3 header_without_space '
             'comma,separated "Single,Header,with,Commas" old_header_1 old_header_2 | something else'
             ' | rename old_header_1 AS new_header_1 old_header_2 AS new_header_2')

    splunk.build_search_human_readable({'query': query}, search_results)

    assert header_spy.call_args[0][1] == [
        'ID', 'Header with space', 'header3', 'header_without_space',
        'comma', 'separated', 'Single,Header,with,Commas', 'new_header_1', 'new_header_2']
def test_build_search_human_readable_multi_table_in_query(mocker):
    """
    Given:
        a query with multiple | table clauses
    When:
        building the human readable table
    Then:
        every expected header appears exactly once (no duplicates)
    """
    query_args = {"query": " table header_1, header_2 | stats state_1, state_2 | table header_1, header_2, header_3, header_4"}
    rows = [{'header_1': 'val_1', 'header_2': 'val_2', 'header_3': 'val_3', 'header_4': 'val_4'}]

    human_readable = splunk.build_search_human_readable(query_args, rows)

    assert "|header_1|header_2|header_3|header_4|\n|---|---|---|---|" in human_readable
@pytest.mark.parametrize('polling', [False, True])
def test_build_search_kwargs(polling):
    """
    Given:
        splunk-search command args (time range, app, polling flag)
    When:
        build_search_kwargs constructs the search job kwargs
    Then:
        every time/app arg is forwarded, and exec_mode is 'normal' when polling
        and 'blocking' otherwise
    """
    args = {'earliest_time': '2021-11-23T10:10:10', 'latest_time': '2021-11-23T10:10:20', 'app': 'test_app', 'polling': polling}

    search_kwargs = splunk.build_search_kwargs(args, polling)

    assert 'exec_mode' in search_kwargs
    assert search_kwargs['exec_mode'] == ('normal' if polling else 'blocking')
    for forwarded in ('earliest_time', 'latest_time', 'app'):
        assert forwarded in search_kwargs
@pytest.mark.parametrize('polling,status', [
    (False, 'DONE'), (True, 'DONE'), (True, 'RUNNING')
])
def test_splunk_search_command(mocker, polling, status):
    """
    Given:
        a search query with args (Service is the module-level test double)
    When:
        running splunk_search_command with and without polling
    Then:
        polling yields a scheduled command carrying the job sid and status;
        a regular search yields the (empty) results table
    """
    mocker.patch.object(demisto, 'args', return_value={
        'query': 'query', 'earliest_time': '2021-11-23T10:10:10',
        'latest_time': '2020-10-20T10:10:20', 'app': 'test_app', 'polling': polling})
    mocker.patch.object(ScheduledCommand, 'raise_error_if_not_supported')

    command_result = splunk.splunk_search_command(Service(status))

    if command_result.scheduled_command:
        assert command_result.outputs['Status'] == status
        assert command_result.scheduled_command._args['sid'] == '123456'
    else:
        assert command_result.outputs['Splunk.Result'] == []
        assert command_result.readable_output == '### Splunk Search results for query: query\n**No entries.**\n'
@pytest.mark.parametrize('credentials', [
    {'username': 'test', 'password': 'test'},
    {'splunkToken': 'token', 'password': 'test'},
])
def test_module_test(mocker, credentials):
    """
    Given:
        credentials (user/password or token) for connecting to Splunk
    When:
        running the test-module command
    Then:
        the service's info() endpoint is queried exactly once
    """
    mocker.patch.object(client.Service, 'info')
    mocker.patch.object(client.Service, 'login')
    service = client.Service(**credentials)

    splunk.test_module(service)

    assert service.info.call_count == 1
@pytest.mark.parametrize('credentials', [
    {'username': 'test', 'password': 'test'},
    {'splunkToken': 'token', 'password': 'test'},
])
def test_module__exception_raised(mocker, credentials):
    """
    Given:
        Splunk raises AuthenticationError during test-module
    When:
        running the test-module command
    Then:
        the user gets the friendly credentials-validation error message
    """
    def raise_auth_error():
        raise AuthenticationError()

    mocker.patch.object(AuthenticationError, '__init__', return_value=None)
    mocker.patch.object(client.Service, 'info', side_effect=raise_auth_error)
    mocker.patch.object(client.Service, 'login')
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)

    splunk.test_module(client.Service(**credentials))

    assert return_error_mock.call_args[0][0] == 'Authentication error, please validate your credentials.'
def test_module_hec_url(mocker):
    """
    Given:
        hec_url is configured in the integration params
    When:
        running the test-module command
    Then:
        the HEC health endpoint is probed via requests.get
    """
    mocker.patch.object(demisto, 'params', return_value={'hec_url': 'test_hec_url'})
    mocker.patch.object(client.Service, 'info')
    mocker.patch.object(client.Service, 'login')
    mocker.patch.object(requests, 'get')

    splunk.test_module(client.Service(username='test', password='test'))

    assert requests.get.call_args[0][0] == 'test_hec_url/services/collector/health'
def test_labels_with_non_str_values(mocker):
    """
    Given:
        a raw response whose _raw values include dicts, lists, tuples,
        numbers and booleans
    When:
        fetching incidents
    Then:
        every created label value is a string, so the server's json.Unmarshal
        never fails
    """
    raw_event = {
        "message": "Authentication of user via Radius",
        "actor_obj": {
            "id": "test",
            "type": "User",
            "alternateId": "test",
            "displayName": "test"
        },
        "actor_list": [{
            "id": "test",
            "type": "User",
            "alternateId": "test",
            "displayName": "test"
        }],
        "actor_tuple": ("id", "test"),
        "num_val": 100,
        "bool_val": False,
        "float_val": 100.0
    }
    response = SAMPLE_RESPONSE[0].copy()
    response['_raw'] = json.dumps(raw_event)

    mocker.patch.object(demisto, 'incidents')
    mocker.patch.object(demisto, 'setLastRun')
    mocker.patch('demistomock.getLastRun', return_value={'time': '2018-10-24T14:13:20'})
    mocker.patch('demistomock.params', return_value={'fetchQuery': "something", "parseNotableEventsRaw": True})
    mocker.patch('splunklib.results.ResultsReader', return_value=[response])
    service = mocker.patch('splunklib.client.connect', return_value=None)

    splunk.fetch_incidents(service)

    assert demisto.incidents.call_count == 1
    incidents = demisto.incidents.call_args[0][0]
    assert len(incidents) == 1
    labels = incidents[0]["labels"]
    assert len(labels) >= 7
    assert all(isinstance(label['value'], str) for label in labels)
def test_empty_string_as_app_param_value(mocker):
    """
    Given:
        params where the 'app' key is an empty string
    When:
        building the connection args
    Then:
        the app falls back to '-' (Splunk's "all apps" namespace marker)
    """
    mocker.patch('demistomock.params',
                 return_value={'app': '', 'host': '111', 'port': '111'})

    connection_args = splunk.get_connection_args()

    assert connection_args.get('app') == '-'
| 44.517832 | 128 | 0.630083 |
e33d270ed45489733873eeb659d8bf5c570d9960 | 8,281 | py | Python | pkg/workloads/cortex/lib/type/predictor.py | zouyee/cortex | dce5ca19ac3f177ef90afe8095f6f6c4d9664e2f | [
"Apache-2.0"
] | 1 | 2020-05-06T17:47:31.000Z | 2020-05-06T17:47:31.000Z | pkg/workloads/cortex/lib/type/predictor.py | zouyee/cortex | dce5ca19ac3f177ef90afe8095f6f6c4d9664e2f | [
"Apache-2.0"
] | null | null | null | pkg/workloads/cortex/lib/type/predictor.py | zouyee/cortex | dce5ca19ac3f177ef90afe8095f6f6c4d9664e2f | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import imp
import inspect
import dill
from cortex.lib.log import refresh_logger, cx_logger
from cortex.lib.exceptions import CortexException, UserException, UserRuntimeException
class Predictor:
    """User predictor definition from the API spec.

    Holds the predictor configuration, builds the runtime client for the
    model-serving backend (ONNX / TensorFlow Serving), dynamically loads the
    user's implementation module and validates that it defines exactly one
    correctly-shaped predictor class for ``self.type``.
    """
    def __init__(self, provider, cache_dir, **kwargs):
        # "type" and "path" are mandatory spec keys (KeyError if missing);
        # the remaining keys are optional.
        self.provider = provider
        self.type = kwargs["type"]
        self.path = kwargs["path"]
        self.model = kwargs.get("model")
        self.python_path = kwargs.get("python_path")
        self.config = kwargs.get("config", {})
        self.env = kwargs.get("env")
        self.signature_key = kwargs.get("signature_key")
        self.cache_dir = cache_dir
    def initialize_client(self, model_dir=None, tf_serving_host=None, tf_serving_port=None):
        """Build and return the serving client for this predictor type.

        Returns an ONNXClient for "onnx", a TensorFlowClient for "tensorflow"
        (after validating the SavedModel layout in model_dir), and None for
        any other type (the "python" predictor needs no client).
        Clients are imported lazily so only the needed backend is loaded.
        """
        if self.type == "onnx":
            from cortex.lib.client.onnx import ONNXClient
            model_path = os.path.join(model_dir, os.path.basename(self.model))
            client = ONNXClient(model_path)
            cx_logger().info("ONNX model signature: {}".format(client.input_signature))
            return client
        elif self.type == "tensorflow":
            from cortex.lib.client.tensorflow import TensorFlowClient
            tf_serving_address = tf_serving_host + ":" + tf_serving_port
            # Fail fast with a helpful message if the export layout is wrong.
            validate_model_dir(model_dir)
            client = TensorFlowClient(tf_serving_address, self.signature_key)
            cx_logger().info("TensorFlow model signature: {}".format(client.input_signature))
            return client
        return None
    def initialize_impl(self, project_dir, client=None):
        """Instantiate the user's predictor class.

        Any exception from the user's __init__ is re-raised as a
        UserRuntimeException attributed to self.path. The logger is refreshed
        afterwards in case the user code reconfigured logging.
        """
        class_impl = self.class_impl(project_dir)
        try:
            if self.type == "python":
                return class_impl(self.config)
            else:
                # tensorflow/onnx predictors also receive the serving client.
                return class_impl(client, self.config)
        except Exception as e:
            raise UserRuntimeException(self.path, "__init__", str(e)) from e
        finally:
            refresh_logger()
    def class_impl(self, project_dir):
        """Load the user's module and return its predictor class (not an instance).

        Raises UserException (wrapped with the file path) when the expected
        class is missing, defined more than once, or fails signature validation.
        NOTE(review): if self.type were anything other than tensorflow/onnx/
        python, target_class_name/validations would be unbound (NameError) —
        presumably the spec is validated upstream; confirm.
        """
        if self.type == "tensorflow":
            target_class_name = "TensorFlowPredictor"
            validations = TENSORFLOW_CLASS_VALIDATION
        elif self.type == "onnx":
            target_class_name = "ONNXPredictor"
            validations = ONNX_CLASS_VALIDATION
        elif self.type == "python":
            target_class_name = "PythonPredictor"
            validations = PYTHON_CLASS_VALIDATION
        try:
            impl = self._load_module("cortex_predictor", os.path.join(project_dir, self.path))
        except CortexException as e:
            e.wrap("error in " + self.path)
            raise
        finally:
            refresh_logger()
        try:
            # Scan every class defined in (or imported into) the module and
            # require exactly one with the expected name.
            classes = inspect.getmembers(impl, inspect.isclass)
            predictor_class = None
            for class_df in classes:
                if class_df[0] == target_class_name:
                    if predictor_class is not None:
                        raise UserException(
                            "multiple definitions for {} class found; please check your imports and class definitions and ensure that there is only one Predictor class definition".format(
                                target_class_name
                            )
                        )
                    predictor_class = class_df[1]
            if predictor_class is None:
                raise UserException("{} class is not defined".format(target_class_name))
            _validate_impl(predictor_class, validations)
        except CortexException as e:
            e.wrap("error in " + self.path)
            raise
        return predictor_class
    def _load_module(self, module_name, impl_path):
        """Load the user's code as a module.

        A ".pickle" path is treated as a dill-pickled dict of module
        attributes, copied onto a fresh empty module; anything else is loaded
        as a Python source file. Failures become UserException.
        NOTE: imp is deprecated in favor of importlib since Python 3.4.
        """
        if impl_path.endswith(".pickle"):
            try:
                impl = imp.new_module(module_name)
                with open(impl_path, "rb") as pickle_file:
                    pickled_dict = dill.load(pickle_file)
                    for key in pickled_dict:
                        setattr(impl, key, pickled_dict[key])
            except Exception as e:
                raise UserException("unable to load pickle", str(e)) from e
        else:
            try:
                impl = imp.load_source(module_name, impl_path)
            except Exception as e:
                raise UserException(str(e)) from e
        return impl
# Expected signatures for user-provided predictor classes, keyed by predictor
# type. _validate_impl() enforces each "required" entry: the method must exist,
# be callable, and its argument names must match "args" exactly (see
# _validate_required_fn_args below).
PYTHON_CLASS_VALIDATION = {
    "required": [
        {"name": "__init__", "args": ["self", "config"]},
        {"name": "predict", "args": ["self", "payload"]},
    ]
}
# TensorFlowPredictor's __init__ additionally receives the serving client.
TENSORFLOW_CLASS_VALIDATION = {
    "required": [
        {"name": "__init__", "args": ["self", "tensorflow_client", "config"]},
        {"name": "predict", "args": ["self", "payload"]},
    ]
}
# ONNXPredictor's __init__ additionally receives the ONNX runtime client.
ONNX_CLASS_VALIDATION = {
    "required": [
        {"name": "__init__", "args": ["self", "onnx_client", "config"]},
        {"name": "predict", "args": ["self", "payload"]},
    ]
}
def _validate_impl(impl, impl_req):
for optional_func in impl_req.get("optional", []):
_validate_optional_fn_args(impl, optional_func["name"], optional_func["args"])
for required_func in impl_req.get("required", []):
_validate_required_fn_args(impl, required_func["name"], required_func["args"])
def _validate_optional_fn_args(impl, fn_name, args):
if fn_name in vars(impl):
_validate_required_fn_args(impl, fn_name, args)
def _validate_required_fn_args(impl, fn_name, args):
fn = getattr(impl, fn_name, None)
if not fn:
raise UserException('required function "{}" is not defined'.format(fn_name))
if not callable(fn):
raise UserException('"{}" is defined, but is not a function'.format(fn_name))
argspec = inspect.getfullargspec(fn)
if argspec.args != args:
raise UserException(
'invalid signature for function "{}": expected arguments ({}) but found ({})'.format(
fn_name, ", ".join(args), ", ".join(argspec.args)
)
)
tf_expected_dir_structure = """tensorflow model directories must have the following structure:
1523423423/ (version prefix, usually a timestamp)
├── saved_model.pb
└── variables/
├── variables.index
├── variables.data-00000-of-00003
├── variables.data-00001-of-00003
└── variables.data-00002-of-...`"""
def validate_model_dir(model_dir):
    """Verify *model_dir* contains a TensorFlow SavedModel export.

    Expects a digit-named version directory holding saved_model.pb and a
    variables/ directory with variables.index plus at least one
    variables.data-00000-of-* shard. On any violation the expected layout is
    logged and a UserException is raised; returns None on success.
    """
    # First digit-named entry wins (same selection rule as before).
    version = next((entry for entry in os.listdir(model_dir) if entry.isdigit()), None)
    if version is None:
        cx_logger().error(tf_expected_dir_structure)
        raise UserException("no top-level version folder found")

    version_dir = os.path.join(model_dir, version)
    if not os.path.isdir(version_dir):
        cx_logger().error(tf_expected_dir_structure)
        raise UserException("no top-level version folder found")
    if not os.path.isfile(os.path.join(version_dir, "saved_model.pb")):
        cx_logger().error(tf_expected_dir_structure)
        raise UserException('expected a "saved_model.pb" file')

    variables_dir = os.path.join(version_dir, "variables")
    if not os.path.isdir(variables_dir):
        cx_logger().error(tf_expected_dir_structure)
        raise UserException('expected a "variables" directory')
    if not os.path.isfile(os.path.join(variables_dir, "variables.index")):
        cx_logger().error(tf_expected_dir_structure)
        raise UserException('expected a "variables/variables.index" file')

    if any(entry.startswith("variables.data-00000-of") for entry in os.listdir(variables_dir)):
        return
    cx_logger().error(tf_expected_dir_structure)
    raise UserException(
        'expected at least one variables data file, starting with "variables.data-00000-of-"'
    )
| 36.480176 | 187 | 0.627943 |
c18efc3e76bd2161452048bc131866ffd840d037 | 5,893 | py | Python | blender/arm/assets.py | MarketGarden/armory | 75c6121deb21f0bddc60eeef52e21206db9f825f | [
"Zlib"
] | 1 | 2021-03-17T05:51:45.000Z | 2021-03-17T05:51:45.000Z | blender/arm/assets.py | MarketGarden/armory | 75c6121deb21f0bddc60eeef52e21206db9f825f | [
"Zlib"
] | 1 | 2019-12-13T08:16:20.000Z | 2019-12-13T08:16:20.000Z | blender/arm/assets.py | MarketGarden/armory | 75c6121deb21f0bddc60eeef52e21206db9f825f | [
"Zlib"
] | null | null | null | import shutil
import os
import stat
import bpy
import arm.utils
# Per-export bookkeeping. Everything below (except shader_passes_assets, see
# note) is re-initialized by reset() before each build.
assets = []  # asset file paths registered via add() (unique basenames enforced there)
reserved_names = ['return.']  # name fragments add() warns about ("will break C++ builds")
khafile_params = []  # filled by add_khafile_param()
khafile_defs = []  # compile-time defines, filled by add_khafile_def()
khafile_defs_last = []  # previous build's defines (rotated in by reset())
embedded_data = []  # filled by add_embedded_data()
shaders = []  # compiled shader paths, filled by add_shader()
shaders_last = []  # previous build's shaders (rotated in by reset())
shaders_external = []  # user shader sources, filled by add_shader_external()
shader_datas = []  # shader data (.arm) files, filled by add_shader_data()
shader_passes = []  # render-pass names, filled by add_shader_pass()
shader_passes_assets = {}  # NOTE(review): never cleared by reset() — confirm intended
shader_cons = {}  # per-stage shader construct lists; keys seeded in reset()
def reset():
    """Re-initialize the module-level registries for a fresh export.

    khafile_defs and shaders are rotated into their *_last counterparts first,
    so the previous build's values stay available for change detection.
    NOTE(review): shader_passes_assets is not cleared here — confirm intended.
    """
    global assets
    global khafile_params
    global khafile_defs
    global khafile_defs_last
    global embedded_data
    global shaders
    global shaders_last
    global shaders_external
    global shader_datas
    global shader_passes
    global shader_cons
    assets = []
    khafile_params = []
    # Keep last build's defines/shaders before starting the new lists.
    khafile_defs_last = khafile_defs
    khafile_defs = []
    embedded_data = []
    shaders_last = shaders
    shaders = []
    shaders_external = []
    shader_datas = []
    shader_passes = []
    # Pre-seed one construct list per shader stage.
    shader_cons = {}
    shader_cons['mesh_vert'] = []
    shader_cons['depth_vert'] = []
    shader_cons['depth_frag'] = []
    shader_cons['voxel_vert'] = []
    shader_cons['voxel_frag'] = []
    shader_cons['voxel_geom'] = []
def add(file):
    """Register *file* as a build asset.

    Exact duplicates are ignored silently; a different path with an
    already-registered basename is skipped with a warning. After registering,
    warn about reserved name fragments that break C++ builds.
    """
    global assets
    if file in assets:
        return
    base = os.path.basename(file)
    if any(existing.endswith(base) for existing in assets):
        print('Armory Warning: Asset name "{0}" already exists, skipping'.format(base))
        return
    assets.append(file)
    # Reserved file name
    for keyword in reserved_names:
        if keyword in file:
            print('Armory Warning: File "{0}" contains reserved keyword, this will break C++ builds!'.format(file))
def add_khafile_def(d):
    """Register khafile define *d*, ignoring duplicates."""
    global khafile_defs
    if d in khafile_defs:
        return
    khafile_defs.append(d)
def add_khafile_param(p):
    """Append parameter `p` to the khafile parameter list, ignoring duplicates."""
    global khafile_params
    if p in khafile_params:
        return
    khafile_params.append(p)
def add_embedded_data(file):
    """Register `file` for embedding into the build output, ignoring duplicates."""
    global embedded_data
    if file in embedded_data:
        return
    embedded_data.append(file)
def add_shader(file):
    """Register shader `file` for compilation, ignoring duplicates."""
    global shaders
    global shaders_last
    if file in shaders:
        return
    shaders.append(file)
def add_shader_data(file):
    """Register a compiled shader data (.arm) file, ignoring duplicates."""
    global shader_datas
    if file in shader_datas:
        return
    shader_datas.append(file)
def add_shader_pass(data_name):
    """Register render pass `data_name`, ignoring duplicates."""
    global shader_passes
    # Shader data for passes are written into single shader_datas.arm file
    add_shader_data(arm.utils.get_fp_build() + '/compiled/Shaders/shader_datas.arm')
    if data_name in shader_passes:
        return
    shader_passes.append(data_name)
def add_shader_external(file):
    """Track an externally supplied shader and register its compiled copy."""
    global shaders_external
    shaders_external.append(file)
    # Take the basename regardless of '/' or '\\' separators.
    name = file.replace('\\', '/').split('/')[-1]
    add_shader(arm.utils.get_fp_build() + '/compiled/Shaders/' + name)
invalidate_enabled = True # Disable invalidating during build process
def remove_readonly(func, path, excinfo):
    # shutil.rmtree onerror handler: clear the read-only flag and retry
    # the failed operation (needed on Windows for read-only files).
    os.chmod(path, stat.S_IWRITE)
    func(path)
def invalidate_shader_cache(self, context):
    """Delete every directory that can hold stale compiled shaders so all
    shaders are recompiled on the next build.

    `self`/`context` follow Blender's property-update callback signature
    and are unused. Does nothing while ``invalidate_enabled`` is False
    (set during the build process).
    """
    # compiled.inc changed, recompile all shaders next time
    global invalidate_enabled
    if invalidate_enabled == False:
        return
    fp = arm.utils.get_fp_build()
    # All per-target resource caches that may contain compiled shaders.
    # (Refactored from seven copy-pasted isdir/rmtree stanzas; behavior
    # is identical.)
    stale_dirs = (
        '/compiled/Shaders',
        '/debug/html5-resources',
        '/krom-resources',
        '/debug/krom-resources',
        '/windows-resources',
        '/linux-resources',
        '/osx-resources',
    )
    for subdir in stale_dirs:
        target = fp + subdir
        if os.path.isdir(target):
            shutil.rmtree(target, onerror=remove_readonly)
def _remove_build_subdir(subdir):
    """Delete `<build>/<subdir>` if it exists, forcing read-only files."""
    target = arm.utils.get_fp_build() + subdir
    if os.path.isdir(target):
        shutil.rmtree(target, onerror=remove_readonly)

def invalidate_compiled_data(self, context):
    """Drop all compiled build data (no-op while invalidation is disabled)."""
    global invalidate_enabled
    if invalidate_enabled == False:
        return
    _remove_build_subdir('/compiled')

def invalidate_mesh_data(self, context):
    """Drop compiled mesh assets so they are re-exported."""
    _remove_build_subdir('/compiled/Assets/meshes')

def invalidate_envmap_data(self, context):
    """Drop compiled environment maps so they are re-exported."""
    _remove_build_subdir('/compiled/Assets/envmaps')

def invalidate_unpacked_data(self, context):
    """Drop unpacked assets so they are re-extracted."""
    _remove_build_subdir('/compiled/Assets/unpacked')
def invalidate_mesh_cache(self, context):
    """Mark the active object's mesh data as dirty so it is re-exported.

    `self`/`context` follow Blender's property-update callback signature.
    No-op when there is no active object or it has no data.
    """
    # Idiom fix: identity check with `is None` instead of `== None`
    # (avoids surprises from custom __eq__ on Blender ID types).
    if context.object is None or context.object.data is None:
        return
    context.object.data.arm_cached = False

def invalidate_instance_cache(self, context):
    """Mark the active object's mesh and all its slot materials as dirty."""
    if context.object is None or context.object.data is None:
        return
    invalidate_mesh_cache(self, context)
    for slot in context.object.material_slots:
        slot.material.arm_cached = False
def invalidate_compiler_cache(self, context):
    # Flag the Armory world settings so Haxe/Kha recompiles on next build.
    bpy.data.worlds['Arm'].arm_recompile = True
def shader_equal(sh, ar, shtype):
    """De-duplicate shader `sh` against the shaders already in `ar`.

    If an equal shader exists, `sh` is linked to its compiled data under
    context key `shtype` instead of being kept as a second copy;
    otherwise `sh` is appended to `ar`.
    """
    # Merge equal shaders
    match = next((existing for existing in ar if sh.is_equal(existing)), None)
    if match is not None:
        sh.context.data[shtype] = match.context.data[shtype]
        sh.is_linked = True
        return
    ar.append(sh)
def vs_equal(c, ar):
    # De-duplicate the context's vertex shader against `ar`.
    shader_equal(c.vert, ar, 'vertex_shader')
def fs_equal(c, ar):
    # De-duplicate the context's fragment shader against `ar`.
    shader_equal(c.frag, ar, 'fragment_shader')
def gs_equal(c, ar):
    # De-duplicate the context's geometry shader against `ar`.
    shader_equal(c.geom, ar, 'geometry_shader')
def tcs_equal(c, ar):
    # De-duplicate the context's tessellation-control shader against `ar`.
    shader_equal(c.tesc, ar, 'tesscontrol_shader')
def tes_equal(c, ar):
    # De-duplicate the context's tessellation-evaluation shader against `ar`.
    shader_equal(c.tese, ar, 'tesseval_shader')
| 30.533679 | 115 | 0.682335 |
7ada9f4fd40db752ef8b64726efd503d363f6fb6 | 703 | py | Python | app/core/models.py | johangenis/api_example | fe2c048411c93f162619aa8aed4ea26e11345692 | [
"MIT"
] | null | null | null | app/core/models.py | johangenis/api_example | fe2c048411c93f162619aa8aed4ea26e11345692 | [
"MIT"
] | null | null | null | app/core/models.py | johangenis/api_example | fe2c048411c93f162619aa8aed4ea26e11345692 | [
"MIT"
] | null | null | null | from django.db import models
class Beer(models.Model):
    """A beer that can be rated in the app."""
    name = models.CharField(max_length=255, blank=False, unique=True)
    # International Bitterness Units.
    ibu = models.IntegerField(default=55)
    calories = models.FloatField(max_length=5, default=0)
    # Alcohol by volume (percent).
    abv = models.FloatField(max_length=3, default=0)
    style = models.CharField(max_length=50, default="Bitter")
    brewery_location = models.CharField(max_length=50, default="Some Brewery")
    # Timestamps maintained automatically by Django.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        """Return a human readable representation of the model instance."""
        # Bug fix: the original `return "f{(self.name)}"` put the f-prefix
        # inside the quotes, so every instance rendered as the literal
        # text 'f{(self.name)}' instead of its name.
        return f"{self.name}"
| 37 | 78 | 0.715505 |
4f3c3cabceab4a00f14096e45b5ec178ffc8f9b1 | 7,245 | py | Python | src/dreader.py | huazhengwang/AdaMRC | 267b45ebfd1f1802987b38e99bdb1d1938f469c4 | [
"BSD-3-Clause"
] | 7 | 2020-02-19T21:26:45.000Z | 2021-07-14T09:44:50.000Z | src/dreader.py | huazhengwang/AdaMRC | 267b45ebfd1f1802987b38e99bdb1d1938f469c4 | [
"BSD-3-Clause"
] | 3 | 2021-01-09T16:13:32.000Z | 2021-05-26T08:31:17.000Z | src/dreader.py | huazhengwang/AdaMRC | 267b45ebfd1f1802987b38e99bdb1d1938f469c4 | [
"BSD-3-Clause"
] | 2 | 2020-03-09T08:24:29.000Z | 2020-05-21T03:10:54.000Z | '''
SAN model
Created October, 2017
Author: xiaodl@microsoft.com
'''
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from .recurrent import OneLayerBRNN, ContextualEmbed
from .dropout_wrapper import DropoutWrapper
from .encoder import LexiconEncoder
from .similarity import DeepAttentionWrapper, FlatSimilarityWrapper, SelfAttnWrapper
from .similarity import AttentionWrapper
from .san import SAN
from .discriminator import Discriminator
class DNetwork(nn.Module):
    """Network for SAN doc reader.

    Pipeline: lexicon encoding -> two-layer contextual BRNNs for doc and
    query -> deep attention from query onto doc -> memory generation
    (optional self-attention) -> SAN answer decoder, plus an optional
    adversarial domain discriminator.
    """
    def __init__(self, opt, embedding=None, padding_idx=0):
        """Build all sub-modules from the `opt` hyper-parameter dict.

        `embedding` is an optional pre-trained word embedding matrix;
        `padding_idx` is accepted for API compatibility but unused here.
        """
        super(DNetwork, self).__init__()
        my_dropout = DropoutWrapper(opt['dropout_p'], opt['vb_dropout'])
        self.dropout = my_dropout
        self.lexicon_encoder = LexiconEncoder(opt, embedding=embedding, dropout=my_dropout)
        query_input_size = self.lexicon_encoder.query_input_size
        doc_input_size = self.lexicon_encoder.doc_input_size
        covec_size = self.lexicon_encoder.covec_size
        embedding_size = self.lexicon_encoder.embedding_dim
        # share net
        contextual_share = opt.get('contextual_encoder_share', False)
        prefix = 'contextual'
        # doc_hidden_size
        # Two stacked BRNN layers; the "high" layer consumes the low layer's
        # output concatenated with the contextual vectors (CoVe).
        self.doc_encoder_low = OneLayerBRNN(doc_input_size + covec_size, opt['contextual_hidden_size'], prefix=prefix, opt=opt, dropout=my_dropout)
        self.doc_encoder_high = OneLayerBRNN(self.doc_encoder_low.output_size + covec_size, opt['contextual_hidden_size'], prefix=prefix, opt=opt, dropout=my_dropout)
        if contextual_share:
            # Share doc/query contextual encoders when configured.
            self.query_encoder_low = self.doc_encoder_low
            self.query_encoder_high = self.doc_encoder_high
        else:
            self.query_encoder_low = OneLayerBRNN(query_input_size + covec_size, opt['contextual_hidden_size'], prefix=prefix, opt=opt, dropout=my_dropout)
            self.query_encoder_high = OneLayerBRNN(self.query_encoder_low.output_size + covec_size, opt['contextual_hidden_size'], prefix=prefix, opt=opt, dropout=my_dropout)
        doc_hidden_size = self.doc_encoder_low.output_size + self.doc_encoder_high.output_size
        query_hidden_size = self.query_encoder_low.output_size + self.query_encoder_high.output_size
        self.query_understand = OneLayerBRNN(query_hidden_size, opt['msum_hidden_size'], prefix='msum', opt=opt, dropout=my_dropout)
        doc_attn_size = doc_hidden_size + covec_size + embedding_size
        query_attn_size = query_hidden_size + covec_size + embedding_size
        num_layers = 3
        prefix = 'deep_att'
        self.deep_attn = DeepAttentionWrapper(doc_attn_size, query_attn_size, num_layers, prefix, opt, my_dropout)
        doc_und_size = doc_hidden_size + query_hidden_size + self.query_understand.output_size
        self.doc_understand = OneLayerBRNN(doc_und_size, opt['msum_hidden_size'], prefix='msum', opt=opt, dropout=my_dropout)
        query_mem_hidden_size = self.query_understand.output_size
        doc_mem_hidden_size = self.doc_understand.output_size
        if opt['self_attention_on']:
            # Optional doc self-attention; doubles the memory width before
            # the memory-generation BRNN.
            att_size = embedding_size + covec_size + doc_hidden_size + query_hidden_size + self.query_understand.output_size + self.doc_understand.output_size
            self.doc_self_attn = AttentionWrapper(att_size, att_size, prefix='self_att', opt=opt, dropout=my_dropout)
            doc_mem_hidden_size = doc_mem_hidden_size * 2
            self.doc_mem_gen = OneLayerBRNN(doc_mem_hidden_size, opt['msum_hidden_size'], 'msum', opt, my_dropout)
            doc_mem_hidden_size = self.doc_mem_gen.output_size
        # Question merging
        self.query_sum_attn = SelfAttnWrapper(query_mem_hidden_size, prefix='query_sum', opt=opt, dropout=my_dropout)
        self.decoder = SAN(doc_mem_hidden_size, query_mem_hidden_size, opt, prefix='decoder', dropout=my_dropout)
        self.opt = opt
        self.discriminator = Discriminator(opt)
    def forward(self, batch_list, name_map, grad_scale=4):
        """Run one forward pass.

        Returns (start_scores, end_scores, disc_out, adv_norm); `disc_out`
        is None when the adversarial discriminator is disabled (`no_adv`).
        `grad_scale` is forwarded to the discriminator (gradient-reversal
        scaling, presumably — confirm against Discriminator).
        """
        # Lexicon encoder yields embeddings, CoVe vectors and masks for
        # both document and query (exact tensor shapes depend on
        # LexiconEncoder — not visible here).
        doc_input, query_input,\
        doc_emb, query_emb,\
        doc_cove_low, doc_cove_high,\
        query_cove_low, query_cove_high,\
        doc_mask, query_mask = self.lexicon_encoder(batch_list, name_map)
        query_list, doc_list = [], []
        query_list.append(query_input)
        doc_list.append(doc_input)
        # doc encode
        doc_low = self.doc_encoder_low(torch.cat([doc_input, doc_cove_low], 2), doc_mask)
        doc_low = self.dropout(doc_low)
        doc_high = self.doc_encoder_high(torch.cat([doc_low, doc_cove_high], 2), doc_mask)
        doc_high = self.dropout(doc_high)
        # query
        query_low = self.query_encoder_low(torch.cat([query_input, query_cove_low], 2), query_mask)
        query_low = self.dropout(query_low)
        query_high = self.query_encoder_high(torch.cat([query_low, query_cove_high], 2), query_mask)
        query_high = self.dropout(query_high)
        query_mem_hiddens = self.query_understand(torch.cat([query_low, query_high], 2), query_mask)
        query_mem_hiddens = self.dropout(query_mem_hiddens)
        # Rebind the lists to the encoded representations (initial raw
        # inputs appended above are intentionally discarded here).
        query_list = [query_low, query_high, query_mem_hiddens]
        doc_list = [doc_low, doc_high]
        query_att_input = torch.cat([query_emb, query_cove_high, query_low, query_high], 2)
        doc_att_input = torch.cat([doc_emb, doc_cove_high] + doc_list, 2)
        doc_attn_hiddens = self.deep_attn(doc_att_input, query_att_input, query_list, query_mask)
        doc_attn_hiddens = self.dropout(doc_attn_hiddens)
        doc_mem_hiddens = self.doc_understand(torch.cat([doc_attn_hiddens] + doc_list, 2), doc_mask)
        doc_mem_hiddens = self.dropout(doc_mem_hiddens)
        doc_mem_inputs = torch.cat([doc_attn_hiddens] + doc_list, 2)
        if self.opt['self_attention_on']:
            doc_att = torch.cat([doc_mem_inputs, doc_mem_hiddens, doc_cove_high, doc_emb], 2)
            doc_self_hiddens = self.doc_self_attn(doc_att, doc_att, doc_mask, x3=doc_mem_hiddens)
            doc_mem = self.doc_mem_gen(torch.cat([doc_mem_hiddens, doc_self_hiddens], 2), doc_mask)
        else:
            doc_mem = doc_mem_hiddens
        query_mem = self.query_sum_attn(query_mem_hiddens, query_mask)
        # print (doc_mem.size(), query_mem.size())
        start_scores, end_scores = self.decoder(doc_mem, query_mem, doc_mask)
        # doc_query_mem = torch.cat([doc_mem, query_mem], 2)
        # import pdb; pdb.set_trace()
        if self.opt['no_adv']:
            disc_out = None
            adv_norm = [0]
        else:
            # disc_out, adv_norm = self.discriminator(doc_mem, query_mem, grad_scale)
            # NOTE(review): if disc_input_type is not 1, 2 or 3, disc_out /
            # adv_norm are never assigned and the return below raises
            # NameError — confirm valid option values with callers.
            if self.opt['disc_input_type'] == 1: #concatenate query and doc sequence
                disc_out, adv_norm = self.discriminator(doc_mem, torch.cat([query_mem.unsqueeze(1), doc_mem], 1), doc_mask, grad_scale)
            elif self.opt['disc_input_type'] == 2: #only doc sequence
                disc_out, adv_norm = self.discriminator(doc_mem, doc_mem, doc_mask, grad_scale)
            elif self.opt['disc_input_type'] == 3:
                disc_out, adv_norm = self.discriminator(doc_mem, doc_mem, doc_mask, grad_scale)
        # import pdb; pdb.set_trace()
        return start_scores, end_scores, disc_out, adv_norm
| 51.75 | 174 | 0.712077 |
efba6eac9a1aea95bf2bef2b71640012c461b76d | 11,664 | py | Python | test/onnx-model-zoo/CheckONNXModelZoo.py | mmoldawsky/onnx-mlir | cb0a12147c38800b04e1036fe835f1b5376e1a12 | [
"Apache-2.0"
] | null | null | null | test/onnx-model-zoo/CheckONNXModelZoo.py | mmoldawsky/onnx-mlir | cb0a12147c38800b04e1036fe835f1b5376e1a12 | [
"Apache-2.0"
] | 1 | 2020-05-19T18:21:52.000Z | 2020-05-19T18:21:52.000Z | test/onnx-model-zoo/CheckONNXModelZoo.py | mmoldawsky/onnx-mlir | cb0a12147c38800b04e1036fe835f1b5376e1a12 | [
"Apache-2.0"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
##################### CheckONNXModelZoo.py #####################################
#
# Copyright 2022 The IBM Research Authors.
#
################################################################################
#
# This script is used to check models in https://github.com/onnx/models.
#
################################################################################
import os
import sys
import argparse
import subprocess
import tempfile
import difflib
from joblib import Parallel, delayed
"""
Note:
- This script must be invoked from the root folder of https://github.com/onnx/models.
- This script will call RunONNXModel.py. Make sure to put RunONNXModel.py and this script in the same folder.
- Environment variable ONNX_MLIR_HOME is needed to find onnx-mlir.
- By default, the script checks all models in the model zoo.
- Use `-m model_name` to check one model, or directly edit `models_to_run` to check a list of selected models.
Example:
$ git clone https://github.com/onnx/models
$ cd models
$ ln -s /onnx_mlir/test/onnx-model/test/onnx-model-zoo/CheckONNXModelZoo.py CheckONNXModelZoo.py
$ ln -s /onnx_mlir/utils/RunONNXModel.py RunONNXModel.py
$ VERBOSE=1 ONNX_MLIR_HOME=/onnx-mlir/build/Release/ python CheckONNXModelZoo.py -pull-models -m mnist-8 -compile_args="-O3 -mcpu=z14"
"""
# Fail fast at import time: the script invokes onnx-mlir binaries that
# live under $ONNX_MLIR_HOME.
if (not os.environ.get('ONNX_MLIR_HOME', None)):
    raise RuntimeError(
        "Environment variable ONNX_MLIR_HOME is not set, please set it to the path to "
        "the HOME directory for onnx-mlir. The HOME directory for onnx-mlir refers to "
        "the parent folder containing the bin, lib, etc. sub-folders in which ONNX-MLIR "
        "executables and libraries can be found.")
"""
VERBOSE values:
- 0: turn off
- 1: user information
- 2: (user + command) information
"""
# Verbosity level read once from the environment at import time.
VERBOSE = int(os.environ.get('VERBOSE', 0))
def log_l1(*args):
    """Print `args` space-joined at verbosity level 1 (user info) or above."""
    if VERBOSE < 1:
        return
    print(' '.join(args))

def log_l2(*args):
    """Print `args` space-joined at verbosity level 2 (user + command) or above."""
    if VERBOSE < 2:
        return
    print(' '.join(args))
"""Commands will be called in this script.
"""
FIND_MODEL_PATHS_CMD = ['find', '.', '-type', 'f', '-name', '*.tar.gz']
# git lfs pull --include="${onnx_model}" --exclude=""
PULL_CMD = ['git', 'lfs', 'pull', '--exclude=\"\"']
# git lfs pointer --file = "${onnx_model}" > ${onnx_model}.pt
CLEAN_CMD = ['git', 'lfs', 'pointer']
# git checkout file_path
CHECKOUT_CMD = ['git', 'checkout']
# tar -xzvf file.tar.gz
UNTAR_CMD = ['tar', '-xzvf']
RM_CMD = ['rm']
MV_CMD = ['mv']
# Compile, run and verify an onnx model.
RUN_ONNX_MODEL = ['python', 'RunONNXModel.py']
def execute_commands(cmds):
    """Run `cmds` and capture its output.

    Returns (True, stdout) when nothing was written to stderr, otherwise
    (False, stderr). Any stderr output counts as failure, regardless of
    the exit code.
    """
    log_l2(' '.join(cmds))
    completed = subprocess.run(cmds,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    if completed.stderr:
        return (False, completed.stderr.decode("utf-8"))
    return (True, completed.stdout.decode("utf-8"))
def execute_commands_to_file(cmds, ofile):
    """Run `cmds`, redirecting both stdout and stderr into file `ofile`."""
    log_l2(' '.join(cmds))
    with open(ofile, 'w') as sink:
        proc = subprocess.Popen(cmds,
                                stdout=sink,
                                stderr=subprocess.STDOUT)
        proc.communicate()
# Deprecated models according to: https://github.com/onnx/models/pull/389
# These use very old opsets (e.g. <= 3) and are skipped by the checker.
deprecated_models = {
    "mnist-1",
    "bvlcalexnet-3",
    "caffenet-3",
    "densenet-3",
    "inception-v1-3",
    "inception-v2-3",
    "rcnn-ilsvrc13-3",
    "resnet50-caffe2-v1-3",
    "shufflenet-3",
    "zfnet512-3",
    "vgg19-caffe2-3",
    "emotion-ferplus-2",
}
# States
# Per-model outcome codes returned by check_model / pull_and_check_model.
NO_TEST = 0
TEST_FAILED = 1
TEST_PASSED = 2
def obtain_all_model_paths():
    """Find every *.tar.gz model archive in the model zoo checkout.

    Returns (model_names, model_paths): `model_paths` are relative paths
    with the leading './' pruned; `model_names` are the archive basenames
    with the '.tar.gz' suffix stripped. Also logs how many models exist
    and how many are deprecated.
    """
    _, model_paths = execute_commands(FIND_MODEL_PATHS_CMD)
    model_paths = model_paths.split('\n')
    # Remove empty paths and prune the leading './' in each path.
    model_paths = [path[2:] for path in model_paths if path]
    # Fix: the stripped suffix is '.tar.gz' (the original literal said
    # '.tag.gz'; only its length was used, so behavior is unchanged).
    model_names = [
        path.split('/')[-1][:-len(".tar.gz")] for path in model_paths
    ]  # remove .tar.gz
    deprecated_names = set(model_names).intersection(deprecated_models)
    log_l1('\n')
    deprecated_msg = ""
    if (len(deprecated_names) != 0):
        deprecated_msg = "where " + \
            str(len(deprecated_names)) + \
            " models are deprecated (using very old opsets, e.g. <= 3)"
    log_l1("# There are {} models in the ONNX model zoo {}".format(
        len(model_paths), deprecated_msg))
    log_l1("See https://github.com/onnx/models/pull/389",
           "for a list of deprecated models\n")
    return model_names, model_paths
def check_model(model_path, model_name, compile_args):
    """Untar the model archive, then compile/run/verify it via RunONNXModel.py.

    Returns NO_TEST when the archive contains no .onnx file, otherwise
    TEST_PASSED / TEST_FAILED based on the RunONNXModel.py outcome.
    Verification against the bundled test data sets is enabled only when
    such data is found in the archive.
    """
    # NOTE(review): `passed` is never read again; `state` carries the result.
    passed = NO_TEST
    with tempfile.TemporaryDirectory() as tmpdir:
        # untar
        log_l1('Extracting the .tag.gz to {}'.format(tmpdir))
        execute_commands(UNTAR_CMD + [model_path, '-C', tmpdir])
        _, onnx_files = execute_commands(
            ['find', tmpdir, '-type', 'f', '-name', '*.onnx'])
        # log_l1(onnx_files)
        # temporary folder's structure:
        # - model.onnx
        # - test_data_set_0
        # - test_data_set_1
        # - test_data_set_2
        # - ...
        # - test_data_set_n
        # Check .onnx file.
        if (len(onnx_files) == 0):
            log_l1("There is no .onnx file for this model. Ignored.")
            return NO_TEST
        # Use only the first .onnx file if the archive contains several.
        onnx_file = onnx_files.split('\n')[0]
        # Check data sets.
        has_data_sets = False
        _, data_sets = execute_commands(
            ['find', tmpdir, '-type', 'd', '-name', 'test_data_set*'])
        if (len(data_sets) > 0):
            has_data_sets = True
            data_set = data_sets.split('\n')[0]
        else:
            # if there is no `test_data_set` subfolder, find a folder containing .pb files.
            _, pb_files = execute_commands(
                ['find', tmpdir, '-name', '*.pb', '-printf', '%h\n'])
            if (len(pb_files) > 0):
                has_data_sets = True
                data_set = pb_files.split('\n')[0]
        if (not has_data_sets):
            log_l1("Warning: This model does not have test data sets.")
        # compile, run and verify.
        log_l1("Checking the model {} ...".format(model_name))
        # Default to -O3 when the user gave no compile arguments.
        compile_options = "--compile_args=" + (compile_args
                                               if compile_args else "-O3")
        options = [compile_options]
        if has_data_sets:
            options += ['--verify=ref']
            options += ['--data_folder={}'.format(data_set)]
        ok, msg = execute_commands(RUN_ONNX_MODEL + [onnx_file] + options)
        state = TEST_PASSED if ok else TEST_FAILED
        log_l1(msg)
    return state
def pull_and_check_model(model_path, compile_args, pull_models, keep_model):
    """Optionally git-lfs-pull a model, check it, then restore the LFS pointer.

    Returns (state, model_name) where state is NO_TEST / TEST_PASSED /
    TEST_FAILED. Deprecated models are skipped with state NO_TEST.
    """
    state = NO_TEST
    # Ignore deprecated models.
    model_name = model_path.split('/')[-1][:-len(".tag.gz")] # remove .tag.gz
    if model_name in deprecated_models:
        log_l1("The model {} is deprecated. Ignored.".format(model_name))
        return state, model_name
    # pull the model.
    if pull_models:
        log_l1('Downloading {}'.format(model_path))
        pull_cmd = PULL_CMD + ['--include={}'.format(model_path)]
        ok, _ = execute_commands(pull_cmd)
        if not ok:
            # NOTE(review): a failed pull still falls through to
            # check_model below — confirm this is intended.
            log_l1("Failed to pull the model {}. Ignored.".format(model_name))
    # check the model.
    state = check_model(model_path, model_name, compile_args)
    if pull_models and (not keep_model):
        # remove the model to save the storage space.
        # Regenerate the git-lfs pointer file and swap it back in so the
        # working tree looks un-pulled again.
        clean_cmd = CLEAN_CMD + ['--file={}'.format(model_path)]
        execute_commands_to_file(clean_cmd, '{}.pt'.format(model_path))
        execute_commands(RM_CMD + [model_path])
        execute_commands(MV_CMD + ['{}.pt'.format(model_path), model_path])
        execute_commands(CHECKOUT_CMD + [model_path])
    return state, model_name
def main():
    """Parse CLI options, select models, check them in parallel and report."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m',
        '-model',
        metavar='model_name',
        help="Only process a single model in the ONNX model zoo."
        " Passing the name of the model, e.g. mnist-8."
        " Use -p to know model names.")
    parser.add_argument('-p',
                        '-print_model_paths',
                        action='store_true',
                        help="Only print model paths in the model zoo.")
    parser.add_argument('-k',
                        '-keep_pulled_models',
                        action='store_true',
                        help="Keep the pulled models")
    parser.add_argument('-a',
                        '-assertion',
                        action='store_true',
                        help="Raise assertion if there are failed models")
    parser.add_argument(
        '-compile_args',
        help="Options passing to onnx-mlir to compile a model.")
    # -njobs and -pull_models are mutually exclusive: pulling mutates the
    # git working tree, which is unsafe across parallel workers.
    parallel_group = parser.add_mutually_exclusive_group()
    parallel_group.add_argument(
        '-njobs',
        type=int,
        default=1,
        help="The number of processes in parallel."
        " The large -njobs is, the more disk space is needed"
        " for downloaded onnx models. Default 1.")
    parallel_group.add_argument(
        '-pull_models',
        action='store_true',
        help="Pull models from the remote git repository."
        " This requires git-lfs. Please follow the instruction here to install"
        " git-lfs: https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage."
    )
    args = parser.parse_args()
    # Collect all model paths in the model zoo
    all_model_names, all_model_paths = obtain_all_model_paths()
    if (args.p):
        for path in all_model_paths:
            print(path)
        return
    # By default, run all models in the model zoo.
    # But, if `-s` is specified, only run a model if given.
    models_to_run = all_model_names
    # If we would like to run with models of interest only, set models_to_run.
    # models_to_run = ['mnist-8', 'yolov4', 'resnet50-v2-7']
    if (args.m):
        models_to_run = [args.m]
    target_model_paths = []
    for name in models_to_run:
        if name not in all_model_names:
            # Suggest close matches for an unknown model name, then abort.
            print(
                "Model", args.m,
                "not found. Do you mean one of the following? ",
                difflib.get_close_matches(name, all_model_names,
                                          len(all_model_names)))
            return
        target_model_paths += [m for m in all_model_paths if name in m]
    # Start processing the models.
    results = Parallel(n_jobs=args.njobs,
                       verbose=1)(delayed(pull_and_check_model)(
                           path, args.compile_args, args.pull_models, args.k)
                                  for path in target_model_paths)
    # Report the results.
    tested_models = [r[1] for r in results if r[0] != NO_TEST]
    print("{} models tested: {}\n".format(len(tested_models),
                                          ', '.join(tested_models)))
    passed_models = [r[1] for r in results if r[0] == TEST_PASSED]
    print("{} models passed: {}\n".format(len(passed_models),
                                          ', '.join(passed_models)))
    if len(passed_models) != len(tested_models):
        failed_models = [r[1] for r in results if r[0] == TEST_FAILED]
        msg = "{} model failed: {}\n".format(len(failed_models),
                                             ', '.join(failed_models))
        if args.assertion:
            raise AssertionError(msg)
        else:
            print(msg)
main()
| 36.111455 | 138 | 0.587363 |
7a33485b261cc2d48328e27016e59409ede49d22 | 9,312 | py | Python | scripts/hatebase_api/hatebase.py | ysenarath/hate-detection-icsc-2020 | 9bd802209c7df982d63179e53628a89b14915446 | [
"MIT"
] | 2 | 2020-06-25T05:13:22.000Z | 2020-06-25T05:54:10.000Z | scripts/hatebase_api/hatebase.py | ysenarath/hate-detection-icsc-2020 | 9bd802209c7df982d63179e53628a89b14915446 | [
"MIT"
] | null | null | null | scripts/hatebase_api/hatebase.py | ysenarath/hate-detection-icsc-2020 | 9bd802209c7df982d63179e53628a89b14915446 | [
"MIT"
] | null | null | null | import json
import os
from os.path import join
from typing import Text, Iterable
import numpy as np
import pandas as pd
from hatebase import HatebaseAPI
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelBinarizer
from tklearn.datasets import get_data_home
__all__ = [
'download_hatebase',
'load_hatebase',
'HatebaseVectorizer',
]
def download_hatebase(token=None, resource_home=None):
    """ Downloads the English Hatebase vocabulary to the resource folder.
    Prerequisite: the `hatebase` package.

    Prompts interactively for an API key when `token` is not given.
    Writes `hatebase_vocab_en.json` (term -> full API record) either into
    `resource_home` or into `<data_home>/../resource` (created if absent).

    Parameters
    ----------
    token
        Hatebase API key; prompted for when None.
    resource_home
        Target folder for the JSON file; defaults to the tklearn resource dir.

    Returns
    -------
    None
    """
    if token is None:
        key = input('Please enter your api key for https://hatebase.org/: ')
    else:
        key = token
    hatebase = HatebaseAPI({"key": key})
    filters = {"language": "eng"}
    # initialize list for all vocabulary entry dictionaries
    en_vocab = {}
    # First request only to learn the page count.
    response = hatebase.getVocabulary(filters=filters, format='json')
    pages = response["number_of_pages"]
    # fill the vocabulary list with all entries of all pages
    # this might take some time...
    for page in range(1, pages + 1):
        filters["page"] = str(page)
        response = hatebase.getVocabulary(filters=filters, format='json')
        results = response["result"]
        for result in results:
            en_vocab[result['term']] = result
    # Save file in the path
    if resource_home:
        resource_path = join(resource_home, 'hatebase_vocab_en.json')
    else:
        directory = join(get_data_home(), '..', 'resource')
        if not os.path.exists(directory):
            os.makedirs(directory)
        resource_path = join(directory, 'hatebase_vocab_en.json')
    with open(resource_path, 'w', encoding='utf-8') as json_file:
        json.dump(en_vocab, json_file)
# noinspection SpellCheckingInspection
def load_hatebase(resource_home=None):
    """Load the English Hatebase vocabulary JSON.

    Reads `hatebase_vocab_en.json` from `resource_home` when given,
    otherwise from the default `<data_home>/../resource` folder.
    """
    if resource_home:
        base_dir = resource_home
    else:
        base_dir = join(get_data_home(), '..', 'resource')
    resource_path = join(base_dir, 'hatebase_vocab_en.json')
    with open(resource_path, 'r', encoding='utf-8') as fp:
        return json.load(fp)
# noinspection SpellCheckingInspection,PyPep8Naming
class HatebaseVectorizer(TransformerMixin, BaseEstimator):
    """Sklearn-style transformer mapping sentences to Hatebase lexicon features.

    For each sentence, tokens matching Hatebase terms (multi-word terms are
    joined with underscores) are looked up, and their per-term feature
    vectors — built once in `_prepare_features` from the selected `features`
    columns — are summed into a single vector per sentence.
    """
    def __init__(self, features=None, tokenizer=None, resource_home=None):
        # Which Hatebase columns to turn into features; defaults to ['term'].
        self.features = features
        # Optional custom tokenizer; defaults to whitespace splitting.
        self.tokenizer = tokenizer
        # Folder holding hatebase_vocab_en.json; None uses the default.
        self.resource_home = resource_home
        # Initialize
        if self.features is None:
            self.features = ['term']
        self.hatebase = load_hatebase(self.resource_home)
        # feature_vectors: DataFrame indexed by vocabulary_id;
        # index: underscore-joined term -> vocabulary_id.
        self.feature_vectors, self.index = self._prepare_features(self.hatebase, self.features)
        self.dims = self.feature_vectors.shape[1]
        self.tokenize = self.tokenizer if self.tokenizer else self._whitespace_tokenize
    def fit(self, X, y=None, **kwargs):
        """ Included for compatibility with the interface of `TransformerMixin`.
        Parameters
        ----------
        X
            Input features.
        y
            Input labels.
        kwargs
        Returns
        -------
        self
            `self`
        """
        return self
    def transform(self, X):
        """ Extract features from the input array-like.
        Parameters
        ----------
        X
            An array-like of sentences to extract Hatebase features.
        Returns
        -------
        `numpy.ndarray` of shape (len(X), self.dims) of Hatebase features.
        """
        features = [self._extract_features(x) for x in X]
        return np.array(features)
    def _preprocess(self, text: Text) -> pd.Series:
        """ Preprocess and tokenize input text.

        Multi-word Hatebase terms occurring in `text` are rewritten with
        underscores so they survive tokenization as single tokens.
        Parameters
        ----------
        text
            Input text.
        Returns
        -------
        tokens
            Preprocessed and tokenized sentences.
        """
        for v in self.index:
            if '_' in v:
                text = text.replace(v.replace('_', ' '), v)
        return pd.Series(self.tokenize(text))
    def _extract_features(self, text: Text) -> np.ndarray:
        """ Extracts features from Text input.

        Sums the feature vectors of every token that matches a Hatebase
        term; returns a zero vector when no token matches.
        Parameters
        ----------
        text
            Input text.
        Returns
        -------
        `numpy.ndarray` of length self.dims.
        """
        tokens = self._preprocess(text)
        # Seed with a zero vector so np.sum works on empty matches.
        feature_mtx = [np.zeros(self.dims)]
        for v in tokens:
            if v in self.index:
                feature_mtx.append(self.feature_vectors.loc[self.index[v]].tolist())
        return np.sum(feature_mtx, axis=0)
    @classmethod
    def _whitespace_tokenize(cls, text):
        """ Default Tokenizer: split on single spaces.
        Parameters
        ----------
        text
        Returns
        -------
        List of tokens.
        """
        return text.split(' ')
    @classmethod
    def _prepare_features(cls, dataset, features):
        """ Prepares features for each term.

        Builds one encoded DataFrame per requested feature column and
        concatenates them, indexed by vocabulary_id.
        Parameters
        ----------
        dataset
            Hatebase dataset (term -> API record dict).
        features
            Names of the Hatebase columns to encode.
        Returns
        -------
        feature_map
            Feature-map, and word index mapping (underscored term -> id).
        """
        index = {r['term'].replace(' ', '_'): r['vocabulary_id'] for _, r in dataset.items()}
        dataset = pd.DataFrame([v for (k, v) in dataset.items()], index=index)
        dfs = []
        if 'term' in features:
            dfs.append(cls._count_vectorize(dataset.term, 'term_', dataset.vocabulary_id))
        if 'hateful_meaning' in features:
            dfs.append(cls._count_vectorize(dataset.hateful_meaning, 'hmeam_', dataset.vocabulary_id))
        if 'nonhateful_meaning' in features:
            dfs.append(cls._count_vectorize(dataset.nonhateful_meaning, 'nmeam_', dataset.vocabulary_id))
        if 'is_unambiguous' in features:
            dfs.append(cls._label_binarizer(dataset.is_unambiguous, 'iunmb_', dataset.vocabulary_id))
        if 'is_unambiguous_in' in features:
            dfs.append(cls._count_vectorize(dataset.is_unambiguous_in, 'unmbn_', dataset.vocabulary_id))
        if 'average_offensiveness' in features:
            dfs.append(cls._discretize(dataset.average_offensiveness, 'avgeff_', dataset.vocabulary_id))
        if 'plural_of' in features:
            dfs.append(cls._label_binarizer(dataset.plural_of, 'pluof_', dataset.vocabulary_id))
        if 'variant_of' in features:
            dfs.append(cls._label_binarizer(dataset.variant_of, 'varof_', dataset.vocabulary_id))
        if 'transliteration_of' in features:
            dfs.append(cls._label_binarizer(dataset.transliteration_of, 'traof_', dataset.vocabulary_id))
        if 'is_about_nationality' in features:
            dfs.append(cls._label_binarizer(dataset.is_about_nationality, 'abtnat_', dataset.vocabulary_id))
        if 'is_about_ethnicity' in features:
            dfs.append(cls._label_binarizer(dataset.is_about_ethnicity, 'abteth_', dataset.vocabulary_id))
        if 'is_about_religion' in features:
            dfs.append(cls._label_binarizer(dataset.is_about_religion, 'abtrel_', dataset.vocabulary_id))
        if 'is_about_gender' in features:
            dfs.append(cls._label_binarizer(dataset.is_about_gender, 'abtgen_', dataset.vocabulary_id))
        if 'is_about_sexual_orientation' in features:
            dfs.append(cls._label_binarizer(dataset.is_about_sexual_orientation, 'abtsex_', dataset.vocabulary_id))
        if 'is_about_disability' in features:
            dfs.append(cls._label_binarizer(dataset.is_about_disability, 'abtdis_', dataset.vocabulary_id))
        if 'is_about_class' in features:
            dfs.append(cls._label_binarizer(dataset.is_about_class, 'abtcls_', dataset.vocabulary_id))
        if 'number_of_sightings' in features:
            # NOTE(review): this reuses the 'abtcls_' prefix from
            # is_about_class, so the two features' column names can
            # collide — confirm whether a distinct prefix was intended.
            dfs.append(cls._label_binarizer(dataset.number_of_sightings, 'abtcls_', dataset.vocabulary_id))
        return pd.concat(dfs, axis=1), index
    @classmethod
    def _discretize(cls, a: pd.Series, prefix: Text = '', index: Iterable = None) -> pd.DataFrame:
        # Bin a numeric column (Freedman–Diaconis bins) then one-hot
        # encode the bin ids; NaNs are filled with the column mean first.
        if index is None:
            index = list(range(len(a)))
        a = a.fillna(a.mean())
        _, bin_edges = np.histogram(a, bins='fd')
        data = np.digitize(a, bin_edges)
        return cls._label_binarizer(pd.Series(data), prefix=prefix, index=index)
    @classmethod
    def _label_binarizer(cls, a: pd.Series, prefix: Text = '', index: Iterable = None) -> pd.DataFrame:
        # One-hot encode a categorical column; NaNs become the '_NULL_'
        # category. Single-class columns yield one column named `prefix`.
        if index is None:
            index = list(range(len(a)))
        a = a.fillna('_NULL_')
        ohe = LabelBinarizer()
        data = ohe.fit_transform(a)
        if data.shape[1] == 1:
            columns = [prefix]
        else:
            columns = [prefix + str(s).lower() for s in ohe.classes_[:data.shape[1]]]
        return pd.DataFrame(data, index=index, columns=columns)
    @classmethod
    def _count_vectorize(cls, a: pd.Series, prefix: Text = '', index: Iterable = None) -> pd.DataFrame:
        # Binary bag-of-words encoding of a free-text column; NaNs become
        # empty strings.
        if index is None:
            index = list(range(len(a)))
        a = a.fillna('')
        cv = CountVectorizer(binary=True)
        data = cv.fit_transform(a)
        columns = [prefix + s for s in cv.get_feature_names()]
        return pd.DataFrame(data.todense(), index=index, columns=columns)
| 35.406844 | 115 | 0.626396 |
32265e65ed52d8955a0683b908abe9faddd6ca41 | 3,589 | py | Python | lang/py/test/test_tether_task.py | gth828r/avro-python3 | c8379728b4a762d99262e526fb53f631076fb143 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | lang/py/test/test_tether_task.py | gth828r/avro-python3 | c8379728b4a762d99262e526fb53f631076fb143 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | lang/py/test/test_tether_task.py | gth828r/avro-python3 | c8379728b4a762d99262e526fb53f631076fb143 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import time
import unittest
import set_avro_test_path
class TestTetherTask(unittest.TestCase):
  """
  TODO: We should validate the the server response by looking at stdout

  NOTE: this file is Python 2 code (print statements, StringIO module).
  """
  def test1(self):
    """
    Test that the thether_task is working. We run the mock_tether_parent in a separate
    subprocess
    """
    from avro import tether
    from avro import io as avio
    from avro import schema
    from avro.tether import HTTPRequestor,inputProtocol, find_port
    import StringIO
    import mock_tether_parent
    from word_count_task import WordCountTask
    task=WordCountTask()
    proc=None
    try:
      # launch the server in a separate process
      # env["AVRO_TETHER_OUTPUT_PORT"]=output_port
      env=dict()
      env["PYTHONPATH"]=':'.join(sys.path)
      server_port=find_port()
      pyfile=mock_tether_parent.__file__
      proc=subprocess.Popen(["python", pyfile,"start_server","{0}".format(server_port)])
      input_port=find_port()
      print "Mock server started process pid={0}".format(proc.pid)
      # Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
      # so we give the subprocess time to start up
      time.sleep(1)
      task.open(input_port,clientPort=server_port)
      # TODO: We should validate that open worked by grabbing the STDOUT of the subproces
      # and ensuring that it outputted the correct message.
      #***************************************************************
      # Test the mapper
      task.configure(tether.TaskType.MAP,str(task.inschema),str(task.midschema))
      # Serialize some data so we can send it to the input function
      datum="This is a line of text"
      writer = StringIO.StringIO()
      encoder = avio.BinaryEncoder(writer)
      datum_writer = avio.DatumWriter(task.inschema)
      datum_writer.write(datum, encoder)
      writer.seek(0)
      data=writer.read()
      # Call input to simulate calling map
      task.input(data,1)
      # Test the reducer
      task.configure(tether.TaskType.REDUCE,str(task.midschema),str(task.outschema))
      # Serialize some data so we can send it to the input function
      datum={"key":"word","value":2}
      writer = StringIO.StringIO()
      encoder = avio.BinaryEncoder(writer)
      datum_writer = avio.DatumWriter(task.midschema)
      datum_writer.write(datum, encoder)
      writer.seek(0)
      data=writer.read()
      # Call input to simulate calling reduce
      task.input(data,1)
      task.complete()
      # try a status
      task.status("Status message")
    except Exception as e:
      # Re-raise unchanged; the try exists only so `finally` can clean up.
      raise
    finally:
      # close the process
      if not(proc is None):
        proc.kill()
      pass
# Run this test module directly (python test_tether_task.py).
if __name__ == '__main__':
  unittest.main()
dc6411e96a37c17c30490fb93b9466d5bc0d7489 | 434 | py | Python | scorecard/migrations/0004_auto_20201216_1653.py | Code4SA/municipal-data-api | 8b213b702245bc2ff1bab4bd160c4cd3b604d54f | [
"MIT"
] | 19 | 2018-01-09T10:54:15.000Z | 2022-01-25T13:10:55.000Z | scorecard/migrations/0004_auto_20201216_1653.py | Code4SA/municipal-data-api | 8b213b702245bc2ff1bab4bd160c4cd3b604d54f | [
"MIT"
] | 29 | 2018-01-12T12:12:38.000Z | 2022-01-31T15:30:36.000Z | scorecard/migrations/0004_auto_20201216_1653.py | Code4SA/municipal-data-api | 8b213b702245bc2ff1bab4bd160c4cd3b604d54f | [
"MIT"
] | 13 | 2018-02-11T02:12:57.000Z | 2021-11-22T11:03:22.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-12-16 14:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: only alters model Meta options
    # (verbose_name_plural), so it emits no schema SQL.

    dependencies = [
        ('scorecard', '0003_geography_population'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='geography',
            options={'verbose_name_plural': 'Geographies'},
        ),
    ]
| 21.7 | 59 | 0.635945 |
ee37e43e86a0a076ea8131c929c1f8d37c0b1dbc | 12,085 | py | Python | Fisher_Iris_Script.py | eimearbutler7/Sem-1-Python-Project | 42306ac2bef920050b6a089e7c69c652c780b6dd | [
"Apache-2.0"
] | null | null | null | Fisher_Iris_Script.py | eimearbutler7/Sem-1-Python-Project | 42306ac2bef920050b6a089e7c69c652c780b6dd | [
"Apache-2.0"
] | null | null | null | Fisher_Iris_Script.py | eimearbutler7/Sem-1-Python-Project | 42306ac2bef920050b6a089e7c69c652c780b6dd | [
"Apache-2.0"
] | null | null | null | import statistics #ensures Python has all the capabilities from the Python Standard Library to identify and execute statistic functions e.g. statistics.mean()
import csv #ensures python has all the capabiliites from the Python Standard Library to open and work with csv files
import matplotlib.pyplot as plt #ensures Python has the capabilities from the Python Standard Library to create a Histogram
import matplotlib.mlab as mlab #ensures Python has the capabilities from the Python Standard Library to create a Histogram
# Print the project banner, column headers, then a right-aligned dump of the
# raw iris.csv rows (commas removed, columns padded for readability).
print("Fisher's Iris Data, Sem-1-Python-Project, Eimear Butler April 2018")
print()
print("Petal Length ", "Petal Width ", "Sepal Length ", "Sepal Width")
row_layout = "{:>7} {:>12} {:>13} {:>12}"
with open("iris.csv") as f:
    for record in f:
        fields = record.split(',')
        print(row_layout.format(fields[0], fields[1], fields[2], fields[3]))
print()
z = "Petal Length"  # attribute (column) under analysis
r = 0  # CSV column index for this attribute

# Read column r of iris.csv into a list of floats.
# NOTE(review): assumes iris.csv is headerless with this attribute in column 0 — confirm.
y = []
with open('iris.csv') as f:
    for line in f:
        y.append(line.split(',')[r])
x = [float(i) for i in y]

# Summary statistics. FIX: the original computed mean/stdev twice (once for the
# printed values, again as mu/sigma for the histogram overlay); compute once and reuse.
mu = statistics.mean(x)
sigma = statistics.stdev(x)
a = "%.3f" % mu
b = "%.1f" % statistics.median(x)
c = "%.1f" % max(x)
d = "%.1f" % min(x)
e = "%.3f" % sigma
f = "%.3f" % statistics.variance(x)

print("From Fisher's Iris Data, the %s has been interrogated as follows:" %(z,))
print("The mean of %s is: %s" %(z, a))
print("The median of %s is: %s" %(z, b))
print("The maximum of %s is: %s" %(z, c))
print("The minimum of %s is: %s" %(z, d))
print("The standard deviation of %s is: %s" %(z, e))
print("The standard variance of %s is: %s" %(z, f))
print()
print("A histogram (Figure %s) representing %s distribution has also printed using the matplotlib function" %(r+1, z))
print()

# Normalised histogram with a fitted normal curve overlaid.
bins = 50  # number of bars in the histogram
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %s: Histogram of %s'%(r+1, z))
plt.axis([4, 8, 0, 1])  # axis limits chosen manually for this attribute
y = mlab.normpdf(bins, mu, sigma)  # normal curve from the sample mean/stdev
plt.plot(bins, y, 'r--')
plt.show()
z = "Petal Width"  # attribute (column) under analysis
r = 1  # CSV column index for this attribute

# Read column r of iris.csv into a list of floats.
# NOTE(review): assumes iris.csv is headerless with this attribute in column 1 — confirm.
y = []
with open('iris.csv') as f:
    for line in f:
        y.append(line.split(',')[r])
x = [float(i) for i in y]

# Summary statistics. FIX: the original computed mean/stdev twice (once for the
# printed values, again as mu/sigma for the histogram overlay); compute once and reuse.
mu = statistics.mean(x)
sigma = statistics.stdev(x)
a = "%.3f" % mu
b = "%.1f" % statistics.median(x)
c = "%.1f" % max(x)
d = "%.1f" % min(x)
e = "%.3f" % sigma
f = "%.3f" % statistics.variance(x)

print("From Fisher's Iris Data, the %s has been interrogated as follows:" %(z,))
print("The mean of %s is: %s" %(z, a))
print("The median of %s is: %s" %(z, b))
print("The maximum of %s is: %s" %(z, c))
print("The minimum of %s is: %s" %(z, d))
print("The standard deviation of %s is: %s" %(z, e))
print("The standard variance of %s is: %s" %(z, f))
print()
print("A histogram (Figure %s) representing %s distribution has also printed using the matplotlib function" %(r+1, z))
print()

# Normalised histogram with a fitted normal curve overlaid.
bins = 50  # number of bars in the histogram
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %s: Histogram of %s'%(r+1, z))
plt.axis([2, 5, 0, 2])  # axis limits chosen manually for this attribute
y = mlab.normpdf(bins, mu, sigma)  # normal curve from the sample mean/stdev
plt.plot(bins, y, 'r--')
plt.show()
z = "Sepal Length"  # attribute (column) under analysis
r = 2  # CSV column index for this attribute

# Read column r of iris.csv into a list of floats.
# NOTE(review): assumes iris.csv is headerless with this attribute in column 2 — confirm.
y = []
with open('iris.csv') as f:
    for line in f:
        y.append(line.split(',')[r])
x = [float(i) for i in y]

# Summary statistics. FIX: compute mean/stdev once and reuse for the overlay.
mu = statistics.mean(x)
sigma = statistics.stdev(x)
a = "%.3f" % mu
b = "%.1f" % statistics.median(x)
c = "%.1f" % max(x)
d = "%.1f" % min(x)
e = "%.3f" % sigma
f = "%.3f" % statistics.variance(x)

print("From Fisher's Iris Data, the %s has been interrogated as follows:" %(z,))
print("The mean of %s is: %s" %(z, a))
print("The median of %s is: %s" %(z, b))
print("The maximum of %s is: %s" %(z, c))
print("The minimum of %s is: %s" %(z, d))
print("The standard deviation of %s is: %s" %(z, e))
print("The standard variance of %s is: %s" %(z, f))
print()
print("A histogram (Figure %s) representing %s distribution has also printed using the matplotlib function" %(r+1, z))
print()

# Full-sample normalised histogram with a fitted normal curve.
bins = 50
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %s: Histogram of %s'%(r+1, z))
plt.axis([0, 7, 0, 1])  # axis limits chosen manually for this attribute
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.show()

# The full-sample histogram suggests two distinct curves, so plot the species
# sub-populations separately. Keep the complete column, because each subset
# must be sliced from the FULL data.
full_x = x

# Iris-versicolor and Iris-virginica: rows 50..149 of the column.
x = full_x[50:]
mu = statistics.mean(x)
sigma = statistics.stdev(x)
bins = 50
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %sa: Histogram of %s for Iris-versicolor and Iris-virginica Only'%(r+1, z))
plt.axis([0, 7, 0, 1])
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.show()

# Iris-setosa: the FIRST 50 rows of the FULL column.
# BUG FIX: the original did x = x[:49] AFTER x had already been re-assigned to
# x[50:], so it actually plotted 49 versicolor/virginica values labelled as
# setosa — and [:49] was off by one versus the stated "index 0 to 49" anyway.
x = full_x[:50]
mu = statistics.mean(x)
sigma = statistics.stdev(x)
bins = 50
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %sb: Histogram of %s for Iris-setosa Only'%(r+1, z))
plt.axis([0, 7, 0, 4])
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.show()

print("Two further Histograms are also printed representing %s distribution for just Iris-versicolor and Iris-virginica (Figure %sa) and Iris-setosa (Figure %sb) " %(z, r+1, r+1))
print()
z = "Sepal Width"  # attribute (column) under analysis
r = 3  # CSV column index for this attribute

# Read column r of iris.csv into a list of floats.
# NOTE(review): assumes iris.csv is headerless with this attribute in column 3 — confirm.
y = []
with open('iris.csv') as f:
    for line in f:
        y.append(line.split(',')[r])
x = [float(i) for i in y]

# Summary statistics. FIX: compute mean/stdev once and reuse for the overlay.
mu = statistics.mean(x)
sigma = statistics.stdev(x)
a = "%.3f" % mu
b = "%.1f" % statistics.median(x)
c = "%.1f" % max(x)
d = "%.1f" % min(x)
e = "%.3f" % sigma
f = "%.3f" % statistics.variance(x)

print("From Fisher's Iris Data, the %s has been interrogated as follows:" %(z,))
print("The mean of %s is: %s" %(z, a))
print("The median of %s is: %s" %(z, b))
print("The maximum of %s is: %s" %(z, c))
print("The minimum of %s is: %s" %(z, d))
print("The standard deviation of %s is: %s" %(z, e))
print("The standard variance of %s is: %s" %(z, f))
print()
print("A histogram (Figure %s) representing %s distribution has also printed using the matplotlib function" %(r+1, z))
print()

# Full-sample normalised histogram with a fitted normal curve.
bins = 50
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %s: Histogram of %s'%(r+1, z))
plt.axis([0, 3, 0, 4])  # axis limits chosen manually for this attribute
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.show()

# The full-sample histogram suggests two distinct curves, so plot the species
# sub-populations separately. Keep the complete column, because each subset
# must be sliced from the FULL data.
full_x = x

# Iris-versicolor and Iris-virginica: rows 50..149 of the column.
x = full_x[50:]
mu = statistics.mean(x)
sigma = statistics.stdev(x)
bins = 50
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %sa: Histogram of %s for Iris-versicolor and Iris-virginica Only'%(r+1, z))
plt.axis([0, 3, 0, 5])
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.show()

# Iris-setosa: the FIRST 50 rows of the FULL column.
# BUG FIX: the original did x = x[:49] AFTER x had already been re-assigned to
# x[50:], so it actually plotted 49 versicolor/virginica values labelled as
# setosa — and [:49] was off by one versus the stated "index 0 to 49" anyway.
x = full_x[:50]
mu = statistics.mean(x)
sigma = statistics.stdev(x)
bins = 50
n, bins, patches = plt.hist(x, bins, normed=1, facecolor='g', alpha=0.1)
plt.xlabel('%s' %(z))
plt.ylabel('Frequency')
plt.title('Figure %sb: Histogram of %s for Iris-setosa Only'%(r+1, z))
plt.axis([0, 3, 0, 15])
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.show()

print("Two further Histograms are also printed representing %s distribution for just Iris-versicolor and Iris-virginica (Figure %sa) and Iris-setosa (Figure %sb) " %(z, r+1, r+1))
print()
| 41.816609 | 247 | 0.681092 |
becc19a9977e2a4d05ed227f5daa776840eb9ecf | 1,542 | py | Python | libs/configs/kl_cfgs.py | Artcs1/PROBIOU | 86ba88b470452a66734257a0abb5e51ec7680df8 | [
"Apache-2.0"
] | 1 | 2021-11-19T14:41:35.000Z | 2021-11-19T14:41:35.000Z | libs/configs/kl_cfgs.py | Artcs1/PROBIOU | 86ba88b470452a66734257a0abb5e51ec7680df8 | [
"Apache-2.0"
] | null | null | null | libs/configs/kl_cfgs.py | Artcs1/PROBIOU | 86ba88b470452a66734257a0abb5e51ec7680df8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from libs.configs._base_.models.retinanet_r50_fpn import *
from libs.configs._base_.datasets.dota_detection import *
from libs.configs._base_.schedules.schedule_1x import *
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
# schedule
BATCH_SIZE = 1
GPU_GROUP = "0,1,2,3"
NUM_GPU = len(GPU_GROUP.strip().split(','))
LR = 1e-3
SAVE_WEIGHTS_INTE = 10000
DECAY_STEP = np.array(DECAY_EPOCH, np.int32) * SAVE_WEIGHTS_INTE
MAX_ITERATION = SAVE_WEIGHTS_INTE * MAX_EPOCH
WARM_SETP = int(WARM_EPOCH * SAVE_WEIGHTS_INTE)
# dataset
DATASET_NAME = 'HRSC2016'
IMG_SHORT_SIDE_LEN = 512
IMG_MAX_LENGTH = 512
CLASS_NUM = 1
# data augmentation
IMG_ROTATE = True
RGB2GRAY = True
VERTICAL_FLIP = True
HORIZONTAL_FLIP = True
IMAGE_PYRAMID = False
# model
pretrain_zoo = PretrainModelZoo()
PRETRAINED_CKPT = pretrain_zoo.pretrain_weight_path(NET_NAME, ROOT_PATH)
TRAINED_CKPT = os.path.join(ROOT_PATH, 'output/trained_weights')
# loss
CLS_WEIGHT = 1.0
REG_WEIGHT = 1.0
REG_LOSS_MODE = 11 # KLD loss
KL_TAU = 2.0
KL_FUNC = 0 # 0: sqrt 1: log
VERSION = 'RetinaNet_HRSC2016_KL_1x_20210204'
"""
RetinaNet-H + kl + sqrt tau=2
cls : ship|| Recall: 0.9617263843648208 || Precison: 0.6605145413870246|| AP: 0.8745101996155792
F1:0.9070372542296279 P:0.8965791567223548 R:0.9177524429967426
mAP is : 0.8745101996155792 444/444 [00:21<00:00, 21.04it/s]
87.45 87.03 86.72 85.45 76.60 72.39 50.65 27.68 3.91 0.17
"""
| 25.7 | 96 | 0.763294 |
6bc1edf75173fd1fbdf8babec43626a9aa7958f8 | 1,221 | py | Python | blogs/migrations/0001_initial.py | indoriyasboyz/Multipurpose_website | ffabe2316b070c4848b6b1d69aba2358abfca460 | [
"MIT"
] | null | null | null | blogs/migrations/0001_initial.py | indoriyasboyz/Multipurpose_website | ffabe2316b070c4848b6b1d69aba2358abfca460 | [
"MIT"
] | null | null | null | blogs/migrations/0001_initial.py | indoriyasboyz/Multipurpose_website | ffabe2316b070c4848b6b1d69aba2358abfca460 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-03-24 10:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the blogs app (auto-generated by Django 3.0.4):
    # creates the Blogtags lookup table and the Blogs table referencing it.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Blogtags',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Title', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Blogs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Title', models.CharField(max_length=30)),
                ('Date', models.DateField(auto_now_add=True)),  # set once at creation
                ('S_Description', models.TextField()),
                ('L_Description', models.TextField()),
                ('Author', models.CharField(max_length=50)),
                ('Image', models.ImageField(upload_to='pics')),
                # PROTECT: a Blogtags row cannot be deleted while blogs reference it.
                ('Tags', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='blogs.Blogtags')),
            ],
        ),
    ]
| 33.916667 | 121 | 0.566749 |
ecd1a39d0441aacca5c37b41c5545b2fa192c04a | 509 | py | Python | core/group.py | yeti-threatintel/yeti | 9e8b76cd393f149c4990ead003902eac50c1766d | [
"Apache-2.0"
] | 1,250 | 2017-03-12T16:20:47.000Z | 2022-03-29T02:12:11.000Z | core/group.py | yeti-threatintel/yeti | 9e8b76cd393f149c4990ead003902eac50c1766d | [
"Apache-2.0"
] | 540 | 2017-03-20T16:45:35.000Z | 2022-03-22T16:55:02.000Z | core/group.py | yeti-threatintel/yeti | 9e8b76cd393f149c4990ead003902eac50c1766d | [
"Apache-2.0"
] | 293 | 2017-03-20T13:59:07.000Z | 2022-03-28T16:00:10.000Z | from __future__ import unicode_literals
from mongoengine import BooleanField, StringField, ListField, ReferenceField, CASCADE
from core.database import YetiDocument
from core.user import User
class Group(YetiDocument):
    """A named group of Yeti users, with separate member and admin lists."""
    enabled = BooleanField(required=True, default=True)   # disabled groups keep their data
    groupname = StringField(required=True, unique=True)   # unique group identifier
    # NOTE(review): reverse_delete_rule=CASCADE on a reference deletes the
    # referencing document itself when the User is removed; if the intent was
    # to only drop the user from the list, PULL may be needed — confirm.
    members = ListField(ReferenceField(User, reverse_delete_rule=CASCADE))
    admins = ListField(ReferenceField(User, reverse_delete_rule=CASCADE))

    SEARCH_ALIASES = {}  # no alternate search field names for this document type
| 31.8125 | 85 | 0.795678 |
5abe10fab17596c1721c43eb964c3daea2242dea | 2,099 | py | Python | clioude-server/orm.py | ccw630/CliOuDE | 743540c7c14d7f95455219efe8be01817e96f96b | [
"MIT"
] | 3 | 2020-07-22T02:17:23.000Z | 2021-03-10T12:48:46.000Z | orm.py | ccw630/CLIOUDE-Server | 4dc129e5bc57caead2df0107f02671d74c7808f1 | [
"MIT"
] | 5 | 2020-07-24T07:39:43.000Z | 2022-02-27T08:32:49.000Z | orm.py | ccw630/CLIOUDE-Server | 4dc129e5bc57caead2df0107f02671d74c7808f1 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine, Column, Text, Integer, Float, DateTime, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from datetime import datetime, timedelta
import os
# Engine/session wiring at import time.
# NOTE(review): DB_URL must be set in the environment; os.getenv returns None
# otherwise and create_engine will fail — confirm deployment always sets it.
engine = create_engine(os.getenv('DB_URL'))
Base = declarative_base()            # declarative base class for the ORM models
session = sessionmaker(bind=engine)  # session factory bound to the engine
db = session()                       # module-level shared session used by model classmethods
class Worker(Base):
    """ORM model for a registered compute worker node (table ``workers``)."""
    __tablename__ = 'workers'
    id = Column(Integer, primary_key=True)
    hostname = Column(Text, nullable=False, index=True)  # node identity; upsert key
    version = Column(Text, nullable=False)               # worker software version string
    cpu_core = Column(Integer, nullable=False)
    memory_usage = Column(Float, nullable=False)
    cpu_usage = Column(Float, nullable=False)
    last_heartbeat = Column(DateTime, nullable=False)    # refreshed on every upsert
    create_time = Column(DateTime, nullable=False, default=datetime.now)  # callable: evaluated at insert time
    task_number = Column(Integer, nullable=False, default=0)
    service_url = Column(Text, nullable=False)

    @classmethod
    def choose_worker(cls):
        """Return the least-loaded (lowest cpu_usage) worker whose heartbeat is
        within the last 6 seconds, or None if no worker is currently alive."""
        worker = db.query(cls).filter(cls.last_heartbeat + timedelta(seconds=6) >= datetime.now()).order_by(cls.cpu_usage).first()
        if not worker:
            return None
        return worker

    @classmethod
    def upsert_worker(cls, hostname, version, cpu_core, memory_usage, cpu_usage, service_url):
        """Insert or update the worker row keyed by hostname, refreshing its
        stats and heartbeat. Rolls back and re-raises on any DB error."""
        try:
            worker = db.query(cls).filter(cls.hostname == hostname).first()
            if not worker:
                # first heartbeat from this host: create the row
                worker = cls(hostname=hostname)
            worker.version = version
            worker.cpu_core = cpu_core
            worker.memory_usage = memory_usage
            worker.cpu_usage = cpu_usage
            worker.service_url = service_url
            worker.last_heartbeat = datetime.now()
            db.add(worker)
            db.commit()
        except Exception as e:
            db.rollback()
            raise e

    @classmethod
    def cull_worker(cls):
        """Delete workers whose last heartbeat is older than 600 seconds.
        Rolls back and re-raises on any DB error."""
        try:
            db.query(cls).filter(cls.last_heartbeat + timedelta(seconds=600) < datetime.now()).delete()
            db.commit()
        except Exception as e:
            db.rollback()
            raise e
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.