id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8152111 | <gh_stars>1-10
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# %% [markdown]
# # Process the raw mooring data
#
# Contents:
# * <a href=#raw>Raw data reprocessing.</a>
# * <a href=#corrected>Interpolated data processing.</a>
# * <a href=#ADCP>ADCP processing.</a>
# * <a href=#VMP>VMP processing.</a>
#
# Import the needed libraries.
# %%
import datetime
import glob
import os
import gsw
import numpy as np
import numpy.ma as ma
import scipy.integrate as igr
import scipy.interpolate as itpl
import scipy.io as io
import scipy.signal as sig
import seawater
import xarray as xr
from matplotlib import path
import munch
import load_data
import moorings as moo
import utils
from oceans.sw_extras import gamma_GP_from_SP_pt
# Data directory
data_in = os.path.expanduser("../data")
data_out = data_in
def esum(ea, eb):
    """Combine two independent errors in quadrature."""
    squared_sum = ea ** 2 + eb ** 2
    return squared_sum ** 0.5
def emult(a, b, ea, eb):
    """Propagate independent errors through a product.

    Returns the error of a * b given errors ea and eb. Computed as
    sqrt((b * ea)**2 + (a * eb)**2), which is algebraically identical to
    |a * b| * sqrt((ea / a)**2 + (eb / b)**2) but remains well defined
    when a or b is zero (the original form divided by a and b and
    produced NaN/inf there).
    """
    return np.sqrt((b * ea) ** 2 + (a * eb) ** 2)
# %% [markdown]
# <a id="raw"></a>
# %% [markdown]
# ## Process raw data into a more convenient format
#
# Parameters for raw processing.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
t_start = 734494.0
# Length of time series
max_len = N_data = 42048
# Data file
raw_data_file = "moorings.mat"
# Index where NaNs start in u and v data from SW mooring
sw_vel_nans = 14027
# Sampling period (minutes)
dt_min = 15.0
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 9
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiple N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# When bandpass filtering windowed data use these params multiplied by f and N
filtlo = 0.9 # times f
filthi = 1.1 # times N
# Interpolation distance that raises flag (m)
zimax = 100.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# %% ############### PROCESS RAW DATA #########################################
print("RAW DATA")
###############################################################################
# Load w data for cc mooring and chop from text files. I checked and all the
# data has the same start date and the same length
print("Loading vertical velocity data from text files.")
nortek_files = glob.glob(os.path.join(data_in, "cc_1_*.txt"))
# Instrument depth is read from header line 4 of each file ("... = <depth> ...").
depth = []
for file in nortek_files:
    with open(file, "r") as f:
        content = f.readlines()
    depth.append(int(content[3].split("=")[1].split()[0]))
idxs = np.argsort(depth)  # Process files in order of increasing depth.
# NOTE(review): 42573 rows x 12 instruments is hard-coded — presumably every
# file has exactly this length; confirm if the input files ever change.
w = np.empty((42573, 12))
datenum = np.empty((42573, 12))
for i in idxs:
    # Columns: year, month, day, fractional hour, w (cm/s).
    YY, MM, DD, hh, W = np.genfromtxt(
        nortek_files[i], skip_header=12, usecols=(0, 1, 2, 3, 8), unpack=True
    )
    YY = YY.astype(int)
    MM = MM.astype(int)
    DD = DD.astype(int)
    mm = (60 * (hh % 1)).astype(int)  # Minutes from the fractional hour.
    hh = np.floor(hh).astype(int)
    w[:, i] = W / 100  # cm/s -> m/s.
    dates = []
    for j in range(len(YY)):
        dates.append(datetime.datetime(YY[j], MM[j], DD[j], hh[j], mm[j]))
    dates = np.asarray(dates)
    datenum[:, i] = utils.datetime_to_datenum(dates)
# Chop to the common analysis window starting at t_start.
idx_start = np.searchsorted(datenum[:, 0], t_start)
w = w[idx_start : idx_start + max_len]
# Start prepping raw data from the mat file.
print("Loading raw data file.")
data_path = os.path.join(data_in, raw_data_file)
ds = utils.loadmat(data_path)
# Pull out the five mooring structures: centre (cc) plus four corners.
cc = ds.pop("c")
nw = ds.pop("nw")
ne = ds.pop("ne")
se = ds.pop("se")
sw = ds.pop("sw")
cc["id"] = "cc"
nw["id"] = "nw"
ne["id"] = "ne"
se["id"] = "se"
sw["id"] = "sw"
moorings = [cc, nw, ne, se, sw]
# Useful information
dt_min = 15.0  # Sample period in minutes.
dt_sec = dt_min * 60.0  # Sample period in seconds.
dt_day = dt_sec / 86400.0  # Sample period in days.
print("Chopping time series.")
# First pass: locate the common start index for each mooring.
for m in moorings:
    m["idx_start"] = np.searchsorted(m["Dates"], t_start)
for m in moorings:
    m["N_data"] = max_len
    m["idx_end"] = m["idx_start"] + max_len
# Chop data to start and end dates.
varl = ["Dates", "Temp", "Sal", "u", "v", "Pres"]
for m in moorings:
    for var in varl:
        m[var] = m[var][m["idx_start"] : m["idx_end"], ...]
print("Renaming variables.")
print("Interpolating negative pressures.")
for m in moorings:
__, N_levels = m["Pres"].shape
m["N_levels"] = N_levels
# Tile time and pressure
m["t"] = np.tile(m.pop("Dates")[:, np.newaxis], (1, N_levels))
# Fix negative pressures by interpolating nearby data.
fix = m["Pres"] < 0.0
if fix.any():
levs = np.argwhere(np.any(fix, axis=0))[0]
for lev in levs:
x = m["t"][fix[:, lev], lev]
xp = m["t"][~fix[:, lev], lev]
fp = m["Pres"][~fix[:, lev], lev]
m["Pres"][fix[:, lev], lev] = np.interp(x, xp, fp)
# Rename variables
m["P"] = m.pop("Pres")
m["u"] = m["u"] / 100.0
m["v"] = m["v"] / 100.0
m["spd"] = np.sqrt(m["u"] ** 2 + m["v"] ** 2)
m["angle"] = np.angle(m["u"] + 1j * m["v"])
m["Sal"][(m["Sal"] < 33.5) | (m["Sal"] > 34.9)] = np.nan
m["S"] = m.pop("Sal")
m["Temp"][m["Temp"] < -2.0] = np.nan
m["T"] = m.pop("Temp")
# Dimensional quantities.
m["f"] = gsw.f(m["lat"])
m["ll"] = np.array([m["lon"], m["lat"]])
m["z"] = gsw.z_from_p(m["P"], m["lat"])
# Estimate thermodynamic quantities.
m["SA"] = gsw.SA_from_SP(m["S"], m["P"], m["lon"], m["lat"])
m["CT"] = gsw.CT_from_t(m["SA"], m["T"], m["P"])
# specvol_anom = gsw.specvol_anom(m['SA'], m['CT'], m['P'])
# m['sva'] = specvol_anom
cc["wr"] = w
print("Calculating thermodynamics.")
print("Excluding bad data using T-S funnel.")
# Chuck out data outside of TS funnel sensible range.
funnel = np.genfromtxt("funnel.txt")
for m in moorings:
S = m["SA"].flatten()
T = m["CT"].flatten()
p = path.Path(funnel)
in_funnel = p.contains_points(np.vstack((S, T)).T)
fix = np.reshape(~in_funnel, m["SA"].shape)
m["in_funnel"] = ~fix
varl = ["S"]
if fix.any():
levs = np.squeeze(np.argwhere(np.any(fix, axis=0)))
for lev in levs:
x = m["t"][fix[:, lev], lev]
xp = m["t"][~fix[:, lev], lev]
for var in varl:
fp = m[var][~fix[:, lev], lev]
m[var][fix[:, lev], lev] = np.interp(x, xp, fp)
# Re-estimate thermodynamic quantities.
m["SA"] = gsw.SA_from_SP(m["S"], m["P"], m["lon"], m["lat"])
m["CT"] = gsw.CT_from_t(m["SA"], m["T"], m["P"])
print("Calculating neutral density.")
# Estimate the neutral density
for m in moorings:
# Compute potential temperature using the 1983 UNESCO EOS.
m["PT0"] = seawater.ptmp(m["S"], m["T"], m["P"])
# Flatten variables for analysis.
lons = m["lon"] * np.ones_like(m["P"])
lats = m["lat"] * np.ones_like(m["P"])
S_ = m["S"].flatten()
T_ = m["PT0"].flatten()
P_ = m["P"].flatten()
LO_ = lons.flatten()
LA_ = lats.flatten()
gamman = gamma_GP_from_SP_pt(S_, T_, P_, LO_, LA_)
m["gamman"] = np.reshape(gamman, m["P"].shape) + 1000.0
print("Calculating slice gradients at C.")
# Want gradient of density/vel to be local, no large central differences.
slices = [slice(0, 4), slice(4, 6), slice(6, 10), slice(10, 12)]
cc["dgdz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dTdz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dudz"] = np.empty((cc["N_data"], cc["N_levels"]))
cc["dvdz"] = np.empty((cc["N_data"], cc["N_levels"]))
for sl in slices:
z = cc["z"][:, sl]
g = cc["gamman"][:, sl]
T = cc["T"][:, sl]
u = cc["u"][:, sl]
v = cc["v"][:, sl]
cc["dgdz"][:, sl] = np.gradient(g, axis=1) / np.gradient(z, axis=1)
cc["dTdz"][:, sl] = np.gradient(T, axis=1) / np.gradient(z, axis=1)
cc["dudz"][:, sl] = np.gradient(u, axis=1) / np.gradient(z, axis=1)
cc["dvdz"][:, sl] = np.gradient(v, axis=1) / np.gradient(z, axis=1)
print("Filtering data.")
# Low pass filter data.
tc = tc_hrs * 60.0 * 60.0
fc = 1.0 / tc # Cut off frequency.
normal_cutoff = fc * dt_sec * 2.0 # Nyquist frequency is half 1/dt.
b, a = sig.butter(4, normal_cutoff, btype="lowpass")
varl = [
"z",
"P",
"S",
"T",
"u",
"v",
"wr",
"SA",
"CT",
"gamman",
"dgdz",
"dTdz",
"dudz",
"dvdz",
] # sva
for m in moorings:
for var in varl:
try:
data = m[var].copy()
except KeyError:
continue
m[var + "_m"] = np.nanmean(data, axis=0)
# For the purpose of filtering set fill with 0 rather than nan (SW)
nans = np.isnan(data)
if nans.any():
data[nans] = 0.0
datalo = sig.filtfilt(b, a, data, axis=0)
# Then put nans back...
if nans.any():
datalo[nans] = np.nan
namelo = var + "_lo"
m[namelo] = datalo
namehi = var + "_hi"
m[namehi] = m[var] - m[namelo]
m["spd_lo"] = np.sqrt(m["u_lo"] ** 2 + m["v_lo"] ** 2)
m["angle_lo"] = ma.angle(m["u_lo"] + 1j * m["v_lo"])
m["spd_hi"] = np.sqrt(m["u_hi"] ** 2 + m["v_hi"] ** 2)
m["angle_hi"] = ma.angle(m["u_hi"] + 1j * m["v_hi"])
# %% [markdown]
# Save the raw data.
# %% ##################### SAVE RAW DATA ######################################
io.savemat(os.path.join(data_out, "C_raw.mat"), cc)
io.savemat(os.path.join(data_out, "NW_raw.mat"), nw)
io.savemat(os.path.join(data_out, "NE_raw.mat"), ne)
io.savemat(os.path.join(data_out, "SE_raw.mat"), se)
io.savemat(os.path.join(data_out, "SW_raw.mat"), sw)
# %% [markdown]
# ## Create virtual mooring 'raw'.
# %%
print("VIRTUAL MOORING")
print("Determine maximum knockdown as a function of z.")
# SE is excluded from the fit — presumably its knockdown behaviour is
# unrepresentative; TODO confirm.
zms = np.hstack([m["z"].max(axis=0) for m in moorings if "se" not in m["id"]])
# Dz = deepest minus shallowest excursion per instrument (max knockdown, < 0).
Dzs = np.hstack(
    [m["z"].min(axis=0) - m["z"].max(axis=0) for m in moorings if "se" not in m["id"]]
)
zmax_pfit = np.polyfit(zms, Dzs, 2)  # Second order polynomial for max knockdown
np.save(
    os.path.join(data_out, "zmax_pfit"), np.polyfit(zms, Dzs, 2), allow_pickle=False
)
# Define the knockdown model:
def zmodel(u, zmax, zmax_pfit):
    """Knockdown model: resting depth plus a cubic-in-speed displacement.

    The maximum knockdown is a polynomial (zmax_pfit) of the resting
    depth zmax, scaled by the normalised speed u cubed.
    """
    knockdown = np.polyval(zmax_pfit, zmax) * u ** 3
    return zmax + knockdown
print("Load model data.")
mluv = xr.load_dataset("../data/mooring_locations_uv1.nc")
mluv = mluv.isel(
t=slice(0, np.argwhere(mluv.u[:, 0, 0].data == 0)[0][0])
) # Get rid of end zeros...
mluv = mluv.assign_coords(lon=mluv.lon)
mluv = mluv.assign_coords(id=["cc", "nw", "ne", "se", "sw"])
mluv["spd"] = (mluv.u ** 2 + mluv.v ** 2) ** 0.5
print("Create virtual mooring 'raw' dataset.")
savedict = {
"cc": {"id": "cc"},
"nw": {"id": "nw"},
"ne": {"id": "ne"},
"se": {"id": "se"},
"sw": {"id": "sw"},
}
mids = ["cc", "nw", "ne", "se", "sw"]
def nearidx(a, v):
    """Return the index of the element of `a` closest to the value `v`."""
    distances = np.abs(np.asarray(a) - v)
    return distances.argmin()
# Build a synthetic "raw" record per mooring by sampling the model velocity
# at knocked-down instrument depths predicted from the model speed.
for idx, mid in enumerate(mids):
    savedict[mid]["lon"] = mluv.lon[idx].data
    savedict[mid]["lat"] = mluv.lat[idx].data
    # Model depth indices closest to each real instrument's resting depth.
    izs = []
    for i in range(moorings[idx]["N_levels"]):
        izs.append(nearidx(mluv.z, moorings[idx]["z"][:, i].max()))
    spdm = mluv.spd.isel(z=izs, index=idx).mean(dim="z")
    spdn = spdm / spdm.max()  # Speed normalised to [0, 1].
    zmax = mluv.z[izs]
    # Knocked-down instrument depth as a function of time and level.
    zk = zmodel(spdn.data[:, np.newaxis], zmax.data[np.newaxis, :], zmax_pfit)
    savedict[mid]["z"] = zk
    savedict[mid]["t"] = np.tile(
        mluv.t.data[:, np.newaxis], (1, moorings[idx]["N_levels"])
    )
    # Sample model velocity at the knocked-down depths (spline in t and z;
    # z negated so the spline's second coordinate is increasing).
    fu = itpl.RectBivariateSpline(mluv.t.data, -mluv.z.data, mluv.u[..., idx].data)
    fv = itpl.RectBivariateSpline(mluv.t.data, -mluv.z.data, mluv.v[..., idx].data)
    uk = fu(mluv.t.data[:, np.newaxis], -zk, grid=False)
    vk = fv(mluv.t.data[:, np.newaxis], -zk, grid=False)
    savedict[mid]["u"] = uk
    savedict[mid]["v"] = vk
io.savemat("../data/virtual_mooring_raw.mat", savedict)
# %% [markdown]
# ## Create virtual mooring 'interpolated'.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
# t_start = 734494.0
# Length of time series
# max_len = N_data = 42048
# Sampling period (minutes)
dt_min = 60.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 7
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiple N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# %%
moorings = utils.loadmat("../data/virtual_mooring_raw.mat")
cc = moorings.pop("cc")
nw = moorings.pop("nw")
ne = moorings.pop("ne")
se = moorings.pop("se")
sw = moorings.pop("sw")
moorings = [cc, nw, ne, se, sw]
N_data = cc["t"].shape[0]
# %% [markdown]
# Polynomial fits first.
# %%
print("**Generating corrected data**")
# Generate corrected moorings
z = np.concatenate([m["z"].flatten() for m in moorings])
u = np.concatenate([m["u"].flatten() for m in moorings])
v = np.concatenate([m["v"].flatten() for m in moorings])
print("Calculating polynomial coefficients.")
pzu = np.polyfit(z, u, 2)
pzv = np.polyfit(z, v, 2)
# %%
# Additional height in m to add to interpolation height.
hoffset = [-25.0, 50.0, -50.0, 100.0]
pi2 = np.pi * 2.0
nfft = nperseg
levis = [(0, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11)]
Nclevels = len(levis)
spec_kwargs = {
"fs": 1.0 / dt_sec,
"window": window,
"nperseg": nperseg,
"nfft": nfft,
"detrend": detrend,
"axis": 0,
}
idx1 = np.arange(nperseg, N_data, nperseg // 2) # Window end index
idx0 = idx1 - nperseg # Window start index
N_windows = len(idx0)
# Initialise the place holder dictionaries.
c12w = {"N_levels": 12} # Dictionary for raw, windowed data from central mooring
c4w = {"N_levels": Nclevels} # Dictionary for processed, windowed data
c4 = {"N_levels": Nclevels} # Dictionary for processed data
# Dictionaries for raw, windowed data from outer moorings
nw5w, ne5w, se5w, sw5w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings5w = [nw5w, ne5w, se5w, sw5w]
# Dictionaries for processed, windowed data from outer moorings
nw4w, ne4w, se4w, sw4w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings4w = [nw4w, ne4w, se4w, sw4w]
# Initialised the arrays of windowed data
varr = ["t", "z", "u", "v"]
for var in varr:
c12w[var] = np.zeros((nperseg, N_windows, 12))
var4 = [
"t",
"z",
"u",
"v",
"dudx",
"dvdx",
"dudy",
"dvdy",
"dudz",
"dvdz",
"nstrain",
"sstrain",
"vort",
"div",
]
for var in var4:
c4w[var] = np.zeros((nperseg, N_windows, Nclevels))
for var in var4:
c4[var] = np.zeros((N_windows, Nclevels))
# Initialised the arrays of windowed data for outer mooring
varro = ["z", "u", "v"]
for var in varro:
for m5w in moorings5w:
m5w[var] = np.zeros((nperseg, N_windows, 5))
var4o = ["z", "u", "v"]
for var in var4o:
for m4w in moorings4w:
m4w[var] = np.zeros((nperseg, N_windows, Nclevels))
# for var in var4o:
# for m4 in moorings4:
# m4[var] = np.zeros((N_windows, 4))
# Window the raw data.
for i in range(N_windows):
idx = idx0[i]
for var in varr:
c12w[var][:, i, :] = cc[var][idx : idx + nperseg, :]
for i in range(N_windows):
idx = idx0[i]
for var in varro:
for m5w, m in zip(moorings5w, moorings[1:]):
m5w[var][:, i, :] = m[var][idx : idx + nperseg, :]
print("Interpolating properties.")
# Do the interpolation
for i in range(Nclevels):
# THIS hoffset is important!!!
c4["z"][:, i] = np.mean(c12w["z"][..., levis[i]], axis=(0, -1)) + hoffset[i]
for j in range(N_windows):
zr = c12w["z"][:, j, levis[i]]
ur = c12w["u"][:, j, levis[i]]
vr = c12w["v"][:, j, levis[i]]
zi = c4["z"][j, i]
c4w["z"][:, j, i] = np.mean(zr, axis=-1)
c4w["t"][:, j, i] = c12w["t"][:, j, 0]
c4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
c4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
dudzr = np.gradient(ur, axis=-1) / np.gradient(zr, axis=-1)
dvdzr = np.gradient(vr, axis=-1) / np.gradient(zr, axis=-1)
# Instead of mean, could moo.interp1d
c4w["dudz"][:, j, i] = np.mean(dudzr, axis=-1)
c4w["dvdz"][:, j, i] = np.mean(dvdzr, axis=-1)
for m5w, m4w in zip(moorings5w, moorings4w):
zr = m5w["z"][:, j, :]
ur = m5w["u"][:, j, :]
vr = m5w["v"][:, j, :]
m4w["z"][:, j, i] = np.full((nperseg), zi)
m4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
m4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
print("Filtering windowed data.")
fcorcpd = np.abs(gsw.f(cc["lat"])) * 86400 / pi2
varl = ["u", "v"]
for var in varl:
c4w[var + "_lo"] = utils.butter_filter(
c4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
c4w[var + "_hi"] = c4w[var] - c4w[var + "_lo"]
varl = ["u", "v"]
for var in varl:
for m4w in moorings4w:
m4w[var + "_lo"] = utils.butter_filter(
m4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
m4w[var + "_hi"] = m4w[var] - m4w[var + "_lo"]
c4w["zi"] = np.ones_like(c4w["z"]) * c4["z"]
print("Calculating horizontal gradients.")
# Calculate horizontal gradients
for j in range(N_windows):
ll = np.stack(
([m["lon"] for m in moorings[1:]], [m["lat"] for m in moorings[1:]]), axis=1
)
uv = np.stack(
(
[m4w["u_lo"][:, j, :] for m4w in moorings4w],
[m4w["v_lo"][:, j, :] for m4w in moorings4w],
),
axis=1,
)
dudx, dudy, dvdx, dvdy, vort, div = moo.div_vort_4D(ll[:, 0], ll[:, 1], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
c4w["dudx"][:, j, :] = dudx
c4w["dudy"][:, j, :] = dudy
c4w["dvdx"][:, j, :] = dvdx
c4w["dvdy"][:, j, :] = dvdy
c4w["nstrain"][:, j, :] = nstrain
c4w["sstrain"][:, j, :] = sstrain
c4w["vort"][:, j, :] = vort
c4w["div"][:, j, :] = div
for var in var4:
if var == "z": # Keep z as modified by hoffset.
continue
c4[var] = np.mean(c4w[var], axis=0)
freq, c4w["Puu"] = sig.welch(c4w["u_hi"], **spec_kwargs)
_, c4w["Pvv"] = sig.welch(c4w["v_hi"], **spec_kwargs)
_, c4w["Cuv"] = sig.csd(c4w["u_hi"], c4w["v_hi"], **spec_kwargs)
c4w["freq"] = freq.copy()
# Get rid of annoying tiny values.
svarl = ["Puu", "Pvv", "Cuv"]
for var in svarl:
    c4w[var][0, ...] = 0.0
    # Use Nclevels rather than a hard-coded 4 so the shape always tracks
    # the number of interpolation levels defined by `levis`.
    c4[var + "_int"] = np.full((N_windows, Nclevels), np.nan)
# Horizontal azimuth according to Jing 2018
c4w["theta"] = np.arctan2(2.0 * c4w["Cuv"].real, (c4w["Puu"] - c4w["Pvv"])) / 2
# Integration #############################################################
print("Integrating power spectra.")
for var in svarl:
c4w[var + "_cint"] = np.full_like(c4w[var], fill_value=np.nan)
fcor = np.abs(gsw.f(cc["lat"])) / pi2
N_freq = len(freq)
freq_ = np.tile(freq[:, np.newaxis, np.newaxis], (1, N_windows, Nclevels))
# ulim = fhi * np.tile(c4["N"][np.newaxis, ...], (N_freq, 1, 1)) / pi2
ulim = 1e9 # Set a huge upper limit since we don't know what N is...
llim = fcor * flo
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Puu", "Pvv", "Cuv"]
for var in svarl:
c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Change lower integration limits for vertical components...
llim = fcor * flov
use = (freq_ < ulim) & (freq_ > llim)
# Usefull quantities
c4["nstress"] = c4["Puu_int"] - c4["Pvv_int"]
c4["sstress"] = -2.0 * c4["Cuv_int"]
c4["F_horiz"] = (
-0.5 * (c4["Puu_int"] - c4["Pvv_int"]) * c4["nstrain"]
- c4["Cuv_int"] * c4["sstrain"]
)
# ## Now we have to create the model 'truth'...
#
# Load the model data and estimate some gradients.
print("Estimating smoothed gradients (slow).")
mluv = xr.load_dataset("../data/mooring_locations_uv1.nc")
mluv = mluv.isel(
t=slice(0, np.argwhere(mluv.u[:, 0, 0].data == 0)[0][0])
) # Get rid of end zeros...
mluv = mluv.assign_coords(lon=mluv.lon)
mluv = mluv.assign_coords(id=["cc", "nw", "ne", "se", "sw"])
mluv["dudz"] = (["t", "z", "index"], np.gradient(mluv.u, mluv.z, axis=1))
mluv["dvdz"] = (["t", "z", "index"], np.gradient(mluv.v, mluv.z, axis=1))
uv = np.rollaxis(np.stack((mluv.u, mluv.v))[..., 1:], 3, 0)
dudx, dudy, dvdx, dvdy, vort, div = moo.div_vort_4D(mluv.lon[1:], mluv.lat[1:], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
mluv["dudx"] = (["t", "z"], dudx)
mluv["dudy"] = (["t", "z"], dudy)
mluv["dvdx"] = (["t", "z"], dvdx)
mluv["dvdy"] = (["t", "z"], dvdy)
mluv["nstrain"] = (["t", "z"], nstrain)
mluv["sstrain"] = (["t", "z"], sstrain)
mluv["vort"] = (["t", "z"], vort)
mluv["div"] = (["t", "z"], div)
# Smooth the model data in an equivalent way to the real mooring.
dudxs = (
mluv.dudx.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdxs = (
mluv.dvdx.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dudys = (
mluv.dudy.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdys = (
mluv.dvdy.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
sstrains = (
mluv.sstrain.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
nstrains = (
mluv.nstrain.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
divs = (
mluv.div.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
vorts = (
mluv.vort.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dudzs = (
mluv.dudz.isel(index=0)
.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
dvdzs = (
mluv.dvdz.isel(index=0)
.rolling(t=nperseg, center=True)
.reduce(np.average, weights=sig.hann(nperseg))
.dropna("t")
)
# Make spline fits.
fdudx = itpl.RectBivariateSpline(dudxs.t.data, -dudxs.z.data, dudxs.data)
fdvdx = itpl.RectBivariateSpline(dvdxs.t.data, -dvdxs.z.data, dvdxs.data)
fdudy = itpl.RectBivariateSpline(dudys.t.data, -dudys.z.data, dudys.data)
fdvdy = itpl.RectBivariateSpline(dvdys.t.data, -dvdys.z.data, dvdys.data)
fsstrain = itpl.RectBivariateSpline(sstrains.t.data, -sstrains.z.data, sstrains.data)
fnstrain = itpl.RectBivariateSpline(nstrains.t.data, -nstrains.z.data, nstrains.data)
fdiv = itpl.RectBivariateSpline(divs.t.data, -divs.z.data, divs.data)
fvort = itpl.RectBivariateSpline(vorts.t.data, -vorts.z.data, vorts.data)
fdudz = itpl.RectBivariateSpline(dudzs.t.data, -dudzs.z.data, dudzs.data)
fdvdz = itpl.RectBivariateSpline(dvdzs.t.data, -dvdzs.z.data, dvdzs.data)
# Interpolate using splines.
dudxt = fdudx(c4["t"], -c4["z"], grid=False)
dvdxt = fdvdx(c4["t"], -c4["z"], grid=False)
dudyt = fdudy(c4["t"], -c4["z"], grid=False)
dvdyt = fdvdy(c4["t"], -c4["z"], grid=False)
sstraint = fsstrain(c4["t"], -c4["z"], grid=False)
nstraint = fnstrain(c4["t"], -c4["z"], grid=False)
divt = fdiv(c4["t"], -c4["z"], grid=False)
vortt = fvort(c4["t"], -c4["z"], grid=False)
dudzt = fdudz(c4["t"], -c4["z"], grid=False)
dvdzt = fdvdz(c4["t"], -c4["z"], grid=False)
c4["dudxt"] = dudxt
c4["dvdxt"] = dvdxt
c4["dudyt"] = dudyt
c4["dvdyt"] = dvdyt
c4["sstraint"] = sstraint
c4["nstraint"] = nstraint
c4["divt"] = divt
c4["vortt"] = vortt
c4["dudzt"] = dudzt
c4["dvdzt"] = dvdzt
# %%
# %% ########################## SAVE CORRECTED FILES ##########################
io.savemat("../data/virtual_mooring_interpolated.mat", c4)
io.savemat("../data/virtual_mooring_interpolated_windowed.mat", c4w)
# %% [markdown]
# Signal to noise ratios.
# %%
print("Estimating signal to noise ratios.")
M = munch.munchify(utils.loadmat('../data/virtual_mooring_interpolated.mat'))
# shear strain
dsstrain = M.sstrain - M.sstraint
SNR_sstrain = M.sstrain.var(axis=0)/dsstrain.var(axis=0)
np.save('../data/SNR_sstrain', SNR_sstrain, allow_pickle=False)
# normal strain
dnstrain = M.nstrain - M.nstraint
SNR_nstrain = M.nstrain.var(axis=0)/dnstrain.var(axis=0)
np.save('../data/SNR_nstrain', SNR_nstrain, allow_pickle=False)
# zonal shear
ddudz = M.dudz - M.dudzt
SNR_dudz = M.dvdz.var(axis=0)/ddudz.var(axis=0)
np.save('../data/SNR_dudz', SNR_dudz, allow_pickle=False)
# meridional shear
ddvdz = M.dvdz - M.dvdzt
SNR_dvdz = M.dvdz.var(axis=0)/ddvdz.var(axis=0)
np.save('../data/SNR_dvdz', SNR_dvdz, allow_pickle=False)
# divergence
ddiv = M.div - M.divt
SNR_nstrain = M.div.var(axis=0)/ddiv.var(axis=0)
np.save('../data/SNR_div', SNR_nstrain, allow_pickle=False)
# %% [markdown]
# <a id="corrected"></a>
# %% [markdown]
# ## Generate interpolated data.
#
# Set parameters again.
# %%
# Corrected levels.
# heights = [-540., -1250., -2100., -3500.]
# Filter cut off (hours)
tc_hrs = 40.0
# Start of time series (matlab datetime)
t_start = 734494.0
# Length of time series
max_len = N_data = 42048
# Data file
raw_data_file = "moorings.mat"
# Index where NaNs start in u and v data from SW mooring
sw_vel_nans = 14027
# Sampling period (minutes)
dt_min = 15.0
# Window length for wave stress quantities and mesoscale strain quantities.
nperseg = 2 ** 9
# Spectra parameters
window = "hanning"
detrend = "constant"
# Extrapolation/interpolation limit above which data will be removed.
dzlim = 100.0
# Integration of spectra parameters. These multiple N and f respectively to set
# the integration limits.
fhi = 1.0
flo = 1.0
flov = 1.0 # When integrating spectra involved in vertical fluxes, get rid of
# the near inertial portion.
# When bandpass filtering windowed data use these params multiplied by f and N
filtlo = 0.9 # times f
filthi = 1.1 # times N
# Interpolation distance that raises flag (m)
zimax = 100.0
dt_sec = dt_min * 60.0 # Sample period in seconds.
dt_day = dt_sec / 86400.0 # Sample period in days.
N_per_day = int(1.0 / dt_day) # Samples per day.
# %% [markdown]
# Polynomial fits first.
# %%
print("REAL MOORING INTERPOLATION")
print("**Generating corrected data**")
moorings = load_data.load_my_data()
cc, nw, ne, se, sw = moorings
# Generate corrected moorings
T = np.concatenate([m["T"].flatten() for m in moorings])
S = np.concatenate([m["S"].flatten() for m in moorings])
z = np.concatenate([m["z"].flatten() for m in moorings])
u = np.concatenate([m["u"].flatten() for m in moorings])
v = np.concatenate([m["v"].flatten() for m in moorings])
g = np.concatenate([m["gamman"].flatten() for m in moorings])
# SW problems...
nans = np.isnan(u) | np.isnan(v)
print("Calculating polynomial coefficients.")
pzT = np.polyfit(z[~nans], T[~nans], 3)
pzS = np.polyfit(z[~nans], S[~nans], 3)
pzg = np.polyfit(z[~nans], g[~nans], 3)
pzu = np.polyfit(z[~nans], u[~nans], 2)
pzv = np.polyfit(z[~nans], v[~nans], 2)
# %%
# Additional height in m to add to interpolation height.
hoffset = [-25.0, 50.0, -50.0, 100.0]
pi2 = np.pi * 2.0
nfft = nperseg
levis = [(0, 1, 2, 3), (4, 5), (6, 7, 8, 9), (10, 11)]
Nclevels = len(levis)
spec_kwargs = {
"fs": 1.0 / dt_sec,
"window": window,
"nperseg": nperseg,
"nfft": nfft,
"detrend": detrend,
"axis": 0,
}
idx1 = np.arange(nperseg, N_data, nperseg // 2) # Window end index
idx0 = idx1 - nperseg # Window start index
N_windows = len(idx0)
# Initialise the place holder dictionaries.
c12w = {"N_levels": 12} # Dictionary for raw, windowed data from central mooring
c4w = {"N_levels": Nclevels} # Dictionary for processed, windowed data
c4 = {"N_levels": Nclevels} # Dictionary for processed data
# Dictionaries for raw, windowed data from outer moorings
nw5w, ne5w, se5w, sw5w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings5w = [nw5w, ne5w, se5w, sw5w]
# Dictionaries for processed, windowed data from outer moorings
nw4w, ne4w, se4w, sw4w = {"id": "nw"}, {"id": "ne"}, {"id": "se"}, {"id": "sw"}
moorings4w = [nw4w, ne4w, se4w, sw4w]
# Initialised the arrays of windowed data
varr = ["t", "z", "u", "v", "gamman", "S", "T", "P"]
for var in varr:
c12w[var] = np.zeros((nperseg, N_windows, cc["N_levels"]))
var4 = [
"t",
"z",
"u",
"v",
"gamman",
"dudx",
"dvdx",
"dudy",
"dvdy",
"dudz",
"dvdz",
"dgdz",
"nstrain",
"sstrain",
"vort",
"N2",
]
for var in var4:
c4w[var] = np.zeros((nperseg, N_windows, Nclevels))
for var in var4:
c4[var] = np.zeros((N_windows, Nclevels))
# Initialised the arrays of windowed data for outer mooring
varro = ["z", "u", "v"]
for var in varro:
for m5w in moorings5w:
m5w[var] = np.zeros((nperseg, N_windows, 5))
var4o = ["z", "u", "v"]
for var in var4o:
for m4w in moorings4w:
m4w[var] = np.zeros((nperseg, N_windows, Nclevels))
# for var in var4o:
# for m4 in moorings4:
# m4[var] = np.zeros((N_windows, 4))
# Window the raw data.
for i in range(N_windows):
idx = idx0[i]
for var in varr:
c12w[var][:, i, :] = cc[var][idx : idx + nperseg, :]
for i in range(N_windows):
idx = idx0[i]
for var in varro:
for m5w, m in zip(moorings5w, moorings[1:]):
m5w[var][:, i, :] = m[var][idx : idx + nperseg, :]
c4["interp_far_flag"] = np.full_like(c4["u"], False, dtype=bool)
print("Interpolating properties.")
# Do the interpolation
for i in range(Nclevels):
# THIS hoffset is important!!!
c4["z"][:, i] = np.mean(c12w["z"][..., levis[i]], axis=(0, -1)) + hoffset[i]
for j in range(N_windows):
zr = c12w["z"][:, j, levis[i]]
ur = c12w["u"][:, j, levis[i]]
vr = c12w["v"][:, j, levis[i]]
gr = c12w["gamman"][:, j, levis[i]]
Sr = c12w["S"][:, j, levis[i]]
Tr = c12w["T"][:, j, levis[i]]
Pr = c12w["P"][:, j, levis[i]]
zi = c4["z"][j, i]
c4["interp_far_flag"][j, i] = np.any(np.min(np.abs(zr - zi), axis=-1) > zimax)
c4w["z"][:, j, i] = np.mean(zr, axis=-1)
c4w["t"][:, j, i] = c12w["t"][:, j, 0]
c4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
c4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
c4w["gamman"][:, j, i] = moo.interp_quantity(zr, gr, zi, pzg)
dudzr = np.gradient(ur, axis=-1) / np.gradient(zr, axis=-1)
dvdzr = np.gradient(vr, axis=-1) / np.gradient(zr, axis=-1)
dgdzr = np.gradient(gr, axis=-1) / np.gradient(zr, axis=-1)
N2 = seawater.bfrq(Sr.T, Tr.T, Pr.T, cc["lat"])[0].T
# Instead of mean, could moo.interp1d
c4w["dudz"][:, j, i] = np.mean(dudzr, axis=-1)
c4w["dvdz"][:, j, i] = np.mean(dvdzr, axis=-1)
c4w["dgdz"][:, j, i] = np.mean(dgdzr, axis=-1)
c4w["N2"][:, j, i] = np.mean(N2, axis=-1)
for m5w, m4w in zip(moorings5w, moorings4w):
if (m5w["id"] == "sw") & (
idx1[j] > sw_vel_nans
): # Skip this level because of NaNs
zr = m5w["z"][:, j, (0, 1, 3, 4)]
ur = m5w["u"][:, j, (0, 1, 3, 4)]
vr = m5w["v"][:, j, (0, 1, 3, 4)]
else:
zr = m5w["z"][:, j, :]
ur = m5w["u"][:, j, :]
vr = m5w["v"][:, j, :]
m4w["z"][:, j, i] = np.full((nperseg), zi)
m4w["u"][:, j, i] = moo.interp_quantity(zr, ur, zi, pzu)
m4w["v"][:, j, i] = moo.interp_quantity(zr, vr, zi, pzv)
print("Filtering windowed data.")
fcorcpd = np.abs(cc["f"]) * 86400 / pi2
Nmean = np.sqrt(np.average(c4w["N2"], weights=sig.hann(nperseg), axis=0))
varl = ["u", "v", "gamman"]
for var in varl:
c4w[var + "_hib"] = np.zeros_like(c4w[var])
c4w[var + "_lo"] = utils.butter_filter(
c4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
c4w[var + "_hi"] = c4w[var] - c4w[var + "_lo"]
for i in range(Nclevels):
for j in range(N_windows):
Nmean_ = Nmean[j, i] * 86400 / pi2
for var in varl:
c4w[var + "_hib"][:, j, i] = utils.butter_filter(
c4w[var][:, j, i],
(filtlo * fcorcpd, filthi * Nmean_),
fs=N_per_day,
btype="band",
)
varl = ["u", "v"]
for var in varl:
for m4w in moorings4w:
m4w[var + "_lo"] = utils.butter_filter(
m4w[var], 24 / tc_hrs, fs=N_per_day, btype="low", axis=0
)
m4w[var + "_hi"] = m4w[var] - m4w[var + "_lo"]
c4w["zi"] = np.ones_like(c4w["z"]) * c4["z"]
print("Calculating horizontal gradients.")
# Calculate horizontal gradients
for j in range(N_windows):
ll = np.stack(
([m["lon"] for m in moorings[1:]], [m["lat"] for m in moorings[1:]]), axis=1
)
uv = np.stack(
(
[m4w["u_lo"][:, j, :] for m4w in moorings4w],
[m4w["v_lo"][:, j, :] for m4w in moorings4w],
),
axis=1,
)
dudx, dudy, dvdx, dvdy, vort, _ = moo.div_vort_4D(ll[:, 0], ll[:, 1], uv)
nstrain = dudx - dvdy
sstrain = dvdx + dudy
c4w["dudx"][:, j, :] = dudx
c4w["dudy"][:, j, :] = dudy
c4w["dvdx"][:, j, :] = dvdx
c4w["dvdy"][:, j, :] = dvdy
c4w["nstrain"][:, j, :] = nstrain
c4w["sstrain"][:, j, :] = sstrain
c4w["vort"][:, j, :] = vort
print("Calculating window averages.")
for var in var4 + ["u_lo", "v_lo", "gamman_lo"]:
if var == "z": # Keep z as modified by hoffset.
continue
c4[var] = np.average(c4w[var], weights=sig.hann(nperseg), axis=0)
print("Estimating w and b.")
om = np.fft.fftfreq(nperseg, 15 * 60)
c4w["w_hi"] = np.fft.ifft(
1j
* pi2
* om[:, np.newaxis, np.newaxis]
* np.fft.fft(-c4w["gamman_hi"] / c4["dgdz"], axis=0),
axis=0,
).real
c4w["w_hib"] = np.fft.ifft(
1j
* pi2
* om[:, np.newaxis, np.newaxis]
* np.fft.fft(-c4w["gamman_hib"] / c4["dgdz"], axis=0),
axis=0,
).real
# Estimate buoyancy variables
c4w["b_hi"] = -gsw.grav(-c4["z"], cc["lat"]) * c4w["gamman_hi"] / c4["gamman_lo"]
c4w["b_hib"] = -gsw.grav(-c4["z"], cc["lat"]) * c4w["gamman_hib"] / c4["gamman_lo"]
c4["N"] = np.sqrt(c4["N2"])
print("Estimating covariance spectra.")
freq, c4w["Puu"] = sig.welch(c4w["u_hi"], **spec_kwargs)
_, c4w["Pvv"] = sig.welch(c4w["v_hi"], **spec_kwargs)
_, c4w["Pww"] = sig.welch(c4w["w_hi"], **spec_kwargs)
_, c4w["Pwwg"] = sig.welch(c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Pwwg"] *= (pi2 * freq[:, np.newaxis, np.newaxis]) ** 2
_, c4w["Pbb"] = sig.welch(c4w["b_hi"], **spec_kwargs)
_, c4w["Cuv"] = sig.csd(c4w["u_hi"], c4w["v_hi"], **spec_kwargs)
_, c4w["Cuwg"] = sig.csd(c4w["u_hi"], c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Cuwg"] *= -1j * pi2 * freq[:, np.newaxis, np.newaxis]
_, c4w["Cvwg"] = sig.csd(c4w["v_hi"], c4w["gamman_hi"] / c4["dgdz"], **spec_kwargs)
c4w["Cvwg"] *= -1j * pi2 * freq[:, np.newaxis, np.newaxis]
_, c4w["Cub"] = sig.csd(c4w["u_hi"], c4w["b_hi"], **spec_kwargs)
_, c4w["Cvb"] = sig.csd(c4w["v_hi"], c4w["b_hi"], **spec_kwargs)
print("Estimating covariance matrices.")
def cov(x, y, axis=None):
    """Covariance of x and y about their means along `axis` (1/N normalisation)."""
    dx = x - np.mean(x, axis=axis)
    dy = y - np.mean(y, axis=axis)
    return np.mean(dx * dy, axis=axis)
# Time-domain covariances of the band-passed ("_hib") fields in each window.
c4["couu"] = cov(c4w["u_hib"], c4w["u_hib"], axis=0)
c4["covv"] = cov(c4w["v_hib"], c4w["v_hib"], axis=0)
c4["coww"] = cov(c4w["w_hib"], c4w["w_hib"], axis=0)
c4["cobb"] = cov(c4w["b_hib"], c4w["b_hib"], axis=0)
c4["couv"] = cov(c4w["u_hib"], c4w["v_hib"], axis=0)
c4["couw"] = cov(c4w["u_hib"], c4w["w_hib"], axis=0)
c4["covw"] = cov(c4w["v_hib"], c4w["w_hib"], axis=0)
c4["coub"] = cov(c4w["u_hib"], c4w["b_hib"], axis=0)
c4["covb"] = cov(c4w["v_hib"], c4w["b_hib"], axis=0)
c4w["freq"] = freq.copy()
# Get rid of annoying tiny values.
svarl = ["Puu", "Pvv", "Pbb", "Cuv", "Cub", "Cvb", "Pwwg", "Cuwg", "Cvwg"]
for var in svarl:
    c4w[var][0, ...] = 0.0  # zero the DC bin
    c4[var + "_int"] = np.full((N_windows, 4), np.nan)
# Horizontal azimuth according to Jing 2018
c4w["theta"] = np.arctan2(2.0 * c4w["Cuv"].real, (c4w["Puu"] - c4w["Pvv"])) / 2
# Integration #############################################################
print("Integrating power spectra.")
for var in svarl:
    c4w[var + "_cint"] = np.full_like(c4w[var], fill_value=np.nan)
fcor = np.abs(cc["f"]) / pi2  # Coriolis frequency in Hz
N_freq = len(freq)
freq_ = np.tile(freq[:, np.newaxis, np.newaxis], (1, N_windows, Nclevels))
# Integration band: flo * f < freq < fhi * N (see parameters at file top).
ulim = fhi * np.tile(c4["N"][np.newaxis, ...], (N_freq, 1, 1)) / pi2
llim = fcor * flo
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Puu", "Pvv", "Pbb", "Cuv", "Pwwg"]
for var in svarl:
    c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
    c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Change lower integration limits for vertical components...
llim = fcor * flov
use = (freq_ < ulim) & (freq_ > llim)
svarl = ["Cub", "Cvb", "Cuwg", "Cvwg"]
for var in svarl:
    c4[var + "_int"] = igr.simps(use * c4w[var].real, freq, axis=0)
    c4w[var + "_cint"] = igr.cumtrapz(use * c4w[var].real, freq, axis=0, initial=0.0)
# Ruddic and Joyce effective stress
for var1, var2 in zip(["Tuwg", "Tvwg"], ["Cuwg", "Cvwg"]):
    # Weight the cross-spectrum by (1 - f^2/freq^2) before integrating.
    func = use * c4w[var2].real * (1 - fcor ** 2 / freq_ ** 2)
    nans = np.isnan(func)
    func[nans] = 0.0
    c4[var1 + "_int"] = igr.simps(func, freq, axis=0)
    func = use * c4w[var2].real * (1 - fcor ** 2 / freq_ ** 2)
    nans = np.isnan(func)
    func[nans] = 0.0
    c4w[var1 + "_cint"] = igr.cumtrapz(func, freq, axis=0, initial=0.0)
# Usefull quantities
c4["nstress"] = c4["Puu_int"] - c4["Pvv_int"]
c4["sstress"] = -2.0 * c4["Cuv_int"]
c4["F_horiz"] = (
    -0.5 * (c4["Puu_int"] - c4["Pvv_int"]) * c4["nstrain"]
    - c4["Cuv_int"] * c4["sstrain"]
)
c4["F_vert"] = (
    -(c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2) * c4["dudz"]
    - (c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2) * c4["dvdz"]
)
c4["F_vert_alt"] = -c4["Tuwg_int"] * c4["dudz"] - c4["Tvwg_int"] * c4["dvdz"]
c4["F_total"] = c4["F_horiz"] + c4["F_vert"]
c4["EPu"] = c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2
c4["EPv"] = c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2
##
# Same quantities from the time-domain covariances.
c4["nstress_cov"] = c4["couu"] - c4["covv"]
c4["sstress_cov"] = -2.0 * c4["couv"]
c4["F_horiz_cov"] = (
    -0.5 * (c4["couu"] - c4["covv"]) * c4["nstrain"] - c4["couv"] * c4["sstrain"]
)
c4["F_vert_cov"] = (
    -(c4["couw"] - cc["f"] * c4["covb"] / c4["N"] ** 2) * c4["dudz"]
    - (c4["covw"] + cc["f"] * c4["coub"] / c4["N"] ** 2) * c4["dvdz"]
)
c4["F_total_cov"] = c4["F_horiz_cov"] + c4["F_vert_cov"]
# %% [markdown]
# Estimate standard error on covariances.
# %%
bootnum = 1000  # number of bootstrap resamples
np.random.seed(12341555)  # fixed seed for reproducibility
idxs = np.arange(nperseg, dtype="i2")
# def cov1(xy, axis=0):
#     x = xy[..., -1]
#     y = xy[..., -1]
#     return np.mean((x - np.mean(x, axis=axis))*(y - np.mean(y, axis=axis)), axis=axis)
print("Estimating error on covariance using bootstrap (slow).")
euu_ = np.zeros((bootnum, N_windows, Nclevels))
evv_ = np.zeros((bootnum, N_windows, Nclevels))
eww_ = np.zeros((bootnum, N_windows, Nclevels))
ebb_ = np.zeros((bootnum, N_windows, Nclevels))
euv_ = np.zeros((bootnum, N_windows, Nclevels))
euw_ = np.zeros((bootnum, N_windows, Nclevels))
evw_ = np.zeros((bootnum, N_windows, Nclevels))
eub_ = np.zeros((bootnum, N_windows, Nclevels))
evb_ = np.zeros((bootnum, N_windows, Nclevels))
for i in range(bootnum):
    # Resample the window samples with replacement and recompute covariances.
    idxs_ = np.random.choice(idxs, nperseg)
    u_ = c4w["u_hib"][idxs_, ...]
    v_ = c4w["v_hib"][idxs_, ...]
    w_ = c4w["w_hib"][idxs_, ...]
    b_ = c4w["b_hib"][idxs_, ...]
    euu_[i, ...] = cov(u_, u_, axis=0)
    evv_[i, ...] = cov(v_, v_, axis=0)
    eww_[i, ...] = cov(w_, w_, axis=0)
    ebb_[i, ...] = cov(b_, b_, axis=0)
    euv_[i, ...] = cov(u_, v_, axis=0)
    euw_[i, ...] = cov(u_, w_, axis=0)
    evw_[i, ...] = cov(v_, w_, axis=0)
    eub_[i, ...] = cov(u_, b_, axis=0)
    evb_[i, ...] = cov(v_, b_, axis=0)
# Standard error = spread of the bootstrap distribution.
c4["euu"] = euu_.std(axis=0)
c4["evv"] = evv_.std(axis=0)
c4["eww"] = eww_.std(axis=0)
c4["ebb"] = ebb_.std(axis=0)
c4["euv"] = euv_.std(axis=0)
c4["euw"] = euw_.std(axis=0)
c4["evw"] = evw_.std(axis=0)
c4["eub"] = eub_.std(axis=0)
c4["evb"] = evb_.std(axis=0)
# %% [markdown]
# Error on gradients.
# %%
finite_diff_err = 0.06  # Assume 6 percent...
# Pre-computed signal-to-noise ratios for the gradient quantities.
SNR_dudz = np.load("../data/SNR_dudz.npy")
SNR_dvdz = np.load("../data/SNR_dvdz.npy")
SNR_nstrain = np.load("../data/SNR_nstrain.npy")
SNR_sstrain = np.load("../data/SNR_sstrain.npy")
ones = np.ones_like(c4["euu"])
# Gradient errors from variance / SNR, broadcast to the full array shape.
c4["edudz"] = ones * np.sqrt(c4["dudz"].var(axis=0) / SNR_dudz)
c4["edvdz"] = ones * np.sqrt(c4["dvdz"].var(axis=0) / SNR_dvdz)
# Strain errors combine the SNR estimate with the finite-difference error.
c4["enstrain"] = esum(
    ones * np.sqrt(c4["nstrain"].var(axis=0) / SNR_nstrain),
    finite_diff_err * c4["nstrain"],
)
c4["esstrain"] = esum(
    ones * np.sqrt(c4["sstrain"].var(axis=0) / SNR_sstrain),
    finite_diff_err * c4["sstrain"],
)
# %% [markdown]
# Error propagation.
# %%
# Horizontal flux errors (spectral estimates).
euumvv = 0.5 * esum(c4["euu"], c4["evv"])
c4["enstress"] = euumvv.copy()
enorm = emult(
    -0.5 * (c4["Puu_int"] - c4["Pvv_int"]), c4["nstrain"], euumvv, c4["enstrain"]
)
eshear = emult(c4["Cuv_int"], c4["sstrain"], c4["euv"], c4["esstrain"])
c4["errF_horiz_norm"] = enorm.copy()
c4["errF_horiz_shear"] = eshear.copy()
c4["errF_horiz"] = esum(enorm, eshear)
# Horizontal flux errors (covariance estimates).
euumvv = 0.5 * esum(c4["euu"], c4["evv"])
c4["enstress_cov"] = euumvv.copy()
enorm = emult(-0.5 * (c4["couu"] - c4["covv"]), c4["nstrain"], euumvv, c4["enstrain"])
eshear = emult(c4["couv"], c4["sstrain"], c4["euv"], c4["esstrain"])
c4["errF_horiz_norm_cov"] = enorm.copy()
c4["errF_horiz_shear_cov"] = eshear.copy()
c4["errF_horiz_cov"] = esum(enorm, eshear)
# Vertical flux errors.
euwmvb = esum(c4["euw"], np.abs(cc["f"] / c4["N"] ** 2) * c4["evb"])
evwpub = esum(c4["evw"], np.abs(cc["f"] / c4["N"] ** 2) * c4["eub"])
c4["evstressu"] = euwmvb
c4["evstressv"] = evwpub
edu = emult(
    -(c4["Cuwg_int"] - cc["f"] * c4["Cvb_int"] / c4["N"] ** 2),
    c4["dudz"],
    euwmvb,
    c4["edudz"],
)
edv = emult(
    -(c4["Cvwg_int"] + cc["f"] * c4["Cub_int"] / c4["N"] ** 2),
    c4["dvdz"],
    evwpub,
    c4["edvdz"],
)
c4["errEPu"] = edu.copy()
c4["errEPv"] = edv.copy()
c4["errF_vert"] = esum(edu, edv)
c4["errEPu_alt"] = emult(-c4["Tuwg_int"], c4["dudz"], c4["euw"], c4["edudz"])
c4["errEPv_alt"] = emult(-c4["Tvwg_int"], c4["dvdz"], c4["evw"], c4["edvdz"])
c4["errF_vert_alt"] = esum(c4["errEPu_alt"], c4["errEPv_alt"])
edu = emult(
    -(c4["couw"] - cc["f"] * c4["covb"] / c4["N"] ** 2), c4["dudz"], euwmvb, c4["edudz"]
)
edv = emult(
    -(c4["covw"] + cc["f"] * c4["coub"] / c4["N"] ** 2), c4["dvdz"], evwpub, c4["edvdz"]
)
c4["errEPu_cov"] = edu.copy()
c4["errEPv_cov"] = edv.copy()
c4["errF_vert_cov"] = esum(edu, edv)
c4["errF_total"] = esum(c4["errF_vert"], c4["errF_horiz"])
c4["errF_total_cov"] = esum(c4["errF_vert_cov"], c4["errF_horiz_cov"])
# %% [markdown]
# Save the interpolated data.
# %% ########################## SAVE CORRECTED FILES ##########################
io.savemat(os.path.join(data_out, "C_alt.mat"), c4)
io.savemat(os.path.join(data_out, "C_altw.mat"), c4w)
# %% [markdown]
# <a id="ADCP"></a>
# %% [markdown]
# # ADCP Processing
# %% ########################## PROCESS ADCP DATA #############################
print("ADCP PROCESSING")
tf = np.array([16.0, 2.0])  # band pass filter cut off hours
tc_hrs = 40.0  # Low pass cut off (hours)
dt = 0.5  # Data sample period hr
print("Loading ADCP data from file.")
file = os.path.expanduser(os.path.join(data_in, "ladcp_data.mat"))
adcp = utils.loadmat(file)["ladcp2"]
print("Removing all NaN rows.")
varl = ["u", "v", "z"]
for var in varl:  # Get rid of the all nan row.
    adcp[var] = adcp.pop(var)[:-1, :]
print("Calculating vertical shear.")
z = adcp["z"]
# First-difference shear, defined on the midpoint depth grid.
dudz = np.diff(adcp["u"], axis=0) / np.diff(z, axis=0)
dvdz = np.diff(adcp["v"], axis=0) / np.diff(z, axis=0)
# Mask depths where either component is missing so both stay consistent.
nans = np.isnan(dudz) | np.isnan(dvdz)
dudz[nans] = np.nan
dvdz[nans] = np.nan
adcp["zm"] = utils.mid(z, axis=0)
adcp["dudz"] = dudz
adcp["dvdz"] = dvdz
# Low pass filter data.
print("Low pass filtering at {:1.0f} hrs.".format(tc_hrs))
varl = ["u", "v", "dudz", "dvdz"]
for var in varl:
    data = adcp[var]
    nans = np.isnan(data)
    adcp[var + "_m"] = np.nanmean(data, axis=0)
    # Interpolate over gaps before filtering so the filter sees no NaNs.
    datalo = utils.butter_filter(
        utils.interp_nans(adcp["dates"], data, axis=1), 1 / tc_hrs, 1 / dt, btype="low"
    )
    # Then put nans back...
    if nans.any():
        datalo[nans] = np.nan
    namelo = var + "_lo"
    adcp[namelo] = datalo
    namehi = var + "_hi"
    adcp[namehi] = adcp[var] - adcp[namelo]
# Band pass filter the data.
print("Band pass filtering between {:1.0f} and {:1.0f} hrs.".format(*tf))
varl = ["u", "v", "dudz", "dvdz"]
for var in varl:
    data = adcp[var]
    nans = np.isnan(data)
    databp = utils.butter_filter(
        utils.interp_nans(adcp["dates"], data, axis=1), 1 / tf, 1 / dt, btype="band"
    )
    # Then put nans back...
    if nans.any():
        databp[nans] = np.nan
    namebp = var + "_bp"
    adcp[namebp] = databp
io.savemat(os.path.join(data_out, "ADCP.mat"), adcp)
# %% [markdown]
# <a id="VMP"></a>
# %% [markdown]
# ## VMP data
# %%
print("VMP PROCESSING")
vmp = utils.loadmat(os.path.join(data_in, "jc054_vmp_cleaned.mat"))["d"]
# Lon/lat corners of a box around the mooring site.
box = np.array([[-58.0, -58.0, -57.7, -57.7], [-56.15, -55.9, -55.9, -56.15]]).T
p = path.Path(box)
in_box = p.contains_points(np.vstack((vmp["startlon"], vmp["startlat"])).T)
idxs = np.argwhere(in_box).squeeze()
Np = len(idxs)
print("Isolate profiles in match around mooring.")
for var in vmp:
    ndim = np.ndim(vmp[var])
    if ndim == 2:
        vmp[var] = vmp[var][:, idxs]
    # NOTE(review): size == 36 presumably matches the number of profiles in
    # the raw dataset -- confirm if the source file changes.
    if ndim == 1 and vmp[var].size == 36:
        vmp[var] = vmp[var][idxs]
print("Rename variables.")
vmp["P"] = vmp.pop("press")
vmp["T"] = vmp.pop("temp")
vmp["S"] = vmp.pop("salin")
print("Deal with profiles where P[0] != 1.")
P_ = np.arange(1.0, 10000.0)  # regular 1 dbar pressure grid
i0o = np.zeros((Np), dtype=int)
i1o = np.zeros((Np), dtype=int)
i0n = np.zeros((Np), dtype=int)
i1n = np.zeros((Np), dtype=int)
pmax = 0.0
for i in range(Np):
    nans = np.isnan(vmp["eps"][:, i])
    # First/last valid index in the original profile...
    i0o[i] = i0 = np.where(~nans)[0][0]
    i1o[i] = i1 = np.where(~nans)[0][-1]
    P0 = vmp["P"][i0, i]
    P1 = vmp["P"][i1, i]
    # ...and the corresponding indices on the regular pressure grid.
    i0n[i] = np.searchsorted(P_, P0)
    i1n[i] = np.searchsorted(P_, P1)
    pmax = max(P1, pmax)
# Re-grid all profiles onto a common pressure array, padding with NaN.
P = np.tile(np.arange(1.0, pmax + 2)[:, np.newaxis], (1, len(idxs)))
eps = np.full_like(P, np.nan)
chi = np.full_like(P, np.nan)
T = np.full_like(P, np.nan)
S = np.full_like(P, np.nan)
for i in range(Np):
    eps[i0n[i] : i1n[i] + 1, i] = vmp["eps"][i0o[i] : i1o[i] + 1, i]
    chi[i0n[i] : i1n[i] + 1, i] = vmp["chi"][i0o[i] : i1o[i] + 1, i]
    T[i0n[i] : i1n[i] + 1, i] = vmp["T"][i0o[i] : i1o[i] + 1, i]
    S[i0n[i] : i1n[i] + 1, i] = vmp["S"][i0o[i] : i1o[i] + 1, i]
vmp["P"] = P
vmp["eps"] = eps
vmp["chi"] = chi
vmp["T"] = T
vmp["S"] = S
vmp["z"] = gsw.z_from_p(vmp["P"], vmp["startlat"])
print("Calculate neutral density.")
# Compute potential temperature using the 1983 UNESCO EOS.
vmp["PT0"] = seawater.ptmp(vmp["S"], vmp["T"], vmp["P"])
# Flatten variables for analysis.
lons = np.ones_like(P) * vmp["startlon"]
lats = np.ones_like(P) * vmp["startlat"]
S_ = vmp["S"].flatten()
T_ = vmp["PT0"].flatten()
P_ = vmp["P"].flatten()
LO_ = lons.flatten()
LA_ = lats.flatten()
gamman = gamma_GP_from_SP_pt(S_, T_, P_, LO_, LA_)
vmp["gamman"] = np.reshape(gamman, vmp["P"].shape) + 1000.0
io.savemat(os.path.join(data_out, "VMP.mat"), vmp)
| StarcoderdataPython |
11274669 | # This code contains at least 9 defects .
# Try to fix them all based on the error messages。
import pygame
from pygame import image, Rect, Surface
# Map symbol -> (column, row) of its tile on the sprite sheet.
TILE_POSITIONS = [
    ('#', 0, 0),  # wall
    (' ', 0, 1),  # floor
    ('.', 2, 0),  # dot
    ('*', 3, 0),  # player
]
# Edge length of one square tile in pixels.
SIZE = 32
# image = 'tiles.xpm'
def get_tile_rect(x, y):
    """Return the pixel rectangle of the tile at grid position (x, y)."""
    left, top = x * SIZE, y * SIZE
    return Rect(left, top, SIZE, SIZE)
def load_tiles():
    """Load the tile sheet and build a symbol -> source-rect lookup.

    Returns:
        tuple: (tile_img, tiles) where tile_img is the loaded sheet image and
        tiles maps each map symbol to its source rectangle on the sheet.
    """
    tiles = {}
    tile_img = image.load('tiles.xpm')
    for symbol, x, y in TILE_POSITIONS:
        # Reuse get_tile_rect instead of duplicating the rect arithmetic.
        tiles[symbol] = get_tile_rect(x, y)
    return tile_img, tiles
if __name__ == '__main__':
    # Compose one tile of each kind into a 4x1 strip and save it to disk.
    tile_img, tiles = load_tiles()
    m = Surface((32 * 4, 32))
    m.blit(tile_img, get_tile_rect(0, 0), tiles['#'])
    m.blit(tile_img, get_tile_rect(1, 0), tiles[' '])
    m.blit(tile_img, get_tile_rect(2, 0), tiles['*'])
    m.blit(tile_img, get_tile_rect(3, 0), tiles['.'])
    image.save(m, 'tile_combo.png')
# Optional exercise:
# make the print statement below work by modifying the class
# so that it prints the char attribute
class Tile:
    """A map tile identified by its character and grid position."""

    def __init__(self, achar, x, y):
        self.char = achar
        # Store the grid position instead of silently discarding it.
        self.x = x
        self.y = y

    def __repr__(self):
        # Print the tile's character, as requested by the exercise comment.
        return self.char
# Demonstrate the exercise: construct a tile and print its character.
t = Tile('#', 0, 0)
print(t.char)
| StarcoderdataPython |
1649608 | """
"""
import wtl.options as wopt
import gzip
import json
import itertools
import re
# Name of the executable dispatched by this script.
program = 'likeligrid'
def iter_args(infiles, range_s, concurrency, rest, epistasis, tp53):
    """Yield likeligrid command lines for every (s, infile) combination.

    Args:
        infiles: input result/genotype files.
        range_s: values for the -s option.
        concurrency: threads per job (-j).
        rest: extra command-line arguments passed through unchanged.
        epistasis: if True, emit one command per pathway pair (-e i j).
        tp53: if True, emit commands for the TP53 pleiotropic pair (-p).
    """
    const = [program, '-j{}'.format(concurrency)] + rest
    axes = wopt.OrderedDict()
    axes['s'] = range_s
    for v in wopt.sequential(axes):
        args = wopt.make_args(v)
        for f in infiles:
            if epistasis:
                # With -g the pathway count comes from the TSV results,
                # otherwise from the genotype JSON.
                if '-g' in const:
                    npath = count_pathways_tsv(f)
                else:
                    npath = count_pathways_json(f)
                for x in itertools.combinations(range(npath), 2):
                    yield const + ['-e {} {}'.format(*x)] + args + [f]
            elif tp53:
                pair = tp53_pleiotropic_pair(f)
                if re.search(r'-e\dx\d', f):
                    # filter correct pairs from globbed infiles
                    if not re.search('-e{}x{}'.format(*pair), f):
                        continue
                yield const + ['-e {} {}'.format(*pair), '-p'] + args + [f]
            else:
                yield const + args + [f]
def read_pathways_tsv(infile):
    """Return the pathway column names from a gzipped result table.

    Skips leading '#' comment lines, takes the first remaining line as the
    tab-separated header, and drops its first field ('loglik').
    """
    with gzip.open(infile, 'rt') as handle:
        for row in handle:
            if not row.startswith('#'):
                columns = row.rstrip().split('\t')
                break
    columns.pop(0)  # loglik
    return columns
def count_pathways_tsv(infile):
    """Count pathway columns in a result file, excluding the optional
    trailing 'pleiotropy' and epistasis ('a:b') columns."""
    columns = read_pathways_tsv(infile)
    if columns[-1] == 'pleiotropy':
        del columns[-1]
    if ':' in columns[-1]:
        del columns[-1]
    return len(columns)
def count_pathways_json(infile):
    """Return the number of pathways listed in a gzipped genotype JSON file."""
    with gzip.open(infile, 'r') as handle:
        genotype = json.load(handle)
    return len(genotype['pathway'])
def tp53_pleiotropic_pair(infile):
    """Indices of the 'Cycle' and 'Damage' pathways in a result file header."""
    names = read_pathways_tsv(infile)
    return (names.index('Cycle'), names.index('Damage'))
def tp53_pleiotropic_pair_json(infile):
    """Indices of the 'Cycle' and 'Damage' pathways in a genotype JSON file."""
    with gzip.open(infile, 'r') as handle:
        pathways = json.load(handle)['pathway']
    return (pathways.index('Cycle'), pathways.index('Damage'))
def main():
    """Parse command-line options and dispatch likeligrid jobs via wtl.options."""
    parser = wopt.ArgumentParser()
    parser.add_argument('-o', '--outdir', default='.stdout')
    parser.add_argument('--begin', type=int, default=4)
    parser.add_argument('--end', type=int, default=6)
    parser.add_argument('-e', '--epistasis', action='store_true')
    parser.add_argument('--tp53', action='store_true')
    parser.add_argument('infile', nargs='+')
    # Unknown options are collected in `rest` and forwarded to likeligrid.
    (args, rest) = parser.parse_known_args()
    print("cpu_count(): {}".format(wopt.cpu_count()))
    # NOTE(review): args.jobs/args.parallel/args.dry_run presumably come from
    # wopt.ArgumentParser defaults -- confirm in wtl.options.
    print('{} jobs * {} cores/job'.format(args.jobs, args.parallel))
    range_s = range(args.begin, args.end)
    it = iter_args(args.infile, range_s, args.parallel, rest,
                   args.epistasis, args.tp53)
    wopt.map_async(it, args.jobs, args.dry_run, outdir=args.outdir)
    print('End of ' + __file__)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4935917 | # -*- coding: utf-8 -*-
"""
flybirds common error
"""
import flybirds.core.global_resource as gr
class FlybirdNotFoundException(Exception):
    """Raised when an element cannot be located by flybirds.

    The selector dictionary is embedded in the message; an optional inner
    error is appended for context.
    """

    def __init__(self, message, select_dic, error=None):
        full_message = f"selectors={str(select_dic)} {message}"
        if error is not None:
            full_message = f"{full_message} innerErr:{error}"
        super().__init__(full_message)
        self.message = full_message

    def __str__(self):
        return str(self.message)
class PositionNotChangeException(Exception):
    """Raised when a UI element's position fails to change as expected."""

    def __init__(self, message):
        self.message = message
        super().__init__(message)

    def __str__(self):
        return str(self.message)
class FlybirdCallMethodParamsException(Exception):
    """Raised when a method is invoked with an invalid parameter."""

    def __init__(self, method, param_name):
        self.message = f"call method:{method} has invalid params:{param_name}"
        super().__init__(self.message)

    def __str__(self):
        return str(self.message)
class FlybirdEleExistsException(Exception):
    """Raised when an expected element does not exist."""

    def __init__(self, message):
        self.message = message
        super().__init__(message)

    def __str__(self):
        return str(self.message)
class FlybirdVerifyException(Exception):
    """Raised when a verification step fails.

    Args:
        message: human-readable description of the failed verification.
    """

    def __init__(self, message):
        # Pass the message to Exception so args/repr/pickling carry it,
        # consistent with the other exceptions in this module (previously
        # super().__init__() was called with no arguments).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return str(self.message)
class FlybirdPositionChanging(Exception):
    """Raised while an element's position is still changing.

    Args:
        message: human-readable description of the condition.
    """

    def __init__(self, message):
        # Forward the message to Exception so args/repr/pickling keep it
        # (previously super().__init__() dropped it).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return str(self.message)
class ScreenRecordException(Exception):
    """Raised when screen recording fails.

    Args:
        message: human-readable description of the recording error.
    """

    def __init__(self, message):
        # Forward the message to Exception so args/repr/pickling keep it
        # (previously super().__init__() dropped it).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return str(self.message)
class FlybirdsVerifyEleException(Exception):
    """Raised when waiting for an element times out.

    Either a ready-made ``message`` or the ``selector`` that timed out can
    be supplied; with a selector a descriptive timeout report is built from
    the frame configuration.
    """

    def __init__(self, message=None, selector=None):
        # Default to an empty message so __str__ never raises
        # AttributeError (previously self.message was left unset when
        # neither argument was supplied).
        self.message = ""
        if message is not None:
            self.message = message
        elif selector is not None:
            self.message = self.print_message(selector)
        super().__init__(self.message)

    def __str__(self):
        return str(self.message)

    @staticmethod
    def print_message(param):
        """Build the standard timeout report for selector *param*."""
        default_timeout = gr.get_frame_config_value("wait_ele_timeout", 30)
        message = f'Timeout {default_timeout * 1000}ms exceeded.\n'
        message += '=' * 20 + ' logs ' + '=' * 20
        message += f'\nwaiting for selector "{param}"\n'
        message += '=' * 46
        return message
class FlybirdsException(Exception):
    """Flybirds exception base class.

    Args:
        message: human-readable description of the error.
    """

    def __init__(self, message):
        # Forward the message to Exception so args/repr/pickling keep it
        # (previously super().__init__() dropped it).
        super().__init__(message)
        self.message = message

    def __str__(self):
        return str(self.message)
| StarcoderdataPython |
8172126 | # -*- coding: utf-8 -*-
# @Time : 2020/1/7 晚上10:00
# @Author : upcbdipt
# @Project : CDW_FedAvg
# @FileName: tf_utils
import tensorflow as tf
def __num_elems(shape):
    """Return the total number of elements implied by ``shape``.

    Args:
        shape: TensorShape (or any iterable of int-convertible dimensions).

    Returns:
        int: product of all dimensions (1 for an empty shape).
    """
    count = 1
    for dim in shape:
        count *= int(dim)
    return count
def graph_size(graph):
    """Return the size of the given graph in bytes.

    Each trainable variable contributes (number of elements) multiplied by
    the byte size of its dtype, taken from its shape and dtype attributes.

    Args:
        graph: TF graph.

    Returns:
        Integer representing the size of the graph (in bytes).
    """
    with graph.as_default():
        trainables = tf.trainable_variables()
    return sum(__num_elems(v.shape) * int(v.dtype.size) for v in trainables)
| StarcoderdataPython |
1691787 | # -*- coding: utf-8 -*-
"""
Created 2018/12 by Shintaro
Modified 2021/02 by Hermann for usage at Wodan; look for "HE:"
"""
from qcodes import Instrument, validators as vals
from qcodes.instrument.channel import InstrumentChannel, ChannelList
from qcodes.utils.validators import Validator
from qcodes.instrument.parameter import ArrayParameter
from typing import List, Dict, Callable, Union
from nifpga import Session
from nifpga import nifpga
import time
import numpy as np
import logging
log = logging.getLogger(__name__)
# FPGA bitfile for the NI sbRIO-9612 board driving the NEEL DAC.
bit_file = '..\\tools\\drivers\\fpgabatchhewodan_sbRIO9612RIO0_hewodan_kUFBPXPrLOs.lvbitx'
# IP address of NI sbrio9612 (can be checked by NI MAX).
ip_address = '192.168.0.3'
# Number of DAC channels on each panel (bus).
channels_per_panel = 8
"""-------------------------
Utility functions
-------------------------"""
def split_number(a, size=32):
    """Split an unsigned integer into its upper and lower halves.

    For example a 32 bit uint is split into two 16 bit uints.

    Args:
        a: Input number (assumed non-negative / unsigned).
        size: Bit size of the input number.

    Returns:
        b: Upper half (np.uint32/np.uint16/np.uint8 for size 64/32/16,
           plain int for other sizes).
        c: Lower half (same type convention as ``b``).
    """
    half = size // 2
    mask = (1 << half) - 1
    # Mask/shift instead of the original per-bit loop: O(1) and equivalent
    # for non-negative inputs (bits at or above `size` are ignored).
    c = int(a) & mask
    b = (int(a) >> half) & mask
    if size == 64:
        b = np.uint32(b)
        c = np.uint32(c)
    elif size == 32:
        b = np.uint16(b)
        c = np.uint16(c)
    elif size == 16:
        b = np.uint8(b)
        c = np.uint8(c)
    return b, c
def join_numbers(a, b, final_size=32):
    """Join two numbers into one of double the bit size.

    Args:
        a: Input 1 (becomes the upper bits; truncated to final_size/2 bits).
        b: Input 2 (becomes the lower bits; truncated to final_size/2 bits).
        final_size: Bit size of the returned number (64, 32 or 16).

    Returns:
        c: Joined number (np.uint64/np.uint32/np.uint16).

    Note:
        The shift is performed in Python integer arithmetic before casting
        to a NumPy scalar.  The previous implementation shifted a
        fixed-width NumPy scalar by its own bit width (e.g.
        ``np.uint32(a) << 32``), which overflows to 0 under the NumPy 2.x
        (NEP 50) promotion rules.
    """
    half = final_size // 2
    mask = (1 << half) - 1
    joined = ((int(a) & mask) << half) | (int(b) & mask)
    if final_size == 64:
        c = np.uint64(joined)
    elif final_size == 32:
        c = np.uint32(joined)
    elif final_size == 16:
        c = np.uint16(joined)
    return c
def join_8_8bit264bit(a, b, c, d, e, f, g, h):
    """Join eight 8-bit unsigned integers into one 64-bit unsigned integer.

    Argument ``a`` supplies the most significant byte and ``h`` the least
    (a: uuu, b: uul, c: ulu, d: ull, ...).

    Returns:
        64 bit unsigned integer.
    """
    upper = join_numbers(join_numbers(a, b, 16), join_numbers(c, d, 16), 32)
    lower = join_numbers(join_numbers(e, f, 16), join_numbers(g, h, 16), 32)
    return join_numbers(upper, lower, 64)
def ms2FS_divider(ms: Union[int, float] = 3.0) -> int:
    """Convert a pulse duration (ms) to the FPGA divider for ramp mode.

    Typical values: 3 ms -> 6661, 20 ms -> 44439.

    Args:
        ms (float): Duration between each trigger pulse for ramp mode
            (trigger 1, active when it is off).  Negative values fall back
            to a fixed minimum divider (about 100 us).

    Return:
        divider (int)
    """
    if ms < 0:
        # Make minimum to be about 100 us.
        divider = 220
    elif ms < 10.0:
        divider = int(ms / 3 * 6661)
    else:
        divider = int(ms / 20 * 44439)
    return divider
"""----------------
Define classes
------------------"""
class NEEL_DAC_channel(InstrumentChannel):
    """
    This class holds information about each DAC channel.

    Args:
        parent (InstrumentChannel): NEEL_DAC_Bus
        name (str): name of the channel
        channel (int): channel number (0 ~ 7)
        value (float): output value of the DAC.
        vmax (float): upper validation limit for the output voltage (V)
        vmin (float): lower validation limit for the output voltage (V)
        alias (str): optional short-cut name for this channel
    """
    def __init__(self,
                 parent: InstrumentChannel,
                 name: str,
                 channel: int,
                 value: float = -0.0003,
                 vmax: float = 5.0,
                 vmin: float = -5.0,
                 alias: str = None,
                 **kwargs) -> None:
        super().__init__(parent, name, **kwargs)
        self.dac = self._parent.dac           # top-level NEEL_DAC instrument
        self.panel = self._parent.bus_number  # DAC bus (panel) index
        self.channel = channel
        self.val = value                      # cached output value (V)
        self.alias = alias
        self.add_parameter('v',
                           label='Value',
                           unit='V',
                           scale=1.0,
                           get_cmd=self.get_value,
                           set_cmd=self.set_value,
                           get_parser=float,
                           set_parser=float,
                           vals=vals.Numbers(vmin, vmax),
                           )

    def get_value(self):
        """Return the cached DAC output value (V)."""
        return self.val

    def set_value(self, val: float):
        """Move the DAC output to ``val`` (V) and cache it.

        NaN means "do not move the hardware".
        """
        # print(self.panel, self.channel, val)
        # Set DAC value if it is not np.nan.
        if not np.isnan(val):
            self.dac.DAC_set_value(panel_channel={'panel': self.panel, 'channel': self.channel},
                                   DAC_goto_value=val)
            # self.dac.move() # HE: let it move when set.
        # NOTE(review): indentation was lost in this copy; the cache update is
        # assumed to run unconditionally (also for NaN) -- confirm intent.
        self.val = val
class NEEL_DAC_Bus(InstrumentChannel):
    """
    This class holds information about a bus containing 8 DAC channels.

    Args:
        parent (Instrument): NEEL_DAC
        name (str): name of the bus
        bus_number (int): bus_number (typically 0 ~ 4, max 7)
    """
    def __init__(self, parent: Instrument, name: str, bus_number: int, **kwargs) -> None:
        super().__init__(parent, name, **kwargs)
        self.dac = self._parent  # parent NEEL_DAC instrument
        self.bus_number = bus_number
        # Add dummy parameter since we get error with snapshot without it.
        self.add_parameter('dummy',
                           label='dummy',
                           get_cmd=self.get_dummy,
                           get_parser=int,
                           )
        # Create the 8 channels of this bus as submodules c0..c7.
        for channel in range(8):
            s = 'c{:d}'.format(channel)
            channel_instance = NEEL_DAC_channel(self, s, channel)
            self.add_submodule(s, channel_instance)

    def get_dummy(self):
        """Getter for the placeholder 'dummy' parameter (always 0)."""
        return 0
class NEEL_DAC(Instrument):
"""
This is the qcodes driver for NEEL DAC controlled by National Instruments single board RIO 9612.
Args:
name (str): name of the instrument
bitFilePath(str): path to the bit file
address (str): IP address of NI sbrio9612 (can be checked by NI MAX)
LI_frequency (float): lock-in frequency
LI_amplitude (float): lock-in amplitude
LI_channel (int): panel = N // 8, channel = N % 8
LI_status (bool): status of lock-in (On: True, Off: False)
used_buses (List[int]): list of DAC buses to be used
ms2wait (int): wait time between each DAC bit movement
v (dict): dictionary of short-cut-references to NEEL_DAC_CHANNELs via alias-name
FS_divider (Union[float, int]): For fast sequence ramp mode it determines time between each DAC step (ms). (trigger from DIO1/panel 9)
For fast sequence mode it determines time of pulse from DIO1/panel 9.
FS_ramp (bool): ramp mode (True) or not (False)
FS_pulse_len (int): Length of trigger (check minimum trigger length of each instrument, which accept the trigger.)
FS_chan_list (List[int]): List of fast sequence channel (up to 16 channels). Pannel = N // 8, channel = N % 8, Dummy = 255
FS_status (bool): whether fast sequence is running (True) or not (False).
FS_sample_count (int): Length of the fast sequence slot
FS_move_limit (List[float, float]): minimum and maximum for the dac movement for fast ramp and sequence.
init_zero (bool): (True) initialize all DAC channels to zero or (False) keep the current configuration
"""
def __init__(self, name:str,
bitFilePath:str=bit_file,
address:str=ip_address,
LI_frequency:float=23.3,
LI_amplitude:float=0.0,
# LI_channel:int=0,
LI_channel:list=[1,0], # HE
LI_status:bool=False,
used_buses:List[int]=[1,2,4,6],
ms2wait:int=1,
FS_divider:Union[int, float]=3,
FS_ramp:bool=True,
FS_pulse_len:int=100,
FS_chan_list:List[int]=list(range(16)),
FS_status:bool=False,
FS_sample_count:int=10,
FS_move_limit:List[float]=[-0.5, 0.3],
init_zero:bool=False,
**kwargs) -> None:
super().__init__(name, **kwargs)
# Address information
self.bitFilePath = bitFilePath
self.address =address
# Define reference to access FPGA.
self.ref = None
self.openRef()
# lock-in related parameters
self._LI_status = LI_status
self._LI_frequency = LI_frequency
self._LI_amplitude = LI_amplitude
self._LI_channel = LI_channel
# DAC related parameters
self._used_buses = used_buses
self._ms2wait = ms2wait
self.v = dict()
# Fast sequence realted parameters
self._FS_divider = FS_divider
self._FS_ramp = FS_ramp
self._FS_pulse_len = FS_pulse_len
self._FS_chan_list = FS_chan_list
self._FS_status = FS_status
self._FS_sample_count = FS_sample_count
self._FS_move_limit = FS_move_limit
seq = np.zeros((2,10), dtype=float)
seq[:, 0] = [101, 0]
seq[:, 9] = [103, 9]
self._FS_slots = seq
if init_zero:
self.initialise()
self.add_parameter('LI_status',
label='Lock-in status',
get_cmd=self.get_lock_in_status,
set_cmd=self.set_lock_in_status,
initial_value=LI_status,
)
self.add_parameter('LI_frequency',
label='Lock-in frequency',
unit='Hz',
get_cmd=self.get_lock_in_frequency,
set_cmd=self.set_lock_in_frequency,
get_parser=float,
set_parser=float,
post_delay=0.45, # HE: wait after move such that the lock-in-detector can follow
vals=vals.Numbers(0.0, 50000.0),
initial_value=LI_frequency,
)
self.add_parameter('LI_amplitude',
label='Lock-in amplitude',
unit='V',
get_cmd=self.get_lock_in_amplitude,
set_cmd=self.set_lock_in_amplitude,
get_parser=float,
set_parser=float,
post_delay=0.45, # HE: wait after move such that the lock-in-detector can follow
vals=vals.Numbers(0.0, 2.0),
initial_value=LI_amplitude,
)
# self.add_parameter('LI_channel',
# label='Lock-in channel',
# get_cmd=self.get_lock_in_channel,
# set_cmd=self.set_lock_in_channel,
# get_parser=int,
# set_parser=int,
# vals=vals.Ints(0, 63),
# initial_value=LI_channel,
# )
self.add_parameter('LI_channel', # HE
label='Lock-in channel',
get_cmd=self.get_lock_in_channel,
set_cmd=self.set_lock_in_channel,
get_parser=list,
set_parser=list,
vals=vals.Lists(vals.Ints(0,7)),
initial_value=LI_channel,
)
self.add_parameter('used_buses',
label='Used DAC buses',
get_cmd=self.get_used_buses,
set_cmd=self.set_used_buses,
initial_value=used_buses,
)
self.add_parameter('ms2wait',
label='Wait time of DAC bit movement',
unit = 'ms',
get_cmd=self.get_ms2wait,
set_cmd=self.set_ms2wait,
get_parser=int,
set_parser=int,
vals=vals.Ints(0,5),
initial_value=ms2wait,
)
self.add_parameter('FS_divider',
label='Fast sequence divider',
unit = 'ms',
get_cmd = self.get_FS_divider,
set_cmd = self.set_FS_divider,
get_parser=float,
set_parser=float,
vals=vals.Numbers(4.6e-4, 450),
initial_value=FS_divider,
)
self.add_parameter('FS_ramp',
label='Fast sequence ramp mode',
get_cmd = self.get_FS_ramp,
set_cmd = self.set_FS_ramp,
get_parser = bool,
set_parser = bool,
initial_value=FS_ramp,
)
self.add_parameter('FS_pulse_len',
label='Fast sequence pulse length',
get_cmd = self.get_FS_pulse_len,
set_cmd = self.set_FS_pulse_len,
get_parser = int,
set_parser = int,
vals=vals.Ints(100, 10000),
initial_value=FS_pulse_len,
)
self.add_parameter('FS_chan_list',
label='Fast sequence channel list',
get_cmd = self.get_FS_chan_list,
set_cmd = self.set_FS_chan_list,
initial_value=FS_chan_list,
)
self.add_parameter('FS_status',
label='Fast sequence status',
get_cmd = self.get_FS_status,
set_cmd = self.set_FS_status,
get_parser=bool,
set_parser=bool,
initial_value=FS_status,
)
self.add_parameter('FS_sample_count',
label='Fast sequence sample count',
get_cmd = self.get_FS_sample_count,
set_cmd = self.set_FS_sample_count,
get_parser=int,
set_parser=int,
vals=vals.Ints(1, 100000),
initial_value=FS_sample_count,
)
self.add_parameter('FS_move_limit',
label='Fast sequence DAC move limit',
unit = 'V',
get_cmd = self.get_FS_move_limit,
set_cmd = self.set_FS_move_limit,
initial_value=FS_move_limit,
)
self.add_parameter('FS_slots',
label = 'Fast sequence slots',
get_cmd = self.get_FS_slots,
set_cmd = self.set_FS_slots,
snapshot_get = False,
snapshot_value = False,
)
# Initialize used buses
self.set_used_buses(used_buses)
self.set_ms2wait(ms2wait)
# Define Buses
for n in self._used_buses:
if 0 <= n <=7:
s = 'p{:d}'.format(n)
bus = NEEL_DAC_Bus(self, s, n)
self.add_submodule(s, bus)
    def get_lock_in_status(self):
        """Return the cached lock-in on/off state."""
        return self._LI_status

    def set_lock_in_status(self, val: bool):
        """Switch the lock-in output on (True) or off (False).

        Order 3 toggles the lock-in inhibit flag on the FPGA.
        """
        self._LI_status = val
        self.lock_in_send_order(order=3,
                                inhibate=not val)

    def get_lock_in_frequency(self):
        """Return the cached lock-in frequency (Hz)."""
        return self._LI_frequency

    def set_lock_in_frequency(self, val: float):
        """Set the lock-in frequency (Hz) via FPGA order 0."""
        self._LI_frequency = val
        if self._LI_status:
            # If lock-in is running, once stop it and restart after change.
            self.set_lock_in_status(False)
            self.lock_in_send_order(order=0,
                                    frequency=val)
            self.set_lock_in_status(True)
        else:
            self.lock_in_send_order(order=0,
                                    frequency=val)
    def get_lock_in_amplitude(self):
        """Return the cached lock-in amplitude (V)."""
        return self._LI_amplitude

    def set_lock_in_amplitude(self, val: float):
        """Set the lock-in excitation amplitude (V) via FPGA order 2."""
        # NOTE(review): the cache stores abs(val) while the raw val is sent
        # to the FPGA -- confirm negative amplitudes are intended upstream.
        self._LI_amplitude = np.abs(val)
        if self._LI_status:
            # If lock-in is running, once stop it and restart after change.
            self.set_lock_in_status(False)
            self.lock_in_send_order(order=2,
                                    amplitude=val)
            self.set_lock_in_status(True)
        else:
            self.lock_in_send_order(order=2,
                                    amplitude=val)

    def get_lock_in_channel(self):
        """Return the cached [panel, channel] pair used by the lock-in."""
        return self._LI_channel

    # Legacy single-integer channel setter (panel = N // 8, channel = N % 8),
    # kept for reference; superseded by the [panel, channel] list version.
    # def set_lock_in_channel(self, val: int):
    #     self._LI_channel = val
    #     panel = val // 8
    #     channel = val % 8
    #     LI_panel_channel = {'panel':panel, 'channel':channel}
    #     if self._LI_status:
    #         # If lock-in is running, once stop it and restart after change.
    #         self.set_lock_in_status(False)
    #         self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
    #         self.set_lock_in_status(True)
    #     else:
    #         self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
def set_lock_in_channel(self, val: int): #HE
panel = val[0]
channel = val[1]
LI_panel_channel = {'panel':panel, 'channel':channel}
if self._LI_status:
# If lock-in is running, once stop it and restart after change.
self.set_lock_in_status(False)
self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
self.set_lock_in_status(True)
else:
self.lock_in_send_order(order=1, panel_channel=LI_panel_channel)
def get_used_buses(self):
return self._used_buses
def set_used_buses(self, val: List[int]):
self._used_buses = val
busses_to_use = [False]*8
for n in val:
if n > 7:
print('Bus{:d} is out of range.'.format(n))
else:
busses_to_use[n] = True
self.DAC_send_order(order=1,
busses_to_use=busses_to_use)
def get_ms2wait(self):
return self._ms2wait
def set_ms2wait(self, val: int):
self._ms2wait = val
self.DAC_send_order(order=2,
delay_between_steps_ms = val)
def get_FS_divider(self):
return self._FS_divider
def set_FS_divider(self, val: Union[int, float]):
        """Set the fast-sequence clock divider.

        The running sequence is stopped first; unlike the lock-in setters
        it is NOT restarted automatically. `val` is converted to the raw
        divider via ms2FS_divider().
        """
        if self._FS_status:
            # stop fast sequence if running.
            self.set_FS_status(False)
        self._FS_divider = val
        self.fastseq_set_orders(order = 1,
                                divider = ms2FS_divider(val))
def get_FS_ramp(self):
        """Return True when the fast sequence is in ramp mode."""
        return self._FS_ramp
def set_FS_ramp(self, val: bool):
        """Choose between ramp mode (True) and fast-cycle mode (False).

        Stops a running sequence first; the caller must restart it.
        """
        if self._FS_status:
            # stop fast sequence if running.
            self.set_FS_status(False)
        self._FS_ramp = val
        if val:
            # When ramp mode, unset stop count.
            self.fastseq_set_orders(order=3)
        else:
            # When fast cycle mode ('start'), unset ramp.
            self.fastseq_set_orders(order=2)
def get_FS_pulse_len(self):
        """Return the cached fast-sequence trigger pulse length."""
        return self._FS_pulse_len
def set_FS_pulse_len(self, val:int):
        """Set the fast-sequence trigger pulse length (stops a running sequence)."""
        if self._FS_status:
            # stop fast sequence if running.
            self.set_FS_status(False)
        self._FS_pulse_len = val
        self.fastseq_set_orders(order=4,
                                pulse_length=val)
def get_FS_chan_list(self):
        """Return the cached list of DAC numbers mapped to fast channels."""
        return self._FS_chan_list
def set_FS_chan_list(self, val:List[int]):
        """Map DAC outputs onto the 32 fast-sequence channels.

        Args:
            val: flat DAC numbers (0..63, i.e. panel*8 + channel). Entries
                beyond len(val), or out of range, become dummy channels.
        """
        if self._FS_status:
            # Reprogramming the channel map requires a stopped sequence.
            self.set_FS_status(False)
        self._FS_chan_list = val
        size = len(val)
        for fast_chan in range(32): # HE 32
            # -1 is a sentinel meaning "no DAC assigned to this slot".
            dac_number = val[fast_chan] if fast_chan < size else -1
            if 0 <= dac_number < 64:
                self.fastseq_set_fastChannel(fast_chan_number=fast_chan,
                                             panel_channel={'panel':dac_number // 8, 'channel':dac_number % 8},
                                             is_dummy = False)
            else:
                # Unused or invalid entries are programmed as dummies.
                self.fastseq_set_fastChannel(fast_chan_number=fast_chan,
                                             panel_channel={'panel':0, 'channel':0},
                                             is_dummy = True)
def get_FS_status(self):
        """Return True when the fast sequence is running."""
        return self._FS_status
def set_FS_status(self, val:bool, sample_count=True):
        # Control start and stop of fast sequence.
        # When we start the fast sequence, each time we have to set sample count.
        # Therefore I include it from the beginning.
        if val:
            if sample_count:
                # Re-apply the current sample count through its parameter
                # (getter feeds the setter) before starting.
                self.FS_sample_count(self.FS_sample_count())
            # Start fast sequence
            self.fastseq_set_orders(order=6)
        else:
            # Stop fast sequence
            self.fastseq_set_orders(order=0)
        self._FS_status = val
def get_FS_sample_count(self):
        """Return the cached fast-sequence sample count."""
        return self._FS_sample_count
def set_FS_sample_count(self, val:int):
        """Set the number of samples per fast-sequence run.

        Stops a running sequence first. In ramp mode the count goes to the
        sequence engine (with +2 margin); in fast-cycle mode it becomes the
        stop-sample count.
        """
        if self._FS_status:
            # stop fast sequence if running.
            self.set_FS_status(False)
        self._FS_sample_count = val
        if self._FS_ramp:
            # Ramp mode
            #- For ramp mode we add trigger count +2 (make sure that ADC obtain enough amount of trigger pulse.)
            self.fastseq_set_orders(order=5,
                                    sample_count=val+2)
        else:
            # Fast cycle mode
            self.DAC_set_stop_sample_count(sample_count = val)
def get_FS_move_limit(self):
        """Return the [min, max] voltage limits for fast-sequence DAC moves."""
        return self._FS_move_limit
def set_FS_move_limit(self, val:List[float]):
        """Cache the [min, max] voltage limits applied in fast_seq_set_slots."""
        self._FS_move_limit = val
def get_FS_slots(self):
        """Return the cached (2, N) fast-sequence slot array."""
        return self._FS_slots
def set_FS_slots(self, val:np.ndarray, store_seq2meta=True):
        """Write a (2, N) fast-sequence array to the FPGA slot table.

        Args:
            val: row 0 = slot type, row 1 = slot value
                (see fast_seq_set_slots for the encoding).
            store_seq2meta: also record the sequence in the FS_slots
                parameter metadata.

        Raises:
            ValueError: if val is not a 2-row, 2-D array.
        """
        shape = val.shape
        # Check shape of the input variable
        if (not len(shape) == 2) or (not shape[0]==2):
            raise ValueError('Shape of fast sequence array is invalid.')
        self.fast_seq_set_slots(val)
        if store_seq2meta:
            self.FS_slots.metadata['fast_seq'] = [list(val[0,:]), list(val[1,:])]
        self._FS_slots = val
def get_DAC_values(self, mode:int=1, fill_modules:bool = False):
        """
        Get all the DAC values from FPGA.

        Args:
            mode (int): 0: returns 8 by 8 array,
                        1: returns information only for used buses
            fill_modules (bool): whether we set obtained values to sub-modules or not
                        It is useful when we first define the instrument.

        Returns:
            np.ndarray: (8, 8) array for mode 0, (len(used_buses), 8) for mode 1.
        """
        dac_values = self.DAC_current_values()
        # Fix: fill the sub-modules from the FULL 8x8 array BEFORE reducing
        # it. The previous code reduced first and then indexed by absolute
        # bus number, which crashed (or read wrong rows) for mode==1 with
        # non-contiguous used buses, e.g. _used_buses=[3].
        if fill_modules:
            for n in self._used_buses:
                panel = getattr(self, 'p{:d}'.format(n))
                for c in range(8):
                    ch = getattr(panel, 'c{:d}'.format(c))
                    ch.v(dac_values[n,c])
        if mode==1:
            # Keep only the rows of the buses actually in use.
            a = np.zeros((len(self._used_buses), 8), dtype=float)
            for i, n in enumerate(self._used_buses):
                a[i,:] = dac_values[n,:]
            dac_values = a
        return dac_values
"""-----------------------
Control functions
------------------------"""
def DAC_start_movement(self):
        """
        Start DAC movement (order=0 launches the queued moves; it does not
        wait for completion -- see DAC_wait_end_of_move).
        """
        self.DAC_send_order(order=0)
def init(self, value:float=0.0):
        """
        Initialize all the DAC values in the used buses to "value".
        For the procedure once move all the DAC to -0.01 V and come back
        to the given "value".
        """
        self.move_all_to(-0.01)
        self.move_all_to(value)
    # Convenience aliases for the same operation.
    initialize=init; initialise=init; DAC_init_values=init
"""===================================
FPGA control functions from LabVIEW
==================================="""
def openRef(self):
        # Open FPGA reference (nifpga Session) and keep it on self.ref.
        self.ref = Session(bitfile=self.bitFilePath, resource='rio://'+self.address+'/RIO0')
        # if not (self.ref.fpga_vi_state==nifpga.FpgaViState.Running):
        #     # If not run, run.
        #     self.ref.run()
        # perform lock-in-configure
        self.lock_in_configure_analysis()
def close(self):
        # Close FPGA reference
        self.ref.close()
"""---------------------
Lock-in related functions
------------------------"""
def lock_in_configure_analysis(self):
        """
        One-time FPGA analysis setup, run when the session is opened:
        selects what data is sent back to the host, then programs dt/tau.
        """
        # Data set to host
        self.lock_in_send_analysis(order = {'NULL':0, 'Data_sent_to_host':1, 'dt/tau':2, 'Voltage_range':3}['Data_sent_to_host'],
                                   voltage_range = {'10V':0, '5V':1, '1V':2}['10V'],
                                   dt_over_tau = 0.0,
                                   data_sent_back = {'LI':0, 'average':1}['average'],
                                   )
        # dt/tau (fixed-point filter constant; see lock_in_send_analysis)
        self.lock_in_send_analysis(order = {'NULL':0, 'Data_sent_to_host':1, 'dt/tau':2, 'Voltage_range':3}['dt/tau'],
                                   voltage_range = {'10V':0, '5V':1, '1V':2}['10V'],
                                   dt_over_tau = 8.00006091594696044921875000000000E-6,
                                   data_sent_back = {'LI':0, 'average':1}['average'],
                                   )
def lock_in_send_analysis(self,
                              order = {'NULL':0, 'Data_sent_to_host':1, 'dt/tau':2, 'Voltage_range':3}['Data_sent_to_host'],
                              voltage_range = {'10V':0, '5V':1, '1V':2}['10V'],
                              dt_over_tau = 0.0,
                              data_sent_back = {'LI':0, 'average':1}['average'],
                              ):
        """
        Function to perform initial setup of FPGA.

        Mirrors the frame sequence of the original LabVIEW program: build a
        64-bit order word, write it, pulse 'order Xmitted', and for dt/tau
        busy-wait until the move bus reports ready.

        Args:
            order (int): selection of operation (see dict in the default).
            voltage_range (int): ADC voltage range code.
            dt_over_tau (float): low-pass filter constant, fraction of 1
                (converted to a 32-bit fixed-point value before sending).
            data_sent_back (int): 0 = raw lock-in data, 1 = averaged data.
        """
        # 1st frame of LabVIEW program: encode the order word.
        if order == 0:
            # NULL
            order_number = join_8_8bit264bit(3,0,0,0,0,0,0,0)
        elif order == 1:
            # Data set to host
            order_number = join_8_8bit264bit(3,1,0,0,0,0,0,data_sent_back)
        elif order == 2:
            # dt/tau
            dt_over_tau = dt_over_tau * (2**32) # Convert Fixed point to 32 bit integer
            order_number = join_numbers(3,2,16)
            order_number = join_numbers(order_number, 0, 32)
            order_number = join_numbers(order_number, dt_over_tau, 64)
        elif order == 3:
            # Voltage range
            order_number = join_8_8bit264bit(3,3,0,0,0,0,0,voltage_range)
        # 2nd frame of LabVIEW program: write the word and raise the strobe.
        order_in = self.ref.registers['order in']
        order_in.write(np.uint64(order_number))
        orderXmitted = self.ref.registers['order Xmitted']
        orderXmitted.write(True)
        # 3rd frame of LabVIEW program: hold the strobe briefly, then drop it.
        time.sleep(0.01)
        orderXmitted.write(False)
        # 4th frame of LabVIEW program
        if order == 2:
            # dt/tau
            # Wait until move bus gets ready (busy-wait, no timeout).
            move_bus_ready = self.ref.registers['move bus ready'].read()
            while move_bus_ready == False:
                move_bus_ready = self.ref.registers['move bus ready'].read()
def lock_in_send_order(self,
                           order = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}['inhibate'],
                           frequency = 0.0,
                           amplitude = 0.0,
                           inhibate = False,
                           panel_channel = {'panel':0, 'channel':0},
                           ):
        """
        Send order to lock-in sub-system.

        NOTE(review): calling with order=0 and the default frequency=0.0
        raises ZeroDivisionError -- callers must pass a non-zero frequency.
        """
        if order == 0:
            # Frequency (Hz): the FPGA wants a 25 kHz-based period divider.
            f = 25000/frequency
            if f < 1:
                f = 1
            elif f > 4e9:
                f = 4e9
            # Split the 32-bit divider into four bytes for the order word.
            # (note: the name `f` is reused here for the low half-word)
            f = np.uint32(f)
            a,b = split_number(f, size=32)
            c,d = split_number(a, size=16)
            e,f = split_number(b, size=16)
            order_number = join_8_8bit264bit(2,4,0,0,c,d,e,f)
        elif order == 1:
            # channel
            order_number = join_8_8bit264bit(2,1,0,0,0,0,panel_channel['panel'],panel_channel['channel'])
        elif order == 2:
            # Amplitude, clamped to +/- 5 V then scaled to a 16-bit code.
            if amplitude < -5:
                amplitude = -5
            elif amplitude > 5:
                amplitude = 5
            # a = amplitude/5.0*(2**16)
            a = amplitude/10.0*(2**16)
            a = np.uint16(a)
            b,c = split_number(a, 16)
            order_number = join_8_8bit264bit(2,2,0,0,0,0,b,c)
        elif order == 3:
            # Inhibate (1 = output disabled)
            if inhibate:
                v = 1
            else:
                v = 0
            order_number = join_8_8bit264bit(2,3,0,0,0,0,0,v)
        self.DAC_Xmit_order(order = order_number)
def DAC_lock_in_init(self,
                         frequency = 0.0,
                         amplitude = 0.0,
                         inhibate = True,
                         panel_channel = {'panel':0, 'channel':0},
                         ):
        """
        Initialize the lock-in: stop it, program channel, frequency and
        amplitude, then apply the requested inhibate state.
        """
        order_codes = {'frequency':0, 'channel':1, 'amplitude':2, 'inhibate':3}
        def _send(order_name, inhibate_flag):
            # Forward the full parameter set with the selected order code.
            self.lock_in_send_order(order = order_codes[order_name],
                                    frequency = frequency,
                                    amplitude = amplitude,
                                    inhibate = inhibate_flag,
                                    panel_channel = panel_channel,
                                    )
        # Stop lock-in before changing the setup.
        _send('inhibate', True)
        # Set panel and channel
        _send('channel', inhibate)
        # Set frequency
        _send('frequency', inhibate)
        # Set amplitude
        _send('amplitude', inhibate)
        # Start or not
        _send('inhibate', inhibate)
"""===================
DAC related functions
==================="""
def DAC_set_use_buses(self,
                          busses_to_use = [False]*8,
                          delay_between_steps_ms = 2,
                          ):
        """Program the bus-enable mask and the inter-step delay.

        No-ops entirely when no bus is enabled.
        """
        if True in busses_to_use:
            # Buses to use
            self.DAC_send_order(order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['busses to use'],
                                busses_to_use = busses_to_use,
                                panel_channel = {'panel':0, 'channel':0},
                                DAC_goto_value = 0.0,
                                delay_between_steps_ms = delay_between_steps_ms,
                                )
            # delay between each DAC movement
            self.DAC_send_order(order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['delay'],
                                busses_to_use = busses_to_use,
                                panel_channel = {'panel':0, 'channel':0},
                                DAC_goto_value = 0.0,
                                delay_between_steps_ms = delay_between_steps_ms,
                                )
def DAC_send_order(self,
                       order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['busses to use'],
                       busses_to_use = [False]*8,
                       panel_channel = {'panel':0, 'channel':0},
                       DAC_goto_value = 0.0,
                       delay_between_steps_ms = 2,
                       ):
        """
        This function is used to send an order to DAC.
        Security for DAC go to value will be implemented at different location.
        """
        if order == 0:
            # Start movement
            order_number = join_8_8bit264bit(1,2,0,0,0,0,0,0)
        elif order == 1:
            # buses to use: pack the enable flags into a bit mask.
            bus = 0
            for i, b in enumerate(busses_to_use):
                if b:
                    bus += 2**i
            order_number = join_8_8bit264bit(1,1,0,0,0,0,0,bus)
        elif order == 2:
            # delay
            order_number = join_8_8bit264bit(1,3,0,0,0,0,0,delay_between_steps_ms)
        elif order == 3:
            # value: volts -> offset-binary 16-bit DAC code.
            # NOTE(review): np.int16(...) overflows for DAC_goto_value of
            # exactly +5.0 V (32768 does not fit int16) -- confirm the
            # upstream clamp keeps values strictly inside the range.
            value = np.int16(DAC_goto_value/5.0*32768) + 32768
            a,b = split_number(value, size=16)
            order_number = join_8_8bit264bit(1,4,0,0,panel_channel['panel'],panel_channel['channel'],a,b)
        elif order == 4:
            # stop
            order_number = join_8_8bit264bit(1,5,0,0,0,0,0,0)
        self.DAC_Xmit_order(order=order_number)
def DAC_Xmit_order(self,
                       order=0):
        """
        Main program to send an order to FPGA.

        Writes the 64-bit order word, raises the 'order Xmitted' strobe and
        busy-waits (no timeout) until the FPGA clears it.

        Arg:
            order: uint64
        """
        order_in = self.ref.registers['order in']
        order_Xmitted = self.ref.registers['order Xmitted']
        order_in.write(order)
        order_Xmitted.write(True)
        i=0
        while order_Xmitted.read()==True:
            i+=1
def DAC_set_value(self,
                      panel_channel = {'panel':0, 'channel':0},
                      DAC_goto_value = 0.0,
                      ):
        """
        Set goto value of DAC (the move itself is executed later by
        DAC_start_movement / move()).

        Note:
            Meanwhile I do not implement safety check here since for QCoDeS there is another safety check.
        """
        self.DAC_send_order(order = {'start movement':0, 'busses to use':1, 'delay':2, 'value':3, 'stop':4}['value'],
                            busses_to_use = [False]*8,
                            panel_channel = panel_channel,
                            DAC_goto_value = DAC_goto_value,
                            delay_between_steps_ms = 2,
                            )
def DAC_wait_end_of_move(self):
        """
        Wait until all the DAC movement finishes (busy-wait on the
        'move bus ready' register; no timeout).
        """
        move_bus_ready = self.ref.registers['move bus ready']
        i=0
        while move_bus_ready.read()==False:
            i += 1
def move(self):
        """Execute all pending DAC moves and block until they complete."""
        self.DAC_start_movement()
        self.DAC_wait_end_of_move()
    # Alias kept for backward compatibility.
    DAC_move=move
def move_all_to(self, value:float=0.0):
        """
        Drive every channel of every used bus to "value", then execute
        the move and wait for it to finish.
        """
        for bus in self._used_buses:
            for chan in range(8):
                self.DAC_set_value(panel_channel={'panel': bus, 'channel': chan},
                                   DAC_goto_value=value)
        self.move()
def DAC_current_values(self,precision=4):
        """
        Get current values of DAC.

        Reads back all 64 channels through the get/got handshake registers
        and returns an (8, 8) float array in volts, rounded to `precision`
        decimals.
        """
        # Get rid of an eventual unfinished retrieving sequence
        get_DAC_value = self.ref.registers['get DAC value']
        got_DAC_value = self.ref.registers['got DAC value']
        got_DAC_value.write(True)
        while get_DAC_value.read()==True:
            got_DAC_value.write(True)
        # Read values, one handshake per channel.
        values = np.zeros((8,8),dtype=float)
        DAC_to_retrieve = self.ref.registers['DAC to retrieve']
        DAC_data = self.ref.registers['DAC data']
        for i in range(64):
            DAC_to_retrieve.write(i)
            got_DAC_value.write(True)
            get_DAC_value.write(True)
            j=0
            while got_DAC_value.read()==True:
                j+=1
            data = DAC_data.read()
            # Upper 32 bits: flat panel/channel index; lower: raw DAC code.
            panel_channel, value = split_number(data, size=32)
            panel = int(panel_channel)//8
            channel = int(panel_channel) % 8
            value = (value - 32768)/32768*5.0 # Convert to real unit
            values[panel, channel] = value
            #print(panel,channel,value)
            got_DAC_value.write(True)
        return np.round(values,precision)
values = get_DAC_values
"""========================================
Fast sequence related functions
========================================"""
def fastseq_set_orders(self,
                           order={'stop':0, 'set divider':1, 'unset ramp mode':2, 'unset stop count':3, 'set pulse length':4, 'set ramp':5, 'start':6}['stop'],
                           divider = 6661,
                           pulse_length=0,
                           sample_count = 0,
                           ):
        """
        Program to send an order to fast sequence sub-system.

        Builds the 64-bit order word for the selected operation and
        transmits it via DAC_Xmit_order.
        """
        if order == 0:
            # stop
            order_number = join_8_8bit264bit(5,1,0,0,0,0,0,0)
        elif order == 1:
            # set divider
            order_number = join_numbers(5,7, final_size=16)
            order_number = join_numbers(order_number, 0, final_size=32)
            order_number = join_numbers(order_number, divider, final_size=64)
        elif order == 2:
            # unset ramp mode
            # NOTE(review): this word starts with subsystem byte 6 while all
            # the others use 5 -- confirm against the LabVIEW reference.
            order_number = join_8_8bit264bit(6,9,0,0,0,0,0,0)
        elif order == 3:
            # unset stop count
            order_number = join_8_8bit264bit(5,6,0,0,0,0,0,0)
        elif order == 4:
            # set pulse length
            order_number = join_numbers(5, 10, final_size=16)
            order_number = join_numbers(order_number, 0, final_size=32)
            pulse_length = join_numbers(0, pulse_length, final_size=32)
            order_number = join_numbers(order_number, pulse_length, final_size=64)
        elif order == 5:
            # set ramp
            order_number = join_numbers(5, 8, final_size=16)
            order_number = join_numbers(order_number, 0, final_size=32)
            sample_count = join_numbers(0, sample_count, final_size=32)
            order_number = join_numbers(order_number, sample_count, final_size=64)
        elif order == 6:
            # start
            order_number = join_8_8bit264bit(5,2,0,0,0,0,0,0)
        self.DAC_Xmit_order(order = order_number)
# def fastseq_set_fastChannel(self,
# fast_chan_number=0,
# panel_channel = {'panel':0, 'channel':0},
# is_dummy = False,
# ):
# """
# Allocate DAC panel_channel to fast sequence channels (up to 16 DACs).
# """
# panel = panel_channel['panel']
# if is_dummy:
# # Dummy channel is 255.
# channel = 255
# else:
# channel = panel_channel['channel']
# # Check whether fast_chan_number is out of range or not.
# if fast_chan_number < 0:
# fast_chan_number = 0
# print('fast channel number is out of range and cast to closest available value.')
# elif fast_chan_number > 15:
# fast_chan_number = 15
# print('fast channel number is out of range and cast to closest available value.')
# order_number = join_8_8bit264bit(5,3,0,0,fast_chan_number,0,panel,channel)
# self.DAC_Xmit_order(order = order_number)
def fastseq_set_fastChannel(self,
                                fast_chan_number=0,
                                panel_channel = {'panel':0, 'channel':0},
                                is_dummy = False,
                                ):
        """
        Allocate DAC panel_channel to fast sequence channels (up to 32 DACs). # HE 32
        """
        panel = panel_channel['panel']
        # A dummy slot is encoded as channel 255.
        channel = 255 if is_dummy else panel_channel['channel']
        # Clamp the fast channel index into 0..31, warning on violation.
        if not 0 <= fast_chan_number <= 31:
            fast_chan_number = min(max(fast_chan_number, 0), 31)
            print('fast channel number is out of range and cast to closest available value.')
        order_number = join_8_8bit264bit(5,3,0,0,fast_chan_number,0,panel,channel)
        self.DAC_Xmit_order(order = order_number)
def fastseq_set_slot(self,
                         choice={'DAC':0, 'timing':1, 'triggers':2, 'jump':3}['DAC'],
                         slot_number=0,
                         fast_chan_number=0,
                         DAC_Offset = 0.0,
                         time_ms = 0.0,
                         trigger = {'trig1_ramp':False, 'trig2':False, 'trig3':False, 'trig4':False, 'stop':False},
                         jump2 = 0,
                         ):
        """
        Set fast sequence slot.

        Args:
            choice (int): slot type -- 0: DAC move, 1: timing (wait),
                2: triggers, 3: jump.
            slot_number (int): slot index to write (clamped to 0..65535).
            fast_chan_number (int): fast channel for a DAC slot (0..31).
            DAC_Offset (float): DAC offset in volts, clamped to +/- 5 V.
            time_ms (float): wait time in ms for a timing slot.
            trigger (dict): flags trig1_ramp/trig2/trig3/trig4/stop.
            jump2 (int): destination slot index for a jump slot.
        """
        if choice == 0:
            #DAC: clamp the fast channel to the 32 available channels.
            if fast_chan_number < 0:
                fast_chan_number = 0
            elif fast_chan_number > (2**5-1): # HE 32
                fast_chan_number = (2**5-1)
            val = fast_chan_number + (choice << 4)
            # Fix: removed leftover debug print(val) from this branch.
            order_number = join_numbers(5,4,final_size=16)
            val = join_numbers(val, 0, final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            # detailed safe check will be performed elsewhere
            # here we only check the value is smaller than |5|.
            if DAC_Offset < -5.0:
                DAC_Offset = -5.0
                print('DAC offset input value is not normal. Please check it.')
            elif DAC_Offset > 5.0:
                DAC_Offset = 5.0
                print('DAC offset input value is not normal. Please check it.')
            DAC_Offset = DAC_Offset/5.0 * 32768
            if slot_number < 0:
                slot_number = 0
            elif slot_number > (2**16-1):
                slot_number = 65535
            val = join_numbers(slot_number, DAC_Offset, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        elif choice == 1:
            # Timing
            val = (choice << 4)
            order_number = join_numbers(5,4,final_size=16)
            val = join_numbers(val,0,final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            # Convert time to us
            time_ms = np.abs(time_ms*1000.0)
            if time_ms < 1:
                # Force wait time above 1 us.
                time_ms = 1.0
            # Mantissa/exponent encoding: 11-bit mantissa, power-of-two scale.
            val = np.int64(np.floor(np.log2(time_ms))) - 10
            if val < 0:
                val = 0
            time_ms = np.floor(time_ms * (2.0**(-val)))
            if time_ms > ((2**11)-1):
                # Time(ms) is casted to 11bit in LabVIEW program
                # so I will do the same.
                time_ms = ((2**11)-1)
            val = time_ms + (val << 11)
            val = join_numbers(slot_number, val, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        elif choice == 2:
            # triggers: bit-pack the enabled trigger lines.
            val = (choice << 4)
            order_number = join_numbers(5,4,final_size=16)
            val = join_numbers(val,0,final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            val = 0
            if trigger['trig1_ramp']:
                val += 2**0
            if trigger['trig2']:
                val += 2**1
            if trigger['trig3']:
                val += 2**2
            if trigger['trig4']:
                val += 2**3
            if trigger['stop']:
                val += 2**15
            val = join_numbers(slot_number, val, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        elif choice == 3:
            # jump
            val = (choice << 4)
            order_number = join_numbers(5,4,final_size=16)
            val = join_numbers(val,0,final_size=16)
            order_number = join_numbers(order_number, val, final_size=32)
            val = join_numbers(slot_number, jump2, final_size=32)
            order_number = join_numbers(order_number, val, final_size=64)
        self.DAC_Xmit_order(order = order_number)
def fast_seq_set_slots(self,
                           seq_array: np.ndarray):
        """
        This function set slots of fast sequence by the given array.

        Args:
            seq_array: (2,N) dimensional array
                [Limitation for N: 1<= N <= 4096
                 (0,:) is parameter (0 ~ 31: fast channels, 101: trigger,
                  102: timing (ms), 103: jump, else: raises ValueError)
                 (1,:) is values. (DAC = value offset,
                  trigger = bit wise value for each trigger (1~4, stop)
                  timing = ms to wait, jump = # of slot to jump)]

        Raises:
            ValueError: for any type code outside the supported set.
        """
        # Check array size and cut down if it is too large.
        if seq_array.shape[1] > 4096:
            seq_array = seq_array[:,0:4096]
        N = seq_array.shape[1]
        for i in range(N):
            tp = int(seq_array[0,i])
            value = seq_array[1,i]
            # if tp < 16:
            if tp < 32:
                # DAC shift, clamped to the configured move limits.
                dac_move_min = min(self._FS_move_limit[0], self._FS_move_limit[1])
                dac_move_max = max(self._FS_move_limit[0], self._FS_move_limit[1])
                # Limit check
                if value < dac_move_min:
                    value = dac_move_min
                    print('Compliance is applied and dac move value is cast to {:f}'.format(dac_move_min))
                if value > dac_move_max:
                    value = dac_move_max
                    print('Compliance is applied and dac move value is cast to {:f}'.format(dac_move_max))
                self.fastseq_set_slot(choice=0,
                                      slot_number=i,
                                      fast_chan_number=tp,
                                      DAC_Offset = value)
            elif tp == 101:
                # Trigger control: decode the bit mask into named flags.
                trigger = {'trig1_ramp':False, 'trig2':False, 'trig3':False, 'trig4':False, 'stop':False}
                value = int(value)
                if not (value & 2**0)==0:
                    trigger['trig1_ramp']=True
                if not (value & 2**1)==0:
                    trigger['trig2']=True
                if not (value & 2**2)==0:
                    trigger['trig3']=True
                if not (value & 2**3)==0:
                    trigger['trig4']=True
                if not (value & 2**4)==0:
                    trigger['stop']=True
                self.fastseq_set_slot(choice=2,
                                      slot_number=i,
                                      trigger = trigger)
            elif tp == 102:
                # Timing (wait) (ms)
                self.fastseq_set_slot(choice=1,
                                      slot_number=i,
                                      time_ms = value)
            elif tp == 103:
                # Jump to the slot index given in `value`.
                self.fastseq_set_slot(choice=3,
                                      slot_number=i,
                                      jump2 = np.uint16(value))
            else:
                raise ValueError('fast sequence contains undefined type number.')
def DAC_set_stop_sample_count(self,
                                  sample_count=0,
                                  ):
        """Program the stop-sample count used in fast-cycle mode."""
        order_number = join_numbers(5,5,final_size=16)
        order_number = join_numbers(order_number,0,final_size=32)
        val = join_numbers(0,sample_count,final_size=32)
        order_number = join_numbers(order_number, val, final_size=64)
        self.DAC_Xmit_order(order = order_number)
# """ FUNCTIONS TO CONTROL SHORT-CUT REFERENCE TO NEEL_DAC_CHANNEL """
#
# def configure(self, settings = None):
# """
# This function applies a list of settings on various NEEL_DAC_CHANNELS.
#
# settings (list): list of dictionaries for different channels.
# Example:
# settings = [
# { 'channel': [1,0], 'alias': 'right barrier', 'voltage': -0.1, 'range': [-5.0,+0.3], 'label': r'$V_{\rm BR}$'},
# { 'channel': [2,0], 'alias': 'left barrier', 'voltage': -0.2, 'range': [-5.0,+0.3], 'label': r'$V_{\rm BL}$'},
# ...
# ]
# """
# for setting in settings:
# panel = 'p{:d}'.format(setting['channel'][0])
# channel = 'c{:d}'.format(setting['channel'][1])
# self.v[setting['alias']] = self.submodules[panel].submodules[channel].v
# # transform range-attribute for QCoDeS:
# setting['vals'] = vals.Numbers( np.min(setting['range']), np.max(setting['range']) )
# # set voltage:
# self.v[setting['alias']].set(setting['voltage'])
# # set channel attributes:
# for key, item in setting.items():
# try:
# setattr(self.v[setting['alias']], key, item)
# except:
# #print(key,'not found!') # for testing of code
# pass
# def clear_v(self, aliases = None):
# Manual hardware smoke test: programs and runs a 403-sample ramp sequence.
if __name__=='__main__':
    dac = NEEL_DAC('dac')
    #------------------------
    # Test DAC movement
    #------------------------
    # dac.p0.c0.v(-0.0)
    # dac.DAC_start_movement()
    # dac.DAC_wait_end_of_move()
    #
    # # Test lock-in
    # dac.LI_status(False)
    # dac.LI_frequency(20.0)
    # dac.LI_amplitude(0.2)
    # dac.LI_channel(0)
    # dac.LI_status(False)
    #------------------------
    # Test fast sequence
    #------------------------
    ramp = True
    divider = 6661
    sample_count = 403
    # Stop fast sequence
    dac.FS_status(False)
    # Set fast sequence divider
    dac.FS_divider(divider)
    # set operation mode ('ramp' or 'start')
    dac.FS_ramp(ramp)
    # Set fast sequence channels
    dac.FS_chan_list(list(range(16)))
    # Set pulse length
    dac.FS_pulse_len(1000)
    # Set fast sequence: slot 0 = trigger, then a linear ramp, final slot
    # jumps back onto itself to hold.
    seq_array = np.zeros((2,sample_count))
    seq_array[:,0] = [101,0]
    seq_array[1,1:sample_count-1] = np.linspace(0.0, -0.5,num=sample_count-2)
    seq_array[:,sample_count-1] = [103, sample_count-1]
    dac.FS_slots(seq_array)
    # Set sample count
    size = seq_array.shape[1]
    dac.FS_sample_count(size)
    dac.FS_status(True)
    # sleep long enough for the whole sequence to play out
    sleep_time = 4.5e-7*divider*sample_count+5
    time.sleep(sleep_time)
    dac.FS_status(False)
    dac.close()
| StarcoderdataPython |
9721086 | '''
Created on Nov 29, 2011
@author: jpoyau
'''
from service_template import ServiceTemplate
from setup_utils import getInput, PublishDoc, isBoolean, YES, isInt
import pystache, uuid
import json
def install(server, dbname, setupInfo):
    """Interactively configure and install the Network Node Status service.

    Prompts whether the service should be active, then delegates to the
    service template with the collected options.
    """
    enable = getInput("Enable Network Node Status?", "T", isBoolean)
    custom_opts = {
        "active": enable.lower() in YES,
        "node_endpoint": setupInfo["nodeUrl"],
        "service_id": uuid.uuid4().hex,
    }
    return __NetworkNodeStatusServiceTemplate().install(server, dbname, custom_opts)
class __NetworkNodeStatusServiceTemplate(ServiceTemplate):
    """Service template for the Network Node Status administrative service."""
    def __init__(self):
        ServiceTemplate.__init__(self)
        # No extra service data beyond the template defaults.
        self.service_data_template = '''{}'''
    def _optsoverride(self):
        """Return the fixed option overrides describing this service."""
        opts = {
            "active": "false",
            "service_type": "administrative",
            "service_name": "Network Node Status",
            "service_version": "0.23.0",
            "service_endpoint": "/status",
            "service_key": "false",
            "service_https": "false",
        }
        return opts
if __name__ == "__main__":
import couchdb
nodeSetup = {
'couchDBUrl': "http://localhost:5984",
'nodeUrl': "http://test.example.com"
}
def doesNotEndInSlash(input=None):
return input is not None and input[-1] != "/"
def notExample(input=None):
return input is not None and input != nodeSetup["nodeUrl"]
nodeSetup["couchDBUrl"] = getInput("Enter the CouchDB URL:", nodeSetup["couchDBUrl"], doesNotEndInSlash)
nodeSetup["nodeUrl"] = getInput("Enter the public URL of the LR Node", nodeSetup["nodeUrl"], notExample)
server = couchdb.Server(url= nodeSetup['couchDBUrl'])
install(server, "node", nodeSetup)
| StarcoderdataPython |
336633 | <reponame>lezh/ai-platform-samples<filename>notebooks/samples/tensorflow/sentiment_analysis/dataflow/PubSubToBigQueryWithAIPlatform.py
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API"""
import argparse
import datetime
import json
import logging
import numpy as np
import os
import socket
import subprocess
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.util import BatchElements
from googleapiclient import discovery
from googleapiclient.errors import HttpError
TIMEOUT_IN_SEC = 60 * 2  # 2 minutes timeout limit
# Apply the timeout to every new socket (covers the API client transport).
socket.setdefaulttimeout(TIMEOUT_IN_SEC)

PROJECT_ID = os.getenv('PROJECT_ID')
MODEL_NAME = os.getenv('MODEL_NAME', 'sentiment_classifier')
MODEL_VERSION = 'projects/{}/models/{}'.format(PROJECT_ID, MODEL_NAME)  # Required field.
# Module-level singleton, created lazily by initialize_api().
api_client = None
def initialize_api():
    """Lazily create the shared AI Platform ('ml', v1) API client.

    Idempotent: subsequent calls reuse the module-level singleton.
    """
    global api_client
    if api_client is not None:
        return
    api_client = discovery.build('ml', 'v1', cache_discovery=True)
def get_sentiment(instances):
    """Calls the Model prediction API on AI Platform to get scores.

    Args:
        instances: list of UTF-8 text payloads to classify.

    Returns:
        [float]: one estimated score per instance (-1 when a prediction
        carries no score), or an empty list when the API call failed.
    """
    # Init the Platform API (lazy singleton).
    initialize_api()
    # Call the model
    try:
        responses = api_client.projects().predict(body={'instances': instances},
                                                  name=MODEL_VERSION,
                                                  ).execute()
        # Fix: only substitute -1 when the score is genuinely absent.
        # The previous truthiness test also mapped a valid score of 0.0
        # to -1.
        scores = []
        for response in responses['predictions']:
            score = response.get('score')
            scores.append(score if score is not None else -1)
        return scores
    except HttpError as err:
        logging.exception(err)
        # Fix: return an empty list instead of implicit None so callers
        # (prediction_helper) can safely take len() of the result.
        return []
def format_text(text):
    """Normalize tweet text to a UTF-8 byte string.

    Args:
        text: the tweet text; must be truthy.

    Returns:
        UTF-8 encoded bytes for (unicode) strings; str() conversion for
        other types, matching the original Python 2 behavior.

    Raises:
        ValueError: if text is empty or None.
    """
    if not text:
        raise ValueError('Empty text')
    # Fix: the original tested `isinstance(text, unicode)`, which raises
    # NameError on Python 3 where the `unicode` built-in no longer exists.
    if isinstance(text, bytes):
        # Already encoded (Python 2 `str` falls in here too).
        return text
    try:
        return text.encode('utf-8')
    except AttributeError:
        # Non-string payloads keep the original str() fallback.
        return str(text)
def prediction_helper(messages):
    """Processes PubSub messages and calls AI Platform prediction.

    Args:
        messages: a JSON string, or a list of JSON strings, from Pub/Sub.

    Returns:
        list[dict]: the decoded messages with a 'sentiment' field added,
        or None when scoring failed or counts did not line up.
    """
    # Handle single string uniformly as a batch of one.
    if not isinstance(messages, list):
        messages = [messages]
    # Messages from PubSub are JSON strings
    instances = [json.loads(message) for message in messages]
    # Estimate the sentiment of the 'text' of each tweet
    scores = get_sentiment(
        [format_text(instance.get('text')) for instance in instances if
         instance.get('text')])
    # Fix: get_sentiment may fail (returning None/[]); guard before len()
    # so a failed API call logs instead of raising TypeError.
    if scores and len(scores) == len(instances):
        for i, instance in enumerate(instances):
            instance['sentiment'] = scores[i]
        return instances
    logging.error('Invalid scores %s instances %s',
                  len(scores) if scores is not None else None,
                  len(instances))
    return
def run(args, pipeline_args=None):
    """Executes Pipeline.

    Builds and runs the streaming pipeline:
    Pub/Sub -> window/batch -> AI Platform sentiment -> BigQuery.

    :param args: parsed CLI arguments (see __main__ argparse setup).
    :param pipeline_args: remaining arguments forwarded to Beam.
    :return:
    """
    """Build and run the pipeline."""
    # We use the save_main_session option because one or more DoFn's in this
    # workflow rely on global context (e.g., a module imported at module level).
    pipeline_options = PipelineOptions(
        pipeline_args, streaming=True, save_main_session=True
    )
    pipeline_options.view_as(StandardOptions).runner = args.runner

    # Run on Cloud DataFlow by default
    google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
    google_cloud_options.project = PROJECT_ID
    google_cloud_options.job_name = 'pubsub-aiplatform-bigquery'
    google_cloud_options.staging_location = args.staging_location
    google_cloud_options.temp_location = args.temp_location
    google_cloud_options.region = args.region

    p = beam.Pipeline(options=pipeline_options)

    lines = p | 'read in tweets' >> beam.io.ReadFromPubSub(
        topic=args.input_topic,
        with_attributes=False,
        id_label='tweet_id')  # TODO: Change to PubSub id.

    # Window them, and batch them into batches. (Not too large)
    output_tweets = (lines | 'assign window key' >> beam.WindowInto(
        window.FixedWindows(args.window_size))
                     | 'batch into n batches' >> BatchElements(
                min_batch_size=args.min_batch_size,
                max_batch_size=args.max_batch_size)
                     | 'predict sentiment' >> beam.FlatMap(
                lambda messages: prediction_helper(messages))
                     )

    # Make explicit BQ schema for output tables:
    bq_schema_json = {"fields": [{"name": "id", "type": "STRING"},
                                 {"name": "text", "type": "STRING"},
                                 {"name": "user_id", "type": "STRING"},
                                 {"name": "sentiment", "type": "FLOAT"},
                                 {"name": "posted_at", "type": "TIMESTAMP"},
                                 {"name": "favorite_count", "type": "INTEGER"},
                                 {"name": "retweet_count", "type": "INTEGER"},
                                 {"name": "media", "type": "STRING"},
                                 ]}
    bq_schema = parse_table_schema_from_json(json.dumps(bq_schema_json))

    # Write to BigQuery
    output_tweets | 'store twitter posts' >> beam.io.WriteToBigQuery(
        table=args.bigquery_table,
        dataset=args.bigquery_dataset,
        schema=bq_schema,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        project=PROJECT_ID
    )
    result = p.run()
    result.wait_until_finish()
# CLI entry point: parse arguments and launch the streaming pipeline.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input-topic',
        help='The Cloud Pub/Sub topic to read from.\n'
             'projects/<PROJECT_NAME>/topics/<TOPIC_NAME>'
    )
    parser.add_argument(
        '--region',
        help='The DataFlow region',
        default='us-central1'
    )
    parser.add_argument(
        '--staging-location',
        help='The DataFlow staging location',
        default='gs://<bucket_name>/staging/'
    )
    parser.add_argument(
        '--temp-location',
        help='The DataFlow temp location',
        default='gs://<bucket_name>/tmp/'
    )
    parser.add_argument(
        '--bigquery-dataset',
        help='BigQuery dataset',
        required=True
    )
    parser.add_argument(
        '--bigquery-table',
        help='BigQuery OutPut table',
        required=True
    )
    parser.add_argument(
        '--window-size',
        type=int,
        default=60,
        help="Output file's window size in number of seconds",
    )
    parser.add_argument(
        '--min-batch-size',
        type=int,
        default=9,
        help='Min batch size for Windowing',
    )
    parser.add_argument(
        '--max-batch-size',
        type=int,
        default=10,
        # Fix: help text was a copy-paste of the --min-batch-size help.
        help='Max batch size for Windowing',
    )
    parser.add_argument(
        '--runner',
        type=str,
        default='DataflowRunner',
        help='DataFlow running mode',
    )
    known_args, pipeline_args = parser.parse_known_args()
    run(
        known_args,
        pipeline_args
    )
| StarcoderdataPython |
6567320 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 29 17:07:52 2017
@author: Rigved
"""
#data analysis and wrangling
import pandas as pd
#import numpy as np
#import random as rnd
# visualization
import matplotlib.pyplot as plt
#import plotly.offline as py
#import plotly.graph_objs as go
import seaborn as sns
# Load the raw export; columns 4 and 5 ('Created on', 'Time') are merged into
# one datetime column. Malformed rows are skipped rather than aborting.
# NOTE(review): error_bad_lines is deprecated in recent pandas -- confirm the
# pandas version in use.
train_df = pd.read_csv('test2.csv', parse_dates=[[4, 5]], error_bad_lines=False)

# Quick structural overview of the data.
train_df.head()
train_df.tail()
train_df.info()
train_df.describe()
train_df.describe(include=['O'])

# Total net price per sales group / SG supervisor / state.
# NOTE(review): the code mixes 'Net_price' and 'Net price' as column names --
# confirm which spelling(s) the CSV actually contains.
sg = train_df[['Net_price', 'SG']].groupby(['SG'], as_index=False).sum().sort_values(by='Net_price', ascending=False)
sgsup = train_df[['Net_price', 'SGsup']].groupby(['SGsup'], as_index=False).sum().sort_values(by='Net_price', ascending=False)
df = train_df[['Net price', 'State']].groupby(['State'], as_index=False).sum().sort_values(by='Net price', ascending=False)

# Distribution plots of the aggregated totals.
g = sns.FacetGrid(df, col='State')
g.map(plt.hist, 'Net price')
sns.stripplot(x="State", y="Net price", data=df)
sns.stripplot(x="SG", y="Net price", data=sg)
sns.stripplot(x="SGsup", y="Net price", data=sgsup)

# Standardize the aggregated net price column.
# BUG FIX: the original did `df = df.to_string()` (converting the whole frame
# to one string) before scaling, so StandardScaler.fit_transform and the later
# describe()/info()/plot calls all failed. Scale only the numeric column and
# keep df a DataFrame.
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
df[['Net price']] = sc_X.fit_transform(df[['Net price']])
df.describe()
df.info()
plt.plot(df['State'], df['Net price'])
plt.show()
df.describe()
6627050 | import sys
import re
if len(sys.argv) < 3 :
print "Usage: pyton extractOptimizedCoords.py input.log xyz|gjf"
print "The output file name will be input[_optimized]_out.xyz|gjf"
print "If optimization failed, the coordinates for the lowest energy structure will be used."
exit()
finput = sys.argv[1]
fformat = sys.argv[2]
if fformat not in ["xyz", "gjf"] :
print "The output file format has to be either xyz or gjf"
exit()
def getEnergy(structure) :
    """Return the SCF energy parsed from one Gaussian output block.

    Falls back to 1000.0 (an impossibly high energy) when no
    ' SCF Done:' line is present, so failed blocks sort last.
    """
    for row in structure.split("\n"):
        if row.startswith(" SCF Done:"):
            value = row.split("=")[1].strip()
            return float(re.split(" +", value)[0])
    return 1000.0
# Parse the log into: the header info block, every intermediate structure
# (paired with its SCF energy), and -- if the optimization converged -- the
# final optimized structure.
infoBlock = ""
optimized = False
optimized_structure = ""
with open(finput, "r") as fin :
    # Structure and gradient sections alternate, both delimited by ' GradGrad'
    # banner lines; everything before the first banner is header info.
    isStructure = True
    isInfo = True
    structures = []
    currentStructure = ""
    for line in fin :
        if line.startswith(" GradGrad") :
            if isInfo :
                isInfo = False
            if currentStructure != "" :
                structures.append((getEnergy(currentStructure), currentStructure))
            currentStructure = ""
            isStructure = not isStructure
        elif isInfo :
            infoBlock += line
        elif isStructure :
            currentStructure += line
        else :
            # Gradient section: Gaussian prints 'Optimized' here on convergence.
            if line.find("Optimized") != -1 :
                optimized = True
    if optimized :
        optimized_structure = currentStructure
    else :
        # No convergence: fall back to the lowest-energy structure seen so far.
        if currentStructure != "" :
            structures.append((getEnergy(currentStructure), currentStructure))
        structures = sorted(structures, key=lambda item : item[0])
        optimized_structure = structures[0][1]
def findInList(dataList, target) :
    """Return the index of the first element containing *target*, or -1."""
    for idx, entry in enumerate(dataList):
        if entry.find(target) != -1:
            return idx
    return -1
def getCoordinates(dataList) :
    """Return the atom rows of the 'Standard orientation' table in *dataList*.

    Skips the 5 header lines after the table title and stops at the closing
    '-----' separator.
    """
    start = findInList(dataList, "Standard orientation")
    table = dataList[start + 5:]
    return table[:findInList(table, "-----")]
def getChargeAndMultiplicity(infoBlock) :
    """Return (charge, multiplicity) parsed from the Gaussian header block.

    Returns (-1, -1) when no ' Charge = ' line is found.
    """
    for row in infoBlock.split("\n"):
        if row.startswith(" Charge = "):
            fields = re.split(" +", row.strip())
            return (int(fields[2]), int(fields[5]))
    return (-1, -1)
# Map from atomic number (as a string) to element symbol.
# FIX: elements 112-118 previously used the pre-2016 provisional IUPAC
# placeholders (Uub, Uut, Uuq, Uup, Uuh, Uus, Uuo); they now carry the
# official symbols Cn, Nh, Fl, Mc, Lv, Ts, Og.
code = {"1"  : "H",  "2"   : "He", "3"   : "Li", "4"   : "Be", "5"   : "B", \
        "6"  : "C",  "7"   : "N",  "8"   : "O",  "9"   : "F",  "10"  : "Ne", \
        "11" : "Na", "12"  : "Mg", "13"  : "Al", "14"  : "Si", "15"  : "P", \
        "16" : "S",  "17"  : "Cl", "18"  : "Ar", "19"  : "K",  "20"  : "Ca", \
        "21" : "Sc", "22"  : "Ti", "23"  : "V",  "24"  : "Cr", "25"  : "Mn", \
        "26" : "Fe", "27"  : "Co", "28"  : "Ni", "29"  : "Cu", "30"  : "Zn", \
        "31" : "Ga", "32"  : "Ge", "33"  : "As", "34"  : "Se", "35"  : "Br", \
        "36" : "Kr", "37"  : "Rb", "38"  : "Sr", "39"  : "Y",  "40"  : "Zr", \
        "41" : "Nb", "42"  : "Mo", "43"  : "Tc", "44"  : "Ru", "45"  : "Rh", \
        "46" : "Pd", "47"  : "Ag", "48"  : "Cd", "49"  : "In", "50"  : "Sn", \
        "51" : "Sb", "52"  : "Te", "53"  : "I",  "54"  : "Xe", "55"  : "Cs", \
        "56" : "Ba", "57"  : "La", "58"  : "Ce", "59"  : "Pr", "60"  : "Nd", \
        "61" : "Pm", "62"  : "Sm", "63"  : "Eu", "64"  : "Gd", "65"  : "Tb", \
        "66" : "Dy", "67"  : "Ho", "68"  : "Er", "69"  : "Tm", "70"  : "Yb", \
        "71" : "Lu", "72"  : "Hf", "73"  : "Ta", "74"  : "W",  "75"  : "Re", \
        "76" : "Os", "77"  : "Ir", "78"  : "Pt", "79"  : "Au", "80"  : "Hg", \
        "81" : "Tl", "82"  : "Pb", "83"  : "Bi", "84"  : "Po", "85"  : "At", \
        "86" : "Rn", "87"  : "Fr", "88"  : "Ra", "89"  : "Ac", "90"  : "Th", \
        "91" : "Pa", "92"  : "U",  "93"  : "Np", "94"  : "Pu", "95"  : "Am", \
        "96" : "Cm", "97"  : "Bk", "98"  : "Cf", "99"  : "Es", "100" : "Fm", \
        "101": "Md", "102" : "No", "103" : "Lr", "104" : "Rf", "105" : "Db", \
        "106": "Sg", "107" : "Bh", "108" : "Hs", "109" : "Mt", "110" : "Ds", \
        "111": "Rg", "112" : "Cn", "113" : "Nh", "114" : "Fl", "115" : "Mc", \
        "116": "Lv", "117" : "Ts", "118" : "Og"}
# Derive the output/checkpoint file names from the input name.
# BUG FIX: finput.strip(".log") removes any of the characters '.', 'l', 'o',
# 'g' from BOTH ends of the string (e.g. 'glycol.log' -> 'ycol'), it does not
# remove the suffix. Slice the extension off explicitly instead.
if finput.endswith(".log") :
    prefix = finput[:-len(".log")]
else :
    prefix = finput
foutput= ""
chk = ""
if optimized :
    chk = prefix + "_optimized_out.chk"
    foutput = prefix + "_optimized_out." + fformat
else :
    chk = prefix + "_out.chk"
    foutput = prefix + "_out." + fformat
# Write the extracted coordinates in the requested format (xyz or gjf).
with open(foutput, "w") as fout :
    dataList = optimized_structure.split("\n")
    atoms = getCoordinates(dataList)
    # print format specific headers
    if fformat == "xyz" :
        # xyz header: atom count followed by a blank comment line.
        fout.write(str(len(atoms)) + "\n\n")
    else :
        # Gaussian input header: %mem/%nprocshared/route left blank for the
        # user to fill in; charge/multiplicity come from the parsed header.
        fout.write("%mem=\n%nprocshared=\n%chk=" + chk + "\n# \n\nComplex " + prefix + "\n\n")
        charge, multiplicity = getChargeAndMultiplicity(infoBlock)
        fout.write(str(charge) + " " + str(multiplicity) + "\n")
    # One line per atom: symbol then x/y/z (columns 3-5 of the orientation
    # table); unknown atomic numbers fall back to 'X'.
    for atom in atoms :
        arr = atom.split()
        symbol = code.get(arr[1], 'X')
        fout.write("  %s %16.7f %16.7f %16.7f\n" % (symbol,float(arr[3]),float(arr[4]),float(arr[5])))
    if fformat == "gjf" :
        fout.write("\n")
363177 | <gh_stars>0
#-*-coding:utf-8-*-
import redis

# Redis client using the library's default connection settings (no host/port
# configured here).
conn = redis.Redis()
3281960 | <gh_stars>0
from unittest.mock import patch
import pandas as pd
import pytest
from powersimdata.tests.mock_scenario import MockScenario
from postreise.plot.plot_bar_generation_stack import plot_bar_generation_stack
mock_plant = {
"plant_id": ["A", "B", "C", "D", "E", "F", "G", "H"],
"zone_id": [301, 302, 303, 304, 305, 306, 307, 308],
"Pmax": [100, 75, 150, 30, 50, 300, 200, 80],
"Pmin": [0, 0, 0, 0, 0, 100, 10, 0],
"type": ["solar", "wind", "solar", "wind", "wind", "solar", "solar", "wind"],
"zone_name": [
"Far West",
"North",
"West",
"South",
"North Central",
"South Central",
"Coast",
"East",
],
}
mock_solar = pd.DataFrame(
{
"A": [95, 95, 96, 94],
"C": [140, 135, 136, 144],
"F": [299, 298, 299, 298],
"G": [195, 195, 193, 199],
},
index=pd.date_range(start="2016-01-01", periods=4, freq="H"),
)
mock_wind = pd.DataFrame(
{
"B": [70, 71, 70, 72],
"D": [29, 29, 29, 29],
"E": [40, 39, 38, 41],
"H": [71, 74, 68, 69],
},
index=pd.date_range(start="2016-01-01", periods=4, freq="H"),
)
mock_pg = pd.DataFrame(
{
"A": [80, 75, 72, 81],
"B": [22, 22, 25, 20],
"C": [130, 130, 130, 130],
"D": [25, 26, 27, 28],
"E": [10, 11, 9, 12],
"F": [290, 295, 295, 294],
"G": [190, 190, 191, 190],
"H": [61, 63, 65, 67],
},
index=pd.date_range(start="2016-01-01", periods=4, freq="H"),
)
mock_demand = pd.DataFrame(
{
301: [11, 12, 13, 14],
302: [2, 3, 8, 10],
303: [38, 39, 40, 40],
304: [20, 19, 18, 17],
305: [4, 3, 2, 1],
306: [200, 250, 225, 275],
307: [100, 125, 150, 175],
308: [36, 36, 37, 38],
},
index=pd.date_range(start="2016-01-01", periods=4, freq="H"),
)
grid_attrs = {"plant": mock_plant}
scenario = MockScenario(
grid_attrs, pg=mock_pg, solar=mock_solar, wind=mock_wind, demand=mock_demand
)
scenario.info["name"] = "Best Scenario"
scenario.info["interconnect"] = "Texas"
scenario.info["start_date"] = pd.Timestamp(2016, 1, 1)
scenario.info["end_date"] = pd.Timestamp(2016, 1, 1, 3)
scenario.state.grid.interconnect = ["Texas"]
scenario.state.grid.id2zone = {
k: v for k, v in zip(mock_plant["zone_id"], mock_plant["zone_name"])
}
scenario.state.grid.zone2id = {v: k for k, v in scenario.state.grid.id2zone.items()}
@patch("postreise.plot.plot_bar_generation_stack.Scenario", return_value=scenario)
def test_plot_bar_generation_stack(mock_scenario):
    """Smoke-test the plot for several argument combinations (plot_show=False).

    FIX: the injected argument is the Mock created by @patch, not pytest's
    built-in `monkeypatch` fixture; the old parameter name was misleading.
    """
    plot_bar_generation_stack(
        "Texas", 1000, "wind", area_types="interconnect", plot_show=False
    )
    plot_bar_generation_stack(
        "Texas",
        1000,
        "wind",
        area_types="interconnect",
        scenario_names="Worst Scenario",
        plot_show=False,
    )
    plot_bar_generation_stack(
        "Far West",
        200,
        "solar",
        area_types="loadzone",
        titles={"Far West": "Where?"},
        t2c={"solar": "#FFBB45"},
        t2hc={"solar_curtailment": "#996100"},
        t2l={"solar": "shiny"},
        curtailment_split=False,
        plot_show=False,
    )
def test_plot_bar_generation_stack_argument_type():
    """Wrong argument types must raise TypeError with a helpful message."""
    cases = [
        (("Western", {823, 824}, ["wind", "solar", "coal"]),
         {},
         "scenario_ids must be a int, str or list"),
        (("Western", [823, 824], {"wind", "solar", "coal"}),
         {},
         "resources must be a list or str"),
        ((["Western", "Eastern"], [823, 824], ["wind", "solar", "coal"]),
         {"titles": ["WECC", "EI"]},
         "titles must be a dictionary"),
        ((["Western", "Eastern"], [823, 824], ["wind", "solar", "coal"]),
         {"save": True, "filenames": ["WECC", "EI"]},
         "filenames must be a dictionary"),
    ]
    for args, kwargs, message in cases:
        with pytest.raises(TypeError) as excinfo:
            plot_bar_generation_stack(*args, **kwargs)
        assert message in str(excinfo.value)
def test_plot_bar_generation_stack_argument_value():
    """Inconsistently sized argument values must raise ValueError."""
    cases = [
        ({"area_types": "interconnect"},
         "area_types must have same size as areas"),
        ({"scenario_names": "USA Basecase"},
         "scenario_names must have same size as scenario_ids"),
    ]
    for kwargs, message in cases:
        with pytest.raises(ValueError) as excinfo:
            plot_bar_generation_stack(
                ["Western", "Eastern"],
                [823, 824],
                ["wind", "solar", "coal"],
                **kwargs,
            )
        assert message in str(excinfo.value)
def fib(num):
    """Print (space-separated, no newline) and return the Fibonacci numbers below *num*.

    Starts from 1, 1. The printed output is unchanged from the original;
    the values are additionally returned as a list so callers can test or
    reuse them (the original returned None).
    """
    values = []
    a, b = 1, 1
    while a < num:
        print(a, end=" ")
        values.append(a)
        a, b = b, a + b
    return values
# Demo: print the Fibonacci numbers below 100.
fib(100)
6695081 | #
# ===============LICENSE_START=======================================================
# Acumos
# ===================================================================================
# Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by AT&T
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END=========================================================
from flask_restplus import fields
from predictormanagerservice.api.namespaces import predictors_namespace as api
# Request payload schema for creating a predictor.
create_fields = api.model('CreatePredictor', {
    'predictorName': fields.String(required=True, description='Name of the Predictor.', example='PMMLPredictor'),
    'predictorType': fields.String(required=True, example='PMML'),
    'description': fields.String(required=True, description='Description of current Predictor',
                                 example='This is a test.'),
})
# Request payload schema for updating a predictor; only the description field
# is accepted here.
update_fields = api.model('UpdatePredictor', {
    'description': fields.String(required=True, description='Description of current Predictor',
                                 example='This is a test.'),
})
| StarcoderdataPython |
12861781 | from django.shortcuts import render
# Create your views here.
def home(request):
    """Render the site landing page."""
    return render(request,'core/index.html')
def detail(request):
    """Detail view -- not implemented yet.

    TODO: implement. As written this returns None, which Django rejects at
    request time (views must return an HttpResponse).
    """
    pass
| StarcoderdataPython |
275043 | <filename>hexa/pipelines/management/commands/environment_sync.py
from logging import getLogger
from django.core.management.base import BaseCommand
from django.db import transaction
from hexa.pipelines.models import Environment
from hexa.plugins.app import ConnectorAppConfig
logger = getLogger(__name__)
class Command(BaseCommand):
    """Synchronize every auto-sync Environment provided by sync-capable apps."""

    def add_arguments(self, parser):
        parser.add_argument(
            "--app", dest="filter_app", help="Limit the sync to a single app"
        )

    def handle(self, *args, filter_app, **options):
        sync_capable = ConnectorAppConfig.get_models_by_capability("sync", filter_app)

        for app, model_list in sync_capable.items():
            for model_cls in model_list:
                # Skip sync-capable models that are not environments.
                if not issubclass(model_cls, Environment):
                    continue
                for environment in model_cls.objects.all():
                    if not environment.auto_sync:
                        logger.info(
                            "sync environment %s:%s skipped", model_cls, environment.id
                        )
                        continue
                    try:
                        logger.info("sync environment %s:%s", model_cls, environment.id)
                        # Each environment syncs in its own transaction so one
                        # failure does not roll back the others.
                        with transaction.atomic():
                            environment.sync()
                    except Exception:
                        logger.exception("sync error")
| StarcoderdataPython |
137541 | '''
Psychophysics.py: Script containing functions to measure relevant psychophysical metrics for various tasks.
'''
import matplotlib.pyplot as plt
import numpy as np
import numpy.random as npr
import time
def dRSG_metronomic(RNN, ntrials=1, threshold=0.1, showtrialplots=1, **kwargs):
    '''
    Function designed for the delayed ready-set-go (dRSG) task. The network should be
    trained using the RHY task, with n_ints=1 and cont=1 (one interval, varying continuously).
    The function will plot the "metronomic" curve for the task, showing the relationship between the
    sampled interval (t_s) and the produced interval (t_p). The function draws values of t_s from the
    default range for the RHY function.

    Inputs:
    RNN: The trained JazNet.
    ntrials: Number of trials to use per t_s.
    threshold: Error threshold for considering a trial to be a "success"
    showtrialplots: Determines if a plot is created showing the network's output for each condition.

    Outputs:
    ts: Array with ntrials columns, where each row is a different interval, containing sample intervals
    tp: Same as above for produced intervals
    Also produces a scatter plot showing the "metronomic curve" (ts vs tp)
    '''
    from JazNets.Tasks import RHY
    import inspect

    # Initialize network activity by running a few warm-up trials.
    RNN.initialize_act()
    init_trials = 3
    print('Initializing',end="")
    for init_trial in range(init_trials):
        inp = RHY(n_ints=1, cont=1, **kwargs)[0]
        RNN.run(inp)
        print('.',end="")
    print("")

    # Set values of the intervals (based on defaults from Tasks.RHY).
    # BUG FIX: the original iterated `range(len(argspecs))` (the namedtuple's
    # field count, always 7) and indexed `argspecs.defaults` with positions
    # taken from `argspecs.args`. `defaults` only lines up with the *tail* of
    # `args`, so the lookup was misaligned and could raise IndexError. Build
    # an explicit name -> default mapping instead.
    argspecs = inspect.getfullargspec(RHY)
    offset = len(argspecs.args) - len(argspecs.defaults)
    rhy_defaults = dict(zip(argspecs.args[offset:], argspecs.defaults))
    int_min = rhy_defaults['int_min']
    int_max = rhy_defaults['int_max']
    int_times = np.linspace(int_min, int_max, 11)
    npatterns = len(int_times)

    # Initialize outputs
    ts = np.zeros((npatterns,ntrials))
    tp = np.zeros((npatterns,ntrials))
    dt = RNN.p['dt']
    if showtrialplots:
        out_fig = plt.figure()
        out_ax = out_fig.add_subplot(111)

    for pat_idx in range(npatterns):  # Iterate over interval patterns
        pat = int_times[pat_idx]
        if showtrialplots:
            # Plot the input (green) and target (red) aligned to the trigger.
            inp, targ = RHY(ints=[pat], **kwargs)[0:2]
            trig = np.argmax(inp[:,1])
            out_ax.plot(inp[trig:], 'g--')
            out_ax.plot(1+targ[trig:], 'r--')
        else:
            print('Interval: %gs' % pat, end="")
        for trial in range(ntrials):
            s = 0
            nopes = 0
            while not s:  # Repeat until a successful run
                inp, targ = RHY(ints=[pat], **kwargs)[0:2]
                out = RNN.run(inp)[0]
                error = np.mean((targ-out)**2)/np.mean((targ)**2)
                #print(error) # Use this line to see the error if the network is failing a lot
                if error<threshold:  # Consider it a success if below threshold
                    s = 1
                    # Sample/produced intervals from the peak times of the
                    # two output channels.
                    ts[pat_idx,trial] = (np.argmax(targ[:,1]) - np.argmax(targ[:,0]))*dt
                    tp[pat_idx,trial] = (np.argmax(out[:,1])-np.argmax(out[:,0]))*dt
                    if showtrialplots:
                        trig = np.argmax(inp[:,1])
                        out_ax.plot(1+out[trig:], 'b', alpha=0.2)
                        out_ax.set_title(pat)
                        out_fig.canvas.draw()
                    elif not trial % (ntrials/50):
                        print('.',end="")
                    else:
                        print(',',end="")
                nopes += 1
                if nopes>100:
                    raise RuntimeError('Cannot get a successful run! (error too high)')
        if not showtrialplots:
            print("")

    # Make metronomic curve
    plt.figure()
    plt.scatter(ts.flatten(), tp.flatten(), alpha=0.1, marker='.')
    plt.plot(ts[:,0],ts[:,0],'--')
    plt.title('Metronomic curve')
    plt.xlabel('$t_s$')
    plt.ylabel('$t_p$')
    plt.show()
    return ts, tp
11247688 | # note
import os
import json
the_notes = []  # module-level cache of the notes loaded from notes.json
# Load the list of notes from disk (if present).
def get_notes():
    """Return the saved notes, reloading them from notes.json when it exists."""
    global the_notes
    try:
        with open("notes.json", "r") as fh:
            the_notes = json.load(fh)
    except FileNotFoundError:
        pass
    return the_notes
# add a note to the list of notes
def add_note(note):
    """Append *note* to the list persisted in notes.json.

    FIX: removed the leftover debug print() statements and made the
    read-modify-write sequence local instead of mutating module state
    mid-flight; the module-level cache is still kept in sync.
    """
    global the_notes
    notes = []
    if os.path.exists("notes.json"):
        with open("notes.json", "r") as fh:
            notes = json.load(fh)
    notes.append(note)
    the_notes = notes  # keep the module-level cache consistent with disk
    with open("notes.json", "w") as fh:
        json.dump(notes, fh)
| StarcoderdataPython |
375430 |
# NOTE(review): 'graph' is not defined in this fragment -- it assumes an
# object with getVertices(), whose vertices expose .value.rank, is supplied
# elsewhere.
for v in graph.getVertices():
    print(v.value.rank)
11299924 | <filename>maha/constants/general.py<gh_stars>1-10
from typing import List
from .arabic import ARABIC_NUMBERS, ARABIC_PUNCTUATIONS
from .english import ENGLISH_NUMBERS, ENGLISH_PUNCTUATIONS
# Basic character and character-class constants combining the Arabic and
# English sets imported above.
SPACE: str = " "
""" Space character """
EMPTY: str = ""
""" Empty character """
PUNCTUATIONS: List[str] = ARABIC_PUNCTUATIONS + ENGLISH_PUNCTUATIONS
""" Arabic and English punctuations """
NUMBERS: List[str] = ARABIC_NUMBERS + ENGLISH_NUMBERS
""" Arabic and English numbers """
| StarcoderdataPython |
3556028 | #
# Copyright (c) 2019-2020 Google LLC. All Rights Reserved.
# Copyright (c) 2016-2018 Nest Labs Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Description:
# This file effects a Weave Data Language (WDL) test for the
# validator that validates and enforces number constraints.
#
"""Validator for number constraints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from gwv import schema
from gwv import validator
from nwv.validators import number_validator
class NumberValidatorTests(validator.ValidatorTestCase):
  """Validator for number constraints."""

  def _add_float_field(self, precision, max_value, min_value, fixed_width):
    """Append a FLOAT state field with the given constraints to a test trait."""
    trait = self.get_test_trait()
    field = schema.Field('some_float', 1000, '')
    field.data_type = schema.Field.DataType.FLOAT
    field.precision = precision
    field.max_value = max_value
    field.min_value = min_value
    field.fixed_width = fixed_width
    trait.state_list.append(field)

  def test_bad_precision(self):
    self._add_float_field(0.00, 20.0, -10.0, 32)
    self.assert_invalid(number_validator.NumberValidator,
                        'Zero or negative precision')

  def test_bad_min_max(self):
    self._add_float_field(0.01, 20.0, 20.0, 32)
    self.assert_invalid(number_validator.NumberValidator,
                        r'Max < min \+ precision')

  def test_too_big(self):
    self._add_float_field(0.0000000001, 2**32, -2**32, 32)
    self.assert_invalid(number_validator.NumberValidator,
                        'fixed_encoding_width > 64')

  def test_bad_fixed_width(self):
    self._add_float_field(0.01, 20.0, -10.0, 128)
    self.assert_invalid(number_validator.NumberValidator, 'Fixed width must be')
if __name__ == '__main__':
  unittest.main()  # run the tests when this module is executed directly
| StarcoderdataPython |
def is_ajax(request):
    """Check if the request is ajax or not"""
    requested_with = request.META.get('HTTP_X_REQUESTED_WITH')
    accept = request.META.get('HTTP_ACCEPT')
    return requested_with == 'XMLHttpRequest' or accept == 'application/json'
| StarcoderdataPython |
list1 = [34, 46, 58, 56, 22, 62, 34, 97, 57]
# Slicing demo: fixed windows, then negative/open-ended slices.
print(list1[0:3])
print(list1[3:6])
print(list1[6:9])
print(list1[-2:])
print(list1[7:])
print(list1[:-2])
# Iterate over the elements themselves.
for index in list1:
    print(index)
print('----')
# Reassigning the loop variable inside the loop body does not affect the
# sequence of values produced by range(); the next iteration continues in the
# original range order.
for index in range(0, len(list1)):
    print(f'index = {index} / list1[index] = {list1[index]}')
    index = index + 3
    print(f'changed index = {index}')
# Remove the first element and show the result.
del(list1[0])
print(f'list1 = {list1}')
9680664 | """
Django settings for reminders project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from os.path import dirname as up
import dotenv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Three dirname() calls: this settings module presumably lives three levels
# below the project root -- verify if the file is moved.
BASE_DIR = up(up(up(os.path.abspath(__file__))))
# Load environment variables from a .env file at the project root.
dotenv.read_dotenv(os.path.join(BASE_DIR, '.env'))
# BASE_URL = os.environ.get('BASE_URL')
# NOTE(review): only local hosts are allowed; extend for deployment.
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cases',
'remind',
'api',
'material',
'django_celery_beat',
'django_celery_results',
'users.apps.UsersConfig',
'guardian',
'corsheaders',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'reminders.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'reminders.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
'guardian.backends.ObjectPermissionBackend',
)
# Custom User model
AUTH_USER_MODEL = 'users.CustomUser'
LOGIN_URL = 'home'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
# Email settings
# SMTP over STARTTLS via Gmail; credentials are read from the environment so
# they are not committed to source control.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
# Emails for roles
ADMINISTRATION_EMAIL = os.environ.get('ADMINISTRATION_EMAIL')
SUPPORT_EMAIL = os.environ.get('SUPPORT_EMAIL')
SYSADMIN_EMAIL = os.environ.get('SYSADMIN_EMAIL')
4989634 | <filename>data_augmentation/setup.py
from setuptools import find_packages, setup
# Package metadata for the EDA data-augmentation example.
setup(
    name="EDA",
    version="0.0.1",
    # BUG FIX: 'Emmnetal' -> 'Emmental' (typo in the user-facing description).
    description="Understanding the data augmentation using Emmental.",
    install_requires=[
        "emmental>=0.0.5,<0.1.0",
        "pillow>=8.3.2,<9.0.0",
        "torchvision>=0.4.2,<0.5.0",
    ],
    scripts=["bin/image"],
    packages=find_packages(),
)
| StarcoderdataPython |
4826954 | # Copyright 2020 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from .toolchain_profiler import ToolchainProfiler
import json
import logging
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
from subprocess import PIPE
from . import diagnostics
from . import response_file
from . import shared
from . import webassembly
from . import config
from . import utils
from .shared import CLANG_CC, CLANG_CXX, PYTHON
from .shared import LLVM_NM, EMCC, EMAR, EMXX, EMRANLIB, WASM_LD, LLVM_AR
from .shared import LLVM_LINK, LLVM_OBJCOPY
from .shared import try_delete, run_process, check_call, exit_with_error
from .shared import configuration, path_from_root
from .shared import asmjs_mangle, DEBUG
from .shared import TEMP_DIR
from .shared import CANONICAL_TEMP_DIR, LLVM_DWARFDUMP, demangle_c_symbol_name
from .shared import get_emscripten_temp_dir, exe_suffix, is_c_symbol
from .utils import WINDOWS
from .settings import settings
logger = logging.getLogger('building')
# Building
binaryen_checked = False  # whether the binaryen version check has run yet
EXPECTED_BINARYEN_VERSION = 101
# cache results of nm - it can be slow to run
nm_cache = {}
# Stores the object files contained in different archive files passed as input
ar_contents = {}
_is_ar_cache = {}  # presumably memoizes per-file archive detection -- see is_ar()
# the exports the user requested
user_requested_exports = set()
class ObjectFileInfo:
  """Symbol information extracted from one object file via llvm-nm.

  Attributes:
    returncode: exit code of the llvm-nm invocation (0 on success).
    output: raw llvm-nm stdout.
    defs / undefs / commons: sets of defined, undefined and common symbols.
  """

  # BUG FIX: the original used mutable default arguments (defs=set(), ...),
  # so all instances constructed without explicit sets shared the *same* set
  # objects; mutating one instance's symbols would silently affect the others.
  def __init__(self, returncode, output, defs=None, undefs=None, commons=None):
    self.returncode = returncode
    self.output = output
    self.defs = set() if defs is None else defs
    self.undefs = set() if undefs is None else undefs
    self.commons = set() if commons is None else commons

  def is_valid_for_nm(self):
    """Whether llvm-nm succeeded and `output` can be trusted."""
    return self.returncode == 0
# llvm-ar appears to just use basenames inside archives. as a result, files
# with the same basename will trample each other when we extract them. to help
# warn of such situations, we warn if there are duplicate entries in the
# archive
def warn_if_duplicate_entries(archive_contents, archive_filename):
  """Warn when an archive contains multiple members with the same basename.

  llvm-ar extracts members by basename, so duplicates overwrite each other
  and only the last member with a given name ends up linked.
  """
  if len(archive_contents) == len(set(archive_contents)):
    return
  # FIX: 'create you archives' -> 'create your archives' (typo in warning).
  msg = '%s: archive file contains duplicate entries. This is not supported by emscripten. Only the last member with a given name will be linked in which can result in undefined symbols. You should either rename your source files, or use `emar` to create your archives which works around this issue.' % archive_filename
  # Count each name in one pass (dict preserves first-occurrence order)
  # instead of the original O(n^2) rescan of the list tail per element.
  counts = {}
  for entry in archive_contents:
    counts[entry] = counts.get(entry, 0) + 1
  for entry, count in counts.items():
    if count > 1:
      msg += '\n   duplicate: %s' % entry
  diagnostics.warning('emcc', msg)
# Extracts the given list of archive files and outputs their contents
def extract_archive_contents(archive_files):
  """Extract every archive in `archive_files` into one shared temp directory.

  Returns a list of dicts of the form
  {'archive_name': <archive path>, 'o_files': [<extracted member paths>]}.
  The temp directory is deleted at interpreter exit.
  """
  # List the members of all archives in parallel (llvm-ar 't').
  archive_results = shared.run_multiple_processes([[LLVM_AR, 't', a] for a in archive_files], pipe_stdout=True)

  unpack_temp_dir = tempfile.mkdtemp('_archive_contents', 'emscripten_temp_')

  def clean_at_exit():
    try_delete(unpack_temp_dir)
  shared.atexit.register(clean_at_exit)

  archive_contents = []
  for i in range(len(archive_results)):
    a = archive_results[i]
    contents = [l for l in a.splitlines() if len(l)]
    if len(contents) == 0:
      # BUG FIX: the original interpolated `a` (the llvm-ar stdout, empty
      # here) rather than the archive's filename.
      logger.debug('Archive %s appears to be empty (recommendation: link an .so instead of .a)' % archive_files[i])

    # `ar` files can only contains filenames. Just to be sure, verify that each
    # file has only as filename component and is not absolute
    for f in contents:
      assert not os.path.dirname(f)
      assert not os.path.isabs(f)
    # BUG FIX: pass the archive filename, not the llvm-ar listing text, so
    # the duplicate warning names the offending archive.
    warn_if_duplicate_entries(contents, archive_files[i])
    archive_contents += [{
      'archive_name': archive_files[i],
      'o_files': [os.path.join(unpack_temp_dir, c) for c in contents]
    }]

  # Extract all archives in parallel (llvm-ar 'xo' preserves timestamps).
  shared.run_multiple_processes([[LLVM_AR, 'xo', a] for a in archive_files], cwd=unpack_temp_dir)

  # check that all files were created
  for a in archive_contents:
    missing_contents = [x for x in a['o_files'] if not os.path.exists(x)]
    if missing_contents:
      # BUG FIX: the original interpolated `f`, a stale loop variable from the
      # member-verification loop above, instead of the archive's name.
      exit_with_error(f'llvm-ar failed to extract file(s) {missing_contents} from archive file {a["archive_name"]}!')

  return archive_contents
def unique_ordered(values):
  """return a list of unique values in an input list, without changing order
  (list(set(.)) would change order randomly).
  """
  # dict keys preserve insertion order, so this deduplicates in one pass.
  return list(dict.fromkeys(values))
# clear caches. this is not normally needed, except if the clang/LLVM
# used changes inside this invocation of Building, which can happen in the benchmarker
# when it compares different builds.
def clear():
  """Empty all module-level caches (nm results, archive contents, is_ar checks)."""
  for cache in (nm_cache, ar_contents, _is_ar_cache):
    cache.clear()
# .. but for Popen, we cannot have doublequotes, so provide functionality to
# remove them when needed.
def remove_quotes(arg):
  """Strip one layer of surrounding quotes (and their escapes) from *arg*.

  Lists are processed element-wise; unquoted strings pass through unchanged.
  """
  if isinstance(arg, list):
    return [remove_quotes(item) for item in arg]
  for quote in ('"', "'"):
    if arg.startswith(quote) and arg.endswith(quote):
      return arg[1:-1].replace('\\' + quote, quote)
  return arg
def get_building_env():
  """Return a copy of the environment with CC/CXX/AR/etc. pointing at the em* tools.

  Intended for driving third-party build systems (configure/make/pkg-config)
  so they compile with emscripten instead of the host toolchain.
  """
  env = os.environ.copy()
  # point CC etc. to the em* tools.
  env['CC'] = EMCC
  env['CXX'] = EMXX
  env['AR'] = EMAR
  env['LD'] = EMCC
  env['NM'] = LLVM_NM
  env['LDSHARED'] = EMCC
  env['RANLIB'] = EMRANLIB
  env['EMSCRIPTEN_TOOLS'] = path_from_root('tools')
  # HOST_* target the machine running the build, not wasm.
  env['HOST_CC'] = CLANG_CC
  env['HOST_CXX'] = CLANG_CXX
  env['HOST_CFLAGS'] = '-W' # if set to nothing, CFLAGS is used, which we don't want
  env['HOST_CXXFLAGS'] = '-W' # if set to nothing, CXXFLAGS is used, which we don't want
  # Point pkg-config at the emscripten sysroot only.
  env['PKG_CONFIG_LIBDIR'] = shared.Cache.get_sysroot_dir('local', 'lib', 'pkgconfig') + os.path.pathsep + shared.Cache.get_sysroot_dir('lib', 'pkgconfig')
  env['PKG_CONFIG_PATH'] = os.environ.get('EM_PKG_CONFIG_PATH', '')
  env['EMSCRIPTEN'] = path_from_root()
  env['PATH'] = shared.Cache.get_sysroot_dir('bin') + os.pathsep + env['PATH']
  env['CROSS_COMPILE'] = path_from_root('em') # produces /path/to/emscripten/em , which then can have 'cc', 'ar', etc appended to it
  return env
# Returns a clone of the given environment with all directories that contain
# sh.exe removed from the PATH. Used to work around CMake limitation with
# MinGW Makefiles, where sh.exe is not allowed to be present.
def remove_sh_exe_from_path(env):
  """Return a copy of *env* whose PATH contains no directory holding sh.exe.

  Works around the CMake MinGW-Makefiles limitation that refuses to run when
  sh.exe is present on PATH. Windows-only.
  """
  assert WINDOWS
  env = env.copy()
  dirs = env['PATH'].split(';')
  env['PATH'] = ';'.join(d for d in dirs if not os.path.exists(os.path.join(d, 'sh.exe')))
  return env
def make_paths_absolute(f):
  """Return *f* unchanged when it is a flag (starts with '-'), else its absolute path."""
  return f if f.startswith('-') else os.path.abspath(f)
# Runs llvm-nm for the given list of files.
# The results are populated in nm_cache
@ToolchainProfiler.profile_block('llvm_nm_multiple')
def llvm_nm_multiple(files):
  """Run llvm-nm over `files` and return one symbol-info result per input.

  Results are memoized in the module-level `nm_cache`; files already cached
  are not re-scanned.  Files for which no result could be obtained yield a
  placeholder `ObjectFileInfo(1, '')` entry in the returned list.
  """
  if len(files) == 0:
    return []
  # Run llvm-nm on files that we haven't cached yet
  llvm_nm_files = [f for f in files if f not in nm_cache]
  # We can issue multiple files in a single llvm-nm calls, but only if those
  # files are all .o or .bc files. Because of llvm-nm output format, we cannot
  # llvm-nm multiple .a files in one call, but those must be individually checked.
  a_files = [f for f in llvm_nm_files if is_ar(f)]
  o_files = [f for f in llvm_nm_files if f not in a_files]
  # Issue parallel calls for .a files (one llvm-nm process per archive)
  if len(a_files) > 0:
    results = shared.run_multiple_processes([[LLVM_NM, a] for a in a_files], pipe_stdout=True, check=False)
    for i in range(len(results)):
      nm_cache[a_files[i]] = parse_symbols(results[i])
  # Issue a single batch call for multiple .o files
  if len(o_files) > 0:
    cmd = [LLVM_NM] + o_files
    cmd = get_command_with_possible_response_file(cmd)
    results = run_process(cmd, stdout=PIPE, stderr=PIPE, check=False)
    # If one or more of the input files cannot be processed, llvm-nm will return a non-zero error code, but it will still process and print
    # out all the other files in order. So even if process return code is non zero, we should always look at what we got to stdout.
    if results.returncode != 0:
      logger.debug(f'Subcommand {" ".join(cmd)} failed with return code {results.returncode}! (An input file was corrupt?)')
    results = results.stdout
    # llvm-nm produces a single listing of form
    # file1.o:
    # 00000001 T __original_main
    # U __stack_pointer
    #
    # file2.o:
    # 0000005d T main
    # U printf
    #
    # ...
    # so loop over the report to extract the results
    # for each individual file.
    filename = o_files[0]
    # When we dispatched more than one file, we must manually parse
    # the file result delimiters (like shown structured above)
    if len(o_files) > 1:
      file_start = 0
      i = 0
      while True:
        nl = results.find('\n', i)
        if nl < 0:
          break
        # A line ending in ':' with nothing after the colon marks the start of
        # the next file's section in the combined listing.
        colon = results.rfind(':', i, nl)
        if colon >= 0 and results[colon + 1] == '\n': # New file start?
          # Flush symbols accumulated for the previous file before switching.
          nm_cache[filename] = parse_symbols(results[file_start:i - 1])
          filename = results[i:colon].strip()
          file_start = colon + 2
        i = nl + 1
      nm_cache[filename] = parse_symbols(results[file_start:])
    else:
      # We only dispatched a single file, so can parse all of the result directly
      # to that file.
      nm_cache[filename] = parse_symbols(results)
  return [nm_cache[f] if f in nm_cache else ObjectFileInfo(1, '') for f in files]
def llvm_nm(file):
  """Return the symbol info for a single *file* (see llvm_nm_multiple)."""
  results = llvm_nm_multiple([file])
  return results[0]
@ToolchainProfiler.profile_block('read_link_inputs')
def read_link_inputs(files):
  """Pre-scan all link inputs, populating the llvm-nm symbol cache.

  Before performing the link we need to know which symbols each input
  provides.  Archives are expanded first (in parallel) so that the object
  files they contain can be scanned too.  Results land in the module-level
  nm_cache / ar_contents caches; nothing is returned.
  """
  archives = []  # .a files passed in to the command line to the link
  objects = []   # .o/.bc files passed in to the command line to the link
  for f in files:
    abs_path = make_paths_absolute(f)
    if abs_path not in ar_contents and is_ar(abs_path):
      archives.append(abs_path)
    elif abs_path not in nm_cache and is_bitcode(abs_path):
      objects.append(abs_path)
  # Archives contain objects, so process all archives first (in parallel) to
  # obtain the object files inside them.
  archive_contents = extract_archive_contents(archives)
  for a in archive_contents:
    ar_contents[os.path.abspath(a['archive_name'])] = a['o_files']
    for o in a['o_files']:
      if o not in nm_cache:
        objects.append(o)
  # Next, extract symbols from all object files (either standalone or inside
  # archives we just extracted). The results are not used here directly, but
  # populated into the llvm-nm cache structure.
  llvm_nm_multiple(objects)
def llvm_backend_args():
  """Return the list of flags we pass (via -mllvm) to the LLVM wasm backend."""
  # disable slow and relatively unimportant optimization passes
  args = ['-combiner-global-alias-analysis=false']
  # asm.js-style exception handling
  if not settings.DISABLE_EXCEPTION_CATCHING:
    args.append('-enable-emscripten-cxx-exceptions')
    if settings.EXCEPTION_CATCHING_ALLOWED:
      # When 'main' has a non-standard signature, LLVM outlines its content out to
      # '__original_main'. So we add it to the allowed list as well.
      if 'main' in settings.EXCEPTION_CATCHING_ALLOWED:
        settings.EXCEPTION_CATCHING_ALLOWED += ['__original_main']
      args.append('-emscripten-cxx-exceptions-allowed=' + ','.join(settings.EXCEPTION_CATCHING_ALLOWED))
  if settings.SUPPORT_LONGJMP:
    # asm.js-style setjmp/longjmp handling
    args.append('-enable-emscripten-sjlj')
  # better (smaller, sometimes faster) codegen, see binaryen#1054
  # and https://bugs.llvm.org/show_bug.cgi?id=39488
  args.append('-disable-lsr')
  return args
@ToolchainProfiler.profile_block('linking to object file')
def link_to_object(args, target):
  """Link the inputs in *args* into a single relocatable object at *target*."""
  # lld can't output LTO/bitcode object files, so fall back to llvm-link when
  # LTO is requested; otherwise use lld with --relocatable.
  if settings.LTO:
    link_bitcode(args, target)
  else:
    link_lld(args + ['--relocatable'], target)
def link_llvm(linker_inputs, target):
  """Run llvm-link to combine *linker_inputs* into *target*."""
  cmd = [LLVM_LINK, *linker_inputs, '-o', target]
  check_call(get_command_with_possible_response_file(cmd))
def lld_flags_for_executable(external_symbols):
  """Return the wasm-ld flags used when producing an executable (non-relocatable)
  output, derived from the current `settings`.

  `external_symbols`, if given, is the set of symbols resolved outside of wasm
  (e.g. in JS); they are written to an --allow-undefined-file and are excluded
  from the C exports.
  """
  cmd = []
  if external_symbols:
    undefs = configuration.get_temp_files().get('.undefined').name
    utils.write_file(undefs, '\n'.join(external_symbols))
    cmd.append('--allow-undefined-file=%s' % undefs)
  else:
    cmd.append('--import-undefined')
  if settings.IMPORTED_MEMORY:
    cmd.append('--import-memory')
  if settings.USE_PTHREADS:
    cmd.append('--shared-memory')
  if settings.MEMORY64:
    cmd.append('-mwasm64')
  # wasm-ld can strip debug info for us. this strips both the Names
  # section and DWARF, so we can only use it when we don't need any of
  # those things.
  if settings.DEBUG_LEVEL < 2 and (not settings.EMIT_SYMBOL_MAP and
                                   not settings.PROFILING_FUNCS and
                                   not settings.ASYNCIFY):
    cmd.append('--strip-debug')
  if settings.LINKABLE:
    cmd.append('--export-all')
    cmd.append('--no-gc-sections')
  else:
    c_exports = [e for e in settings.EXPORTED_FUNCTIONS if is_c_symbol(e)]
    # Strip the leading underscores
    c_exports = [demangle_c_symbol_name(e) for e in c_exports]
    if external_symbols:
      # Filter out symbols external/JS symbols
      c_exports = [e for e in c_exports if e not in external_symbols]
    for export in c_exports:
      cmd.append('--export-if-defined=' + export)
    for export in settings.EXPORT_IF_DEFINED:
      cmd.append('--export-if-defined=' + export)
  if settings.RELOCATABLE:
    cmd.append('--experimental-pic')
    if settings.SIDE_MODULE:
      cmd.append('-shared')
    else:
      cmd.append('-pie')
    if not settings.LINKABLE:
      cmd.append('--no-export-dynamic')
  else:
    cmd.append('--export-table')
    if settings.ALLOW_TABLE_GROWTH:
      cmd.append('--growable-table')
  if not settings.SIDE_MODULE:
    # Set the stack size and the initial linear-memory size for the main
    # module.  (Side modules share the main module's memory, so none of the
    # memory/entry flags below apply to them.)
    cmd += [
      '-z', 'stack-size=%s' % settings.TOTAL_STACK,
      '--initial-memory=%d' % settings.INITIAL_MEMORY,
    ]
    if settings.STANDALONE_WASM:
      # when settings.EXPECT_MAIN is set we fall back to wasm-ld default of _start
      if not settings.EXPECT_MAIN:
        cmd += ['--entry=_initialize']
    else:
      if settings.EXPECT_MAIN and not settings.IGNORE_MISSING_MAIN:
        cmd += ['--entry=main']
      else:
        cmd += ['--no-entry']
    if not settings.ALLOW_MEMORY_GROWTH:
      cmd.append('--max-memory=%d' % settings.INITIAL_MEMORY)
    elif settings.MAXIMUM_MEMORY != -1:
      cmd.append('--max-memory=%d' % settings.MAXIMUM_MEMORY)
    if not settings.RELOCATABLE:
      cmd.append('--global-base=%s' % settings.GLOBAL_BASE)
  return cmd
def link_lld(args, target, external_symbols=None):
  """Run wasm-ld to link *args* into *target*."""
  if not os.path.exists(WASM_LD):
    exit_with_error('linker binary not found in LLVM directory: %s', WASM_LD)
  # lld doesn't currently support --start-group/--end-group since the
  # semantics are more like the windows linker where there is no need for
  # grouping.
  args = [a for a in args if a not in ('--start-group', '--end-group')]
  # Emscripten currently expects linkable output (SIDE_MODULE/MAIN_MODULE) to
  # include all archive contents.
  if settings.LINKABLE:
    args = ['--whole-archive'] + args + ['--no-whole-archive']
  if settings.STRICT:
    args += ['--fatal-warnings']
  cmd = [WASM_LD, '-o', target] + args
  for backend_arg in llvm_backend_args():
    cmd += ['-mllvm', backend_arg]
  # Wasm exception handling is a CodeGen option for the LLVM backend, so
  # wasm-ld needs to be told about it explicitly for the LTO mode.
  if settings.EXCEPTION_HANDLING:
    cmd += ['-mllvm', '-exception-model=wasm', '-mllvm', '-wasm-enable-eh']
  # For relocatable output (generating an object file) we don't pass any of
  # the normal linker flags that are used when building an executable.
  if '--relocatable' not in args and '-r' not in args:
    cmd += lld_flags_for_executable(external_symbols)
  check_call(get_command_with_possible_response_file(cmd))
def link_bitcode(args, target, force_archive_contents=False):
  """Link bitcode inputs into *target* using llvm-link.

  "Full-featured" linking: looks into archives and pulls in only those object
  files that resolve currently-undefined symbols, mimicking GNU ld archive
  semantics (this duplicates functionality that lld provides natively).
  `force_archive_contents` forces every archive member into the link.
  """
  input_files = [a for a in args if not a.startswith('-')]
  files_to_link = []
  # Tracking unresolveds is necessary for .a linking, see below.
  # Specify all possible entry points to seed the linking process.
  # For a simple application, this would just be "main".
  unresolved_symbols = set([func[1:] for func in settings.EXPORTED_FUNCTIONS])
  resolved_symbols = set()
  # Paths of already included object files from archives.
  added_contents = set()
  has_ar = any(is_ar(make_paths_absolute(f)) for f in input_files)
  # If we have only one archive or the force_archive_contents flag is set,
  # then we will add every object file we see, regardless of whether it
  # resolves any undefined symbols.
  force_add_all = len(input_files) == 1 or force_archive_contents

  # Considers an object file for inclusion in the link. The object is included
  # if force_add=True or if the object provides a currently undefined symbol.
  # If the object is included, the symbol tables are updated and the function
  # returns True.
  def consider_object(f, force_add=False):
    new_symbols = llvm_nm(f)
    # Check if the object was valid according to llvm-nm. It also accepts
    # native object files.
    if not new_symbols.is_valid_for_nm():
      diagnostics.warning('emcc', 'object %s is not valid according to llvm-nm, cannot link', f)
      return False
    # Check the object is valid for us, and not a native object file.
    if not is_bitcode(f):
      exit_with_error('unknown file type: %s', f)
    provided = new_symbols.defs.union(new_symbols.commons)
    do_add = force_add or not unresolved_symbols.isdisjoint(provided)
    if do_add:
      logger.debug('adding object %s to link (forced: %d)' % (f, force_add))
      # Update resolved_symbols table with newly resolved symbols
      resolved_symbols.update(provided)
      # Update unresolved_symbols table by adding newly unresolved symbols and
      # removing newly resolved symbols.
      unresolved_symbols.update(new_symbols.undefs.difference(resolved_symbols))
      unresolved_symbols.difference_update(provided)
      files_to_link.append(f)
    return do_add

  # Traverse a single archive. The object files are repeatedly scanned for
  # newly satisfied symbols until no new symbols are found. Returns true if
  # any object files were added to the link.
  def consider_archive(f, force_add):
    added_any_objects = False
    loop_again = True
    logger.debug('considering archive %s' % (f))
    contents = ar_contents[f]
    while loop_again: # repeatedly traverse until we have everything we need
      loop_again = False
      for content in contents:
        if content in added_contents:
          continue
        # Link in the .o if it provides symbols, *or* this is a singleton archive (which is
        # apparently an exception in gcc ld)
        if consider_object(content, force_add=force_add):
          added_contents.add(content)
          loop_again = True
          added_any_objects = True
    logger.debug('done running loop of archive %s' % (f))
    return added_any_objects

  # Populate the nm/ar caches for everything we are about to inspect.
  read_link_inputs(input_files)

  # Rescan a group of archives until we don't find any more objects to link.
  def scan_archive_group(group):
    loop_again = True
    logger.debug('starting archive group loop')
    while loop_again:
      loop_again = False
      for archive in group:
        if consider_archive(archive, force_add=False):
          loop_again = True
    logger.debug('done with archive group loop')

  current_archive_group = None
  in_whole_archive = False
  for a in args:
    if a.startswith('-'):
      if a in ['--start-group', '-(']:
        assert current_archive_group is None, 'Nested --start-group, missing --end-group?'
        current_archive_group = []
      elif a in ['--end-group', '-)']:
        assert current_archive_group is not None, '--end-group without --start-group'
        scan_archive_group(current_archive_group)
        current_archive_group = None
      elif a in ['--whole-archive', '-whole-archive']:
        in_whole_archive = True
      elif a in ['--no-whole-archive', '-no-whole-archive']:
        in_whole_archive = False
      else:
        # Command line flags should already be vetted by the time this method
        # is called, so this is an internal error
        exit_with_error('unsupported link flag: %s', a)
    else:
      lib_path = make_paths_absolute(a)
      if is_ar(lib_path):
        # Extract object files from ar archives, and link according to gnu ld semantics
        # (link in an entire .o from the archive if it supplies symbols still unresolved)
        consider_archive(lib_path, in_whole_archive or force_add_all)
        # If we're inside a --start-group/--end-group section, add to the list
        # so we can loop back around later.
        if current_archive_group is not None:
          current_archive_group.append(lib_path)
      elif is_bitcode(lib_path):
        if has_ar:
          consider_object(a, force_add=True)
        else:
          # If there are no archives then we can simply link all valid object
          # files and skip the symbol table stuff.
          files_to_link.append(a)
      else:
        exit_with_error('unknown file type: %s', a)
  # We have to consider the possibility that --start-group was used without a matching
  # --end-group; GNU ld permits this behavior and implicitly treats the end of the
  # command line as having an --end-group.
  if current_archive_group:
    logger.debug('--start-group without matching --end-group, rescanning')
    scan_archive_group(current_archive_group)
    current_archive_group = None
  try_delete(target)
  # Finish link
  # tolerate people trying to link a.so a.so etc.
  files_to_link = unique_ordered(files_to_link)
  logger.debug('emcc: linking: %s to %s', files_to_link, target)
  link_llvm(files_to_link, target)
def get_command_with_possible_response_file(cmd):
  """Return *cmd* unchanged, or rewritten to use an @response-file when the
  joined command line would be very long.

  8k is a bit of an arbitrary limit, but a reasonable one for max command
  line size before we resort to a response file.
  """
  if len(' '.join(cmd)) <= 8192:
    return cmd
  logger.debug('using response file for %s' % cmd[0])
  filename = response_file.create_response_file(cmd[1:], TEMP_DIR)
  return [cmd[0], '@' + filename]
def parse_symbols(output):
  """Parse textual llvm-nm output into an ObjectFileInfo.

  Collects defined ('T' and other uppercase status codes), undefined ('U')
  and common ('C') symbols.  Per-file header lines ("filename.o:") and
  comment lines are skipped.
  """
  defs = []
  undefs = []
  commons = []
  for line in output.split('\n'):
    if not line or line[0] == '#':
      continue
    # e.g. filename.o: , saying which file it's from
    if ':' in line:
      continue
    parts = [seg for seg in line.split(' ') if len(seg)]
    if not parts:
      # Guard against whitespace-only lines: previously `parts[0]` below
      # would raise IndexError on such input.
      continue
    # pnacl-nm will print zero offsets for bitcode, and newer llvm-nm will
    # print present symbols as "-------- T name"; strip that leading offset
    # column.  (The hex-offset match intentionally applies regardless of the
    # number of columns — this parenthesization matches the original
    # `and`/`or` precedence.)
    if (len(parts) == 3 and parts[0] == "--------") or re.match(r'^[\da-f]{8}$', parts[0]):
      parts.pop(0)
    if len(parts) == 2:
      # ignore lines with absolute offsets, these are not bitcode anyhow
      # e.g. |00000630 t d_source_name|
      status, symbol = parts
      if status == 'U':
        undefs.append(symbol)
      elif status == 'C':
        commons.append(symbol)
      elif status == status.upper():
        # FIXME: using WTD in the previous line fails due to llvm-nm behavior on macOS,
        # so for now we assume all uppercase are normally defined external symbols
        defs.append(symbol)
  return ObjectFileInfo(0, None, set(defs), set(undefs), set(commons))
def emar(action, output_filename, filenames, stdout=None, stderr=None, env=None):
  """Run emar with *action* on *filenames*, producing *output_filename*.

  Inputs are passed via a response file to avoid command-line length limits.
  """
  try_delete(output_filename)
  response_filename = response_file.create_response_file(filenames, TEMP_DIR)
  try:
    run_process([EMAR, action, output_filename, '@' + response_filename],
                stdout=stdout, stderr=stderr, env=env)
  finally:
    # Always clean up the temporary response file, even if emar failed.
    try_delete(response_filename)
  if 'c' in action:
    assert os.path.exists(output_filename), 'emar could not create output file: ' + output_filename
def opt_level_to_str(opt_level, shrink_level=0):
  """Convert an opt_level/shrink_level pair to a single -O style flag."""
  # -O0 wins regardless of any shrink level.
  if opt_level == 0:
    return '-O0'
  if shrink_level >= 2:
    return '-Oz'
  if shrink_level == 1:
    return '-Os'
  # Clamp to -O3, the highest level the tools accept.
  return f'-O{min(opt_level, 3)}'
def js_optimizer(filename, passes):
  """Run the JS optimizer tool over *filename* with the given *passes*."""
  from . import js_optimizer
  try:
    return js_optimizer.run(filename, passes)
  except subprocess.CalledProcessError as err:
    # Surface the failing sub-command and its exit code to the user.
    exit_with_error("'%s' failed (%d)", ' '.join(err.cmd), err.returncode)
# run JS optimizer on some JS, ignoring asm.js contents if any - just run on it all
def acorn_optimizer(filename, passes, extra_info=None, return_output=False):
  """Run tools/acorn-optimizer.js on *filename* with the given *passes*.

  If *return_output* is set the optimized JS text is returned directly;
  otherwise it is written next to the input as '<filename>.jso.js' and that
  path is returned.
  """
  optimizer = path_from_root('tools/acorn-optimizer.js')
  original_filename = filename
  if extra_info is not None:
    # Hand the extra info to the optimizer by appending it as a comment to a
    # temporary copy of the input.
    temp_files = configuration.get_temp_files()
    temp = temp_files.get('.js').name
    shutil.copyfile(filename, temp)
    with open(temp, 'a') as f:
      f.write('// EXTRA_INFO: ' + extra_info)
    filename = temp
  cmd = config.NODE_JS + [optimizer, filename] + passes
  # Keep JS code comments intact through the acorn optimization pass so that
  # JSDoc comments will be carried over to a later Closure run.
  if settings.USE_CLOSURE_COMPILER:
    cmd.append('--closureFriendly')
  if settings.VERBOSE:
    cmd.append('verbose')
  if return_output:
    return check_call(cmd, stdout=PIPE).stdout
  outfile = original_filename + '.jso.js'
  configuration.get_temp_files().note(outfile)
  check_call(cmd, stdout=open(outfile, 'w'))
  save_intermediate(outfile, '%s.js' % passes[0])
  return outfile
# evals ctors. if binaryen_bin is provided, it is the dir of the binaryen tool
# for this, and we are in wasm mode
def eval_ctors(js_file, binary_file, debug_info=False): # noqa
  # NOTE: currently a no-op — ctor evalling is disabled (see the linked
  # issue), so this logs a debug message and returns without touching
  # js_file or binary_file.
  logger.debug('Ctor evalling in the wasm backend is disabled due to https://github.com/emscripten-core/emscripten/issues/9527')
  return
  # TODO re-enable
  # cmd = [PYTHON, path_from_root('tools/ctor_evaller.py'), js_file, binary_file, str(settings.INITIAL_MEMORY), str(settings.TOTAL_STACK), str(settings.GLOBAL_BASE), binaryen_bin, str(int(debug_info))]
  # if binaryen_bin:
  #   cmd += get_binaryen_feature_flags()
  # check_call(cmd)
def get_closure_compiler():
  """Return the command (as an argument list) used to invoke the Closure compiler."""
  # First check if the user configured a specific CLOSURE_COMPILER in their settings
  if config.CLOSURE_COMPILER:
    # Fix: previously this returned `shared.CLOSURE_COMPILER` even though the
    # value checked (and configured by the user) lives on `config`; return
    # the same attribute that was tested.
    return config.CLOSURE_COMPILER
  # Otherwise use the one installed via npm
  cmd = shared.get_npm_cmd('google-closure-compiler')
  if not WINDOWS:
    # Work around an issue that Closure compiler can take up a lot of memory and crash in an error
    # "FATAL ERROR: Ineffective mark-compacts near heap limit Allocation failed - JavaScript heap
    # out of memory"
    cmd.insert(-1, '--max_old_space_size=8192')
  return cmd
def check_closure_compiler(cmd, args, env, allowed_to_fail):
  """Verify that the given Closure compiler command actually runs.

  Returns True when `cmd --version` executes and reports a version.  On
  failure, returns False if *allowed_to_fail* is set, otherwise aborts with
  an error.
  """
  try:
    output = run_process(cmd + args + ['--version'], stdout=PIPE, env=env).stdout
  except Exception as e:
    if allowed_to_fail:
      return False
    # Use logging.Logger.warning; .warn is a deprecated alias.
    logger.warning(str(e))
    exit_with_error('closure compiler ("%s --version") did not execute properly!' % str(cmd))
  if 'Version:' not in output:
    if allowed_to_fail:
      return False
    exit_with_error('unrecognized closure compiler --version output (%s):\n%s' % (str(cmd), output))
  return True
# Remove this once we require python3.7 and can use str.isascii.
# See: https://docs.python.org/3/library/stdtypes.html#str.isascii
def isascii(s):
  """Return True if *s* contains only 7-bit ASCII characters."""
  try:
    s.encode('ascii')
  except UnicodeEncodeError:
    return False
  return True
@ToolchainProfiler.profile_block('closure_compiler')
def closure_compiler(filename, pretty, advanced=True, extra_closure_args=None):
  """Run the Google Closure compiler on *filename* and return the path of the
  minified output file.

  *pretty* keeps the output formatted; *advanced* selects
  ADVANCED_OPTIMIZATIONS; *extra_closure_args* (and the EMCC_CLOSURE_ARGS
  environment variable) supply extra user flags.  Aborts via exit_with_error
  on compiler failure (or on warnings if CLOSURE_WARNINGS == 'error').
  """
  env = shared.env_with_node_in_path()
  user_args = []
  env_args = os.environ.get('EMCC_CLOSURE_ARGS')
  if env_args:
    user_args += shlex.split(env_args)
  if extra_closure_args:
    user_args += extra_closure_args
  # Closure compiler expects JAVA_HOME to be set *and* java.exe to be in the PATH in order
  # to enable use of the java backend. Without this it will only try the native and JavaScript
  # versions of the compiler.
  java_bin = os.path.dirname(config.JAVA)
  if java_bin:
    def add_to_path(dirname):
      env['PATH'] = env['PATH'] + os.pathsep + dirname
    add_to_path(java_bin)
    java_home = os.path.dirname(java_bin)
    env.setdefault('JAVA_HOME', java_home)
  closure_cmd = get_closure_compiler()
  native_closure_compiler_works = check_closure_compiler(closure_cmd, user_args, env, allowed_to_fail=True)
  if not native_closure_compiler_works and not any(a.startswith('--platform') for a in user_args):
    # Run with Java Closure compiler as a fallback if the native version does not work
    user_args.append('--platform=java')
    check_closure_compiler(closure_cmd, user_args, env, allowed_to_fail=False)
  # Closure externs file contains known symbols to be extern to the minification, Closure
  # should not minify these symbol names.
  CLOSURE_EXTERNS = [path_from_root('src/closure-externs/closure-externs.js')]
  # Closure compiler needs to know about all exports that come from the wasm module, because to optimize for small code size,
  # the exported symbols are added to global scope via a foreach loop in a way that evades Closure's static analysis. With an explicit
  # externs file for the exports, Closure is able to reason about the exports.
  if settings.WASM_FUNCTION_EXPORTS and not settings.DECLARE_ASM_MODULE_EXPORTS:
    # Generate an exports file that records all the exported symbols from the wasm module.
    module_exports_suppressions = '\n'.join(['/**\n * @suppress {duplicate, undefinedVars}\n */\nvar %s;\n' % asmjs_mangle(i) for i in settings.WASM_FUNCTION_EXPORTS])
    exports_file = configuration.get_temp_files().get('_module_exports.js')
    exports_file.write(module_exports_suppressions.encode())
    exports_file.close()
    CLOSURE_EXTERNS += [exports_file.name]
  # Node.js specific externs
  if shared.target_environment_may_be('node'):
    NODE_EXTERNS_BASE = path_from_root('third_party/closure-compiler/node-externs')
    NODE_EXTERNS = os.listdir(NODE_EXTERNS_BASE)
    NODE_EXTERNS = [os.path.join(NODE_EXTERNS_BASE, name) for name in NODE_EXTERNS
                    if name.endswith('.js')]
    CLOSURE_EXTERNS += [path_from_root('src/closure-externs/node-externs.js')] + NODE_EXTERNS
  # V8/SpiderMonkey shell specific externs
  if shared.target_environment_may_be('shell'):
    V8_EXTERNS = [path_from_root('src/closure-externs/v8-externs.js')]
    SPIDERMONKEY_EXTERNS = [path_from_root('src/closure-externs/spidermonkey-externs.js')]
    CLOSURE_EXTERNS += V8_EXTERNS + SPIDERMONKEY_EXTERNS
  # Web environment specific externs
  if shared.target_environment_may_be('web') or shared.target_environment_may_be('worker'):
    BROWSER_EXTERNS_BASE = path_from_root('src/closure-externs/browser-externs')
    if os.path.isdir(BROWSER_EXTERNS_BASE):
      BROWSER_EXTERNS = os.listdir(BROWSER_EXTERNS_BASE)
      BROWSER_EXTERNS = [os.path.join(BROWSER_EXTERNS_BASE, name) for name in BROWSER_EXTERNS
                         if name.endswith('.js')]
      CLOSURE_EXTERNS += BROWSER_EXTERNS
  if settings.DYNCALLS:
    CLOSURE_EXTERNS += [path_from_root('src/closure-externs/dyncall-externs.js')]
  if settings.MINIMAL_RUNTIME and settings.USE_PTHREADS:
    CLOSURE_EXTERNS += [path_from_root('src/minimal_runtime_worker_externs.js')]
  args = ['--compilation_level', 'ADVANCED_OPTIMIZATIONS' if advanced else 'SIMPLE_OPTIMIZATIONS']
  # Keep in sync with ecmaVersion in tools/acorn-optimizer.js
  args += ['--language_in', 'ECMASCRIPT_2020']
  # Tell closure not to do any transpiling or inject any polyfills.
  # At some point we may want to look into using this as way to convert to ES5 but
  # babel is perhaps a better tool for that.
  args += ['--language_out', 'NO_TRANSPILE']
  # Tell closure never to inject the 'use strict' directive.
  args += ['--emit_use_strict=false']
  # Closure compiler is unable to deal with path names that are not 7-bit ASCII:
  # https://github.com/google/closure-compiler/issues/3784
  tempfiles = configuration.get_temp_files()
  outfile = tempfiles.get('.cc.js').name # Safe 7-bit filename

  # Copy a file with a non-ASCII name to a temp file with a safe 7-bit ASCII
  # name, returning the (relative) safe path to pass to Closure.
  def move_to_safe_7bit_ascii_filename(filename):
    if isascii(filename):
      return filename
    safe_filename = tempfiles.get('.js').name # Safe 7-bit filename
    shutil.copyfile(filename, safe_filename)
    return os.path.relpath(safe_filename, tempfiles.tmpdir)

  for e in CLOSURE_EXTERNS:
    args += ['--externs', move_to_safe_7bit_ascii_filename(e)]
  for i in range(len(user_args)):
    if user_args[i] == '--externs':
      user_args[i + 1] = move_to_safe_7bit_ascii_filename(user_args[i + 1])
  # Specify output file relative to the temp directory to avoid specifying non-7-bit-ASCII path names.
  args += ['--js_output_file', os.path.relpath(outfile, tempfiles.tmpdir)]
  if settings.IGNORE_CLOSURE_COMPILER_ERRORS:
    args.append('--jscomp_off=*')
  if pretty:
    args += ['--formatting', 'PRETTY_PRINT']
  # Specify input file relative to the temp directory to avoid specifying non-7-bit-ASCII path names.
  args += ['--js', move_to_safe_7bit_ascii_filename(filename)]
  cmd = closure_cmd + args + user_args
  logger.debug('closure compiler: ' + ' '.join(cmd))
  # Closure compiler does not work if any of the input files contain characters outside the
  # 7-bit ASCII range. Therefore make sure the command line we pass does not contain any such
  # input files by passing all input filenames relative to the cwd. (user temp directory might
  # be in user's home directory, and user's profile name might contain unicode characters)
  proc = run_process(cmd, stderr=PIPE, check=False, env=env, cwd=tempfiles.tmpdir)
  # XXX Closure bug: if Closure is invoked with --create_source_map, Closure should create a
  # outfile.map source map file (https://github.com/google/closure-compiler/wiki/Source-Maps)
  # But it looks like it creates such files on Linux(?) even without setting that command line
  # flag (and currently we don't), so delete the produced source map file to not leak files in
  # temp directory.
  try_delete(outfile + '.map')
  # Print Closure diagnostics result up front.
  if proc.returncode != 0:
    logger.error('Closure compiler run failed:\n')
  elif len(proc.stderr.strip()) > 0:
    if settings.CLOSURE_WARNINGS == 'error':
      logger.error('Closure compiler completed with warnings and -s CLOSURE_WARNINGS=error enabled, aborting!\n')
    elif settings.CLOSURE_WARNINGS == 'warn':
      logger.warn('Closure compiler completed with warnings:\n')
  # Print input file (long wall of text!)
  if DEBUG == 2 and (proc.returncode != 0 or (len(proc.stderr.strip()) > 0 and settings.CLOSURE_WARNINGS != 'quiet')):
    input_file = open(filename, 'r').read().splitlines()
    for i in range(len(input_file)):
      sys.stderr.write(f'{i + 1}: {input_file[i]}\n')
  if proc.returncode != 0:
    logger.error(proc.stderr) # print list of errors (possibly long wall of text if input was minified)
    # Exit and print final hint to get clearer output
    msg = 'closure compiler failed (rc: %d): %s' % (proc.returncode, shared.shlex_join(cmd))
    if not pretty:
      msg += ' the error message may be clearer with -g1 and EMCC_DEBUG=2 set'
    exit_with_error(msg)
  if len(proc.stderr.strip()) > 0 and settings.CLOSURE_WARNINGS != 'quiet':
    # print list of warnings (possibly long wall of text if input was minified)
    if settings.CLOSURE_WARNINGS == 'error':
      logger.error(proc.stderr)
    else:
      logger.warn(proc.stderr)
    # Exit and/or print final hint to get clearer output
    if not pretty:
      logger.warn('(rerun with -g1 linker flag for an unminified output)')
    elif DEBUG != 2:
      logger.warn('(rerun with EMCC_DEBUG=2 enabled to dump Closure input file)')
    if settings.CLOSURE_WARNINGS == 'error':
      exit_with_error('closure compiler produced warnings and -s CLOSURE_WARNINGS=error enabled')
  return outfile
# minify the final wasm+JS combination. this is done after all the JS
# and wasm optimizations; here we do the very final optimizations on them
def minify_wasm_js(js_file, wasm_file, expensive_optimizations, minify_whitespace, debug_info):
  """Run the final JS(+wasm) minification pipeline and return the resulting
  JS file path.  *wasm_file* may be rewritten in place by metadce/import
  minification along the way."""
  # start with JSDCE, to clean up obvious JS garbage. When optimizing for size,
  # use AJSDCE (aggressive JS DCE, performs multiple iterations). Clean up
  # whitespace if necessary too.
  passes = []
  if not settings.LINKABLE:
    passes.append('JSDCE' if not expensive_optimizations else 'AJSDCE')
  if minify_whitespace:
    passes.append('minifyWhitespace')
  if passes:
    logger.debug('running cleanup on shell code: ' + ' '.join(passes))
    js_file = acorn_optimizer(js_file, passes)
  # if we can optimize this js+wasm combination under the assumption no one else
  # will see the internals, do so
  if not settings.LINKABLE:
    # if we are optimizing for size, shrink the combined wasm+JS
    # TODO: support this when a symbol map is used
    if expensive_optimizations:
      js_file = metadce(js_file, wasm_file, minify_whitespace=minify_whitespace, debug_info=debug_info)
      # now that we removed unneeded communication between js and wasm, we can clean up
      # the js some more.
      passes = ['AJSDCE']
      if minify_whitespace:
        passes.append('minifyWhitespace')
      logger.debug('running post-meta-DCE cleanup on shell code: ' + ' '.join(passes))
      js_file = acorn_optimizer(js_file, passes)
    if settings.MINIFY_WASM_IMPORTS_AND_EXPORTS:
      js_file = minify_wasm_imports_and_exports(js_file, wasm_file, minify_whitespace=minify_whitespace, minify_exports=settings.MINIFY_ASMJS_EXPORT_NAMES, debug_info=debug_info)
  return js_file
# run binaryen's wasm-metadce to dce both js and wasm
def metadce(js_file, wasm_file, minify_whitespace, debug_info):
  """Dead-code-eliminate the JS and wasm together using wasm-metadce.

  Builds a JS<->wasm reachability graph, feeds it to wasm-metadce (which
  rewrites *wasm_file* in place), then strips the JS items the tool reported
  as unused.  Returns the path of the resulting JS file.
  """
  logger.debug('running meta-DCE')
  temp_files = configuration.get_temp_files()
  # first, get the JS part of the graph
  if settings.MAIN_MODULE:
    # For the main module we include all exports as possible roots, not just function exports.
    # This means that any usages of data symbols within the JS or in the side modules can/will keep
    # these exports alive on the wasm module.
    # This is important today for weak data symbols that are defined by the main and the side module
    # (i.e. RTTI info). We want to make sure the main module's symbols get added to asmLibraryArg
    # when the main module is loaded. If this doesn't happen then the symbols in the side module
    # will take precedence.
    exports = settings.WASM_EXPORTS
  else:
    exports = settings.WASM_FUNCTION_EXPORTS
  extra_info = '{ "exports": [' + ','.join(f'["{asmjs_mangle(x)}", "{x}"]' for x in exports) + ']}'
  txt = acorn_optimizer(js_file, ['emitDCEGraph', 'noPrint'], return_output=True, extra_info=extra_info)
  graph = json.loads(txt)
  # ensure that functions expected to be exported to the outside are roots
  required_symbols = user_requested_exports.union(set(settings.SIDE_MODULE_IMPORTS))
  for item in graph:
    if 'export' in item:
      export = asmjs_mangle(item['export'])
      if settings.EXPORT_ALL or export in required_symbols:
        item['root'] = True
  # in standalone wasm, always export the memory
  if not settings.IMPORTED_MEMORY:
    graph.append({
      'export': 'memory',
      'name': 'emcc$export$memory',
      'reaches': [],
      'root': True
    })
  if not settings.RELOCATABLE:
    graph.append({
      'export': '__indirect_function_table',
      'name': 'emcc$export$__indirect_function_table',
      'reaches': [],
      'root': True
    })
  # fix wasi imports TODO: support wasm stable with an option?
  WASI_IMPORTS = set([
    'environ_get',
    'environ_sizes_get',
    'args_get',
    'args_sizes_get',
    'fd_write',
    'fd_close',
    'fd_read',
    'fd_seek',
    'fd_fdstat_get',
    'fd_sync',
    'fd_pread',
    'fd_pwrite',
    'proc_exit',
    'clock_res_get',
    'clock_time_get',
  ])
  for item in graph:
    if 'import' in item and item['import'][1][1:] in WASI_IMPORTS:
      item['import'][0] = settings.WASI_MODULE_NAME
  # fixup wasm backend prefixing: strip the single leading underscore
  for item in graph:
    if 'import' in item:
      if item['import'][1][0] == '_':
        item['import'][1] = item['import'][1][1:]
  # map import names from wasm to JS, using the actual name the wasm uses for the import
  import_name_map = {}
  for item in graph:
    if 'import' in item:
      import_name_map[item['name']] = 'emcc$import$' + item['import'][1]
  temp = temp_files.get('.txt').name
  utils.write_file(temp, json.dumps(graph))
  # run wasm-metadce (rewrites wasm_file in place)
  out = run_binaryen_command('wasm-metadce',
                             wasm_file,
                             wasm_file,
                             ['--graph-file=' + temp],
                             debug=debug_info,
                             stdout=PIPE)
  # find the unused things in js
  unused = []
  PREFIX = 'unused: '
  for line in out.splitlines():
    if line.startswith(PREFIX):
      name = line.replace(PREFIX, '').strip()
      if name in import_name_map:
        name = import_name_map[name]
      unused.append(name)
  # remove them
  passes = ['applyDCEGraphRemovals']
  if minify_whitespace:
    passes.append('minifyWhitespace')
  extra_info = {'unused': unused}
  return acorn_optimizer(js_file, passes, extra_info=json.dumps(extra_info))
def asyncify_lazy_load_code(wasm_target, debug):
    """Split an Asyncify build into a primary wasm and a lazy-loaded wasm.

    The lazy copy (written to `wasm_target + '.lazy.wasm'`) has its memory
    segments removed (the initial wasm already applied them) and is optimized
    under the assumption that it only ever rewinds. The primary wasm is then
    re-optimized in place under the assumption that imports always unwind.
    """
    def with_opt_level(flags):
        # Append the optimization-level flag when any optimization is enabled.
        if settings.OPT_LEVEL > 0:
            flags.append(opt_level_to_str(settings.OPT_LEVEL, settings.SHRINK_LEVEL))
        return flags

    # create the lazy-loaded wasm. remove the memory segments from it, as memory
    # segments have already been applied by the initial wasm, and apply the knowledge
    # that it will only rewind, after which optimizations can remove some code
    run_wasm_opt(wasm_target,
                 wasm_target + '.lazy.wasm',
                 args=with_opt_level(['--remove-memory', '--mod-asyncify-never-unwind']),
                 debug=debug)
    # re-optimize the original, by applying the knowledge that imports will
    # definitely unwind, and we never rewind, after which optimizations can remove
    # a lot of code
    # TODO: support other asyncify stuff, imports that don't always unwind?
    # TODO: source maps etc.
    run_wasm_opt(infile=wasm_target,
                 outfile=wasm_target,
                 args=with_opt_level(['--mod-asyncify-always-and-only-unwind']),
                 debug=debug)
def minify_wasm_imports_and_exports(js_file, wasm_file, minify_whitespace, minify_exports, debug_info):
    """Minify import/export names in the wasm, then rename them in the JS.

    wasm-opt rewrites the names inside the wasm in place and prints the
    old => new mapping on stdout; that mapping is then applied to the JS
    glue via the acorn `applyImportAndExportNameChanges` pass so both sides
    stay in sync. Returns the path of the updated JS file.
    """
    logger.debug('minifying wasm imports and exports')
    # run the pass
    if minify_exports:
        # standalone wasm mode means we need to emit a wasi import module.
        # otherwise, minify even the imported module names.
        if settings.MINIFY_WASM_IMPORTED_MODULES:
            pass_name = '--minify-imports-and-exports-and-modules'
        else:
            pass_name = '--minify-imports-and-exports'
    else:
        pass_name = '--minify-imports'
    out = run_wasm_opt(wasm_file, wasm_file,
                       [pass_name],
                       debug=debug_info,
                       stdout=PIPE)
    # TODO this is the last tool we run, after normal opts and metadce. it
    # might make sense to run Stack IR optimizations here or even -O (as
    # metadce which runs before us might open up new general optimization
    # opportunities). however, the benefit is less than 0.5%.

    # get the mapping: wasm-opt prints one 'old => new' line per renamed symbol
    SEP = ' => '
    mapping = {}
    for line in out.split('\n'):
        if SEP in line:
            old, new = line.strip().split(SEP)
            assert old not in mapping, 'imports must be unique'
            mapping[old] = new
    # apply them
    passes = ['applyImportAndExportNameChanges']
    if minify_whitespace:
        passes.append('minifyWhitespace')
    extra_info = {'mapping': mapping}
    return acorn_optimizer(js_file, passes, extra_info=json.dumps(extra_info))
def wasm2js(js_file, wasm_file, opt_level, minify_whitespace, use_closure_compiler, debug_info, symbols_file=None, symbols_file_js=None):
    """Compile the wasm to JS with binaryen's wasm2js and splice it into the JS glue.

    The wasm2js output replaces the `__wasm2jsInstantiate__` placeholder in
    `js_file`; the combined result is written to `js_file + '.wasm2js.js'`,
    whose path is returned. Optionally runs the JS optimizer and closure on
    the generated code first.
    """
    logger.debug('wasm2js')
    args = ['--emscripten']
    if opt_level > 0:
        args += ['-O']
    if symbols_file:
        args += ['--symbols-file=%s' % symbols_file]
    wasm2js_js = run_binaryen_command('wasm2js', wasm_file,
                                      args=args,
                                      debug=debug_info,
                                      stdout=PIPE)
    if DEBUG:
        utils.write_file(os.path.join(get_emscripten_temp_dir(), 'wasm2js-output.js'), wasm2js_js)
    # JS optimizations
    if opt_level >= 2:
        passes = []
        if not debug_info and not settings.USE_PTHREADS:
            passes += ['minifyNames']
            if symbols_file_js:
                passes += ['symbolMap=%s' % symbols_file_js]
        if minify_whitespace:
            passes += ['minifyWhitespace']
        passes += ['last']
        if passes:
            # hackish fixups to work around wasm2js style and the js optimizer FIXME
            wasm2js_js = f'// EMSCRIPTEN_START_ASM\n{wasm2js_js}// EMSCRIPTEN_END_ASM\n'
            wasm2js_js = wasm2js_js.replace('// EMSCRIPTEN_START_FUNCS;\n', '// EMSCRIPTEN_START_FUNCS\n')
            wasm2js_js = wasm2js_js.replace('// EMSCRIPTEN_END_FUNCS;\n', '// EMSCRIPTEN_END_FUNCS\n')
            wasm2js_js = wasm2js_js.replace('\n function $', '\nfunction $')
            wasm2js_js = wasm2js_js.replace('\n }', '\n}')
            wasm2js_js += '\n// EMSCRIPTEN_GENERATED_FUNCTIONS\n'
            temp = configuration.get_temp_files().get('.js').name
            utils.write_file(temp, wasm2js_js)
            temp = js_optimizer(temp, passes)
            wasm2js_js = utils.read_file(temp)
    # Closure compiler: in mode 1, we just minify the shell. In mode 2, we
    # minify the wasm2js output as well, which is ok since it isn't
    # validating asm.js.
    # TODO: in the non-closure case, we could run a lightweight general-
    # purpose JS minifier here.
    if use_closure_compiler == 2:
        temp = configuration.get_temp_files().get('.js').name
        with open(temp, 'a') as f:
            f.write(wasm2js_js)
        temp = closure_compiler(temp, pretty=not minify_whitespace, advanced=False)
        wasm2js_js = utils.read_file(temp)
        # closure may leave a trailing `;`, which would be invalid given where we place
        # this code (inside parens)
        wasm2js_js = wasm2js_js.strip()
        if wasm2js_js[-1] == ';':
            wasm2js_js = wasm2js_js[:-1]
    all_js = utils.read_file(js_file)
    # quoted notation, something like Module['__wasm2jsInstantiate__']
    finds = re.findall(r'''[\w\d_$]+\[['"]__wasm2jsInstantiate__['"]\]''', all_js)
    if not finds:
        # post-closure notation, something like a.__wasm2jsInstantiate__
        finds = re.findall(r'''[\w\d_$]+\.__wasm2jsInstantiate__''', all_js)
    assert len(finds) == 1
    marker = finds[0]
    # replace the placeholder with the actual wasm2js code
    all_js = all_js.replace(marker, f'(\n{wasm2js_js}\n)')
    # write the combined output under a new name and return it
    js_file = js_file + '.wasm2js.js'
    utils.write_file(js_file, all_js)
    return js_file
def strip(infile, outfile, debug=False, producers=False):
    """Copy `infile` to `outfile` with llvm-objcopy, optionally dropping sections.

    `debug` removes all `.debug*` sections; `producers` removes the wasm
    `producers` custom section.
    """
    removals = []
    if debug:
        removals.append('--remove-section=.debug*')
    if producers:
        removals.append('--remove-section=producers')
    check_call([LLVM_OBJCOPY, infile, outfile] + removals)
# extract the DWARF info from the main file, and leave the wasm with
# debug into as a file on the side
# TODO: emit only debug sections in the side file, and not the entire
# wasm as well
def emit_debug_on_side(wasm_file, wasm_file_with_dwarf):
    """Move DWARF out of `wasm_file` into a side file and link the two.

    The main wasm is stripped of debug sections, and an
    `external_debug_info` custom section pointing at the side file is
    appended, per https://yurydelendik.github.io/webassembly-dwarf/#external-DWARF
    """
    # NOTE(review): the `wasm_file_with_dwarf` parameter is immediately
    # overwritten below and therefore never used — confirm whether callers
    # expect to be able to choose the side-file name via this argument.
    # if the dwarf filename wasn't provided, use the default target + a suffix
    wasm_file_with_dwarf = settings.SEPARATE_DWARF
    if wasm_file_with_dwarf is True:
        wasm_file_with_dwarf = wasm_file + '.debug.wasm'
    embedded_path = settings.SEPARATE_DWARF_URL
    if not embedded_path:
        # no explicit URL was provided - default to a path relative to the wasm.
        embedded_path = os.path.relpath(wasm_file_with_dwarf,
                                        os.path.dirname(wasm_file))
        # normalize the path to use URL-style separators, per the spec
        embedded_path = embedded_path.replace('\\', '/').replace('//', '/')
    shutil.move(wasm_file, wasm_file_with_dwarf)
    strip(wasm_file_with_dwarf, wasm_file, debug=True)
    # embed a section in the main wasm to point to the file with external DWARF,
    # see https://yurydelendik.github.io/webassembly-dwarf/#external-DWARF
    section_name = b'\x13external_debug_info' # section name, including prefixed size
    filename_bytes = embedded_path.encode('utf-8')
    contents = webassembly.toLEB(len(filename_bytes)) + filename_bytes
    section_size = len(section_name) + len(contents)
    with open(wasm_file, 'ab') as f:
        f.write(b'\0') # user section is code 0
        f.write(webassembly.toLEB(section_size))
        f.write(section_name)
        f.write(contents)
def little_endian_heap(js_file):
    """Rewrite JS heap accesses to be explicitly little-endian; returns the new JS path."""
    logger.debug('enforcing little endian heap byte order')
    return acorn_optimizer(js_file, ['littleEndianHeap'])
def apply_wasm_memory_growth(js_file):
    """Make JS heap views safe under wasm memory growth (for pthreads).

    Runs the acorn `growableHeap` pass over `js_file` and prepends the
    runtime support code from src/growableHeap.js. Returns the path of the
    combined output file (`js_file + '.pgrow.js'`).
    """
    logger.debug('supporting wasm memory growth with pthreads')
    transformed = utils.read_file(acorn_optimizer(js_file, ['growableHeap']))
    support_code = utils.read_file(path_from_root('src/growableHeap.js'))
    out_file = js_file + '.pgrow.js'
    utils.write_file(out_file, support_code + '\n' + transformed)
    return out_file
def use_unsigned_pointers_in_js(js_file):
    """Rewrite JS pointer arithmetic to treat pointers as unsigned; returns the new JS path."""
    logger.debug('using unsigned pointers in JS')
    return acorn_optimizer(js_file, ['unsignPointers'])
def instrument_js_for_asan(js_file):
    """Instrument JS heap accesses for AddressSanitizer; returns the new JS path."""
    logger.debug('instrumenting JS memory accesses for ASan')
    return acorn_optimizer(js_file, ['asanify'])
def instrument_js_for_safe_heap(js_file):
    """Instrument JS heap accesses for SAFE_HEAP checking; returns the new JS path."""
    logger.debug('instrumenting JS memory accesses for SAFE_HEAP')
    return acorn_optimizer(js_file, ['safeHeap'])
def handle_final_wasm_symbols(wasm_file, symbols_file, debug_info):
    """Emit a function symbol map and/or strip debug info from the final wasm.

    When `symbols_file` is given, wasm-opt prints the function map, which is
    written there. When `debug_info` is false, the wasm is rewritten in place
    without -g, which drops its debug info.
    """
    logger.debug('handle_final_wasm_symbols')
    opt_args = []
    if symbols_file:
        opt_args.append('--print-function-map')
    if debug_info:
        # keep the wasm unchanged; --quiet suppresses the wasm-opt warning
        # regarding "no output file specified"
        opt_args.append('--quiet')
    else:
        # to remove debug info, we just write to that same file, and without -g
        opt_args.extend(['-o', wasm_file])
    # ignore stderr because if wasm-opt is run without a -o it will warn
    output = run_wasm_opt(wasm_file, args=opt_args, stdout=PIPE)
    if symbols_file:
        utils.write_file(symbols_file, output)
def is_ar(filename):
    """Return True if `filename` looks like an ar archive (regular or thin).

    Results are memoized in the module-level `_is_ar_cache`, since the same
    file is typically queried many times during a link.
    """
    try:
        # Use a membership test rather than a truthiness check on .get() so
        # that cached False results are honored too (the old truthiness check
        # re-read the file every time for non-archives).
        if filename in _is_ar_cache:
            return _is_ar_cache[filename]
        # Close the file promptly instead of leaking the handle.
        with open(filename, 'rb') as f:
            header = f.read(8)
        sigcheck = header in (b'!<arch>\n', b'!<thin>\n')
        _is_ar_cache[filename] = sigcheck
        return sigcheck
    except Exception as e:
        logger.debug('is_ar failed to test whether file \'%s\' is a llvm archive file! Failed on exception: %s' % (filename, e))
        return False
def is_bitcode(filename):
    """Return True if `filename` starts with an LLVM bitcode signature.

    Recognizes both the raw 'BC' magic and the macOS wrapper format, whose
    20-byte header begins with the little-endian encoding of 0x0B17C0DE and
    places the raw bitcode magic at offset 20.

    OSError from an unreadable file propagates, as before; callers handle it.
    """
    # Read enough bytes for either form in one pass, and close the handle
    # promptly (the previous version opened the file twice and leaked both
    # handles).
    with open(filename, 'rb') as f:
        header = f.read(22)
    # look for magic signature
    if header[:2] == b'BC':
        return True
    # on macOS, there is a 20-byte prefix which starts with little endian
    # encoding of 0x0B17C0DE
    if header[:4] == b'\xDE\xC0\x17\x0B':
        return header[20:22] == b'BC'
    # note: slicing a short read simply yields fewer bytes, so no exception
    # handling is needed for truncated inputs; they compare unequal above.
    return False
def is_wasm(filename):
    """Return True if `filename` is a wasm binary (magic number and version match)."""
    if not os.path.isfile(filename):
        return False
    # Use a context manager so the handle is closed promptly (the previous
    # version leaked it until garbage collection).
    with open(filename, 'rb') as f:
        header = f.read(webassembly.HEADER_SIZE)
    return header == webassembly.MAGIC + webassembly.VERSION
def is_wasm_dylib(filename):
    """Detect wasm dynamic libraries by the presence of the "dylink" custom section."""
    if not is_wasm(filename):
        return False
    module = webassembly.Module(filename)
    # Only the first section is inspected: the dylink section, when present,
    # comes first in the module.
    section = next(module.sections())
    if section.type == webassembly.SecType.CUSTOM:
        module.seek(section.offset)
        if module.readString() == 'dylink':
            return True
    return False
def map_to_js_libs(library_name):
    """Given the name of a special Emscripten-implemented system library, returns an
    pair containing
    1. Array of absolute paths to JS library files, inside emscripten/src/ that corresponds to the
    library name. `None` means there is no mapping and the library will be processed by the linker
    as a require for normal native library.
    2. Optional name of a corresponding native library to link in.
    """
    # Some native libraries are implemented in Emscripten as system side JS libraries
    library_map = {
        'EGL': ['library_egl.js'],
        'GL': ['library_webgl.js', 'library_html5_webgl.js'],
        'webgl.js': ['library_webgl.js', 'library_html5_webgl.js'],
        'GLESv2': ['library_webgl.js'],
        # N.b. there is no GLESv3 to link to (note [f] in https://www.khronos.org/registry/implementers_guide.html)
        'GLEW': ['library_glew.js'],
        'glfw': ['library_glfw.js'],
        'glfw3': ['library_glfw.js'],
        'GLU': [],
        'glut': ['library_glut.js'],
        'openal': ['library_openal.js'],
        'X11': ['library_xlib.js'],
        'SDL': ['library_sdl.js'],
        'uuid': ['library_uuid.js'],
        'websocket': ['library_websocket.js'],
        # These 4 libraries are separate under glibc but are all rolled into
        # libc with musl. For compatibility with glibc we just ignore them
        # completely.
        'dl': [],
        'm': [],
        'rt': [],
        'pthread': [],
        # This is the name of GNU's C++ standard library. We ignore it here
        # for compatibility with GNU toolchains.
        'stdc++': [],
    }
    # And some are hybrid and require JS and native libraries to be included
    native_library_map = {
        'GL': 'libGL',
    }
    if library_name in library_map:
        libs = library_map[library_name]
        logger.debug('Mapping library `%s` to JS libraries: %s' % (library_name, libs))
        return (libs, native_library_map.get(library_name))
    # -lfoo.js links src/library_foo.js directly, if that file exists
    if library_name.endswith('.js') and os.path.isfile(path_from_root('src', f'library_{library_name}')):
        return ([f'library_{library_name}'], None)
    return (None, None)
# Map a linker flag to a setting. This lets a user write -lSDL2 and it will have the same effect as
# -s USE_SDL=2.
def map_and_apply_to_settings(library_name):
    """Translate a -l<name> flag into settings changes, when applicable.

    Most libraries just work, because the -l name matches the name of the
    library we build. However, if a library has variations that cause us to
    build multiple versions with multiple names, the mapping below selects
    the right variant via settings. Returns True if a mapping was applied.
    """
    variant_settings = {
        # SDL2_mixer's built library name contains the specific codecs built in.
        'SDL2_mixer': [('USE_SDL_MIXER', 2)],
    }
    if library_name not in variant_settings:
        return False
    for key, value in variant_settings[library_name]:
        logger.debug('Mapping library `%s` to settings changes: %s = %s' % (library_name, key, value))
        setattr(settings, key, value)
    return True
def emit_wasm_source_map(wasm_file, map_file, final_wasm):
    """Generate a source map for `wasm_file`, written to `map_file`.

    Source file paths must be relative to the location of the map, which is
    emitted alongside the final wasm, so that is used as the base path.
    """
    base_path = os.path.dirname(os.path.abspath(final_wasm))
    check_call([PYTHON, path_from_root('tools/wasm-sourcemap.py'),
                wasm_file,
                '--dwarfdump=' + LLVM_DWARFDUMP,
                '-o', map_file,
                '--basepath=' + base_path])
def get_binaryen_feature_flags():
    """Return the wasm feature flags to pass to binaryen tools."""
    # settings.BINARYEN_FEATURES is empty unless features have been extracted
    # by wasm-emscripten-finalize already; otherwise let the tool detect them.
    if not settings.BINARYEN_FEATURES:
        return ['--detect-features']
    return settings.BINARYEN_FEATURES
def check_binaryen(bindir):
    """Sanity-check the binaryen install under `bindir` and warn on version skew."""
    opt_path = os.path.join(bindir, exe_suffix('wasm-opt'))
    if not os.path.exists(opt_path):
        exit_with_error('binaryen executable not found (%s). Please check your binaryen installation' % opt_path)
    try:
        version_output = run_process([opt_path, '--version'], stdout=PIPE).stdout
    except subprocess.CalledProcessError:
        exit_with_error('error running binaryen executable (%s). Please check your binaryen installation' % opt_path)
    if not version_output:
        return
    first_line = version_output.splitlines()[0]
    try:
        # the version number is the third whitespace-separated token
        version = int(first_line.split()[2])
    except (IndexError, ValueError):
        exit_with_error('error parsing binaryen version (%s). Please check your binaryen installation (%s)' % (first_line, opt_path))
    # Allow the expected version or the following one in order avoid needing to update both
    # emscripten and binaryen in lock step in emscripten-releases.
    if version not in (EXPECTED_BINARYEN_VERSION, EXPECTED_BINARYEN_VERSION + 1):
        diagnostics.warning('version-check', 'unexpected binaryen version: %s (expected %s)', version, EXPECTED_BINARYEN_VERSION)
def get_binaryen_bin():
    """Return binaryen's bin directory, verifying the installation on first use."""
    global binaryen_checked
    bindir = os.path.join(config.BINARYEN_ROOT, 'bin')
    if not binaryen_checked:
        check_binaryen(bindir)
        binaryen_checked = True
    return bindir
# track whether the last binaryen command kept debug info around. this is used
# to see whether we need to do an extra step at the end to strip it.
# (set by run_binaryen_command based on whether -g was passed)
binaryen_kept_debug_info = False
def run_binaryen_command(tool, infile, outfile=None, args=None, debug=False, stdout=None):
    """Run a binaryen `tool` on `infile`, optionally writing `outfile`.

    `args` are extra command-line flags; `debug` preserves debug info (-g).
    Returns the tool's stdout when `stdout=PIPE`. Also updates the module
    flag `binaryen_kept_debug_info`.
    """
    # Use None rather than a mutable [] default so no list is shared across
    # calls.
    if args is None:
        args = []
    cmd = [os.path.join(get_binaryen_bin(), tool)]
    if outfile and tool == 'wasm-opt' and \
       (settings.DEBUG_LEVEL < 3 or settings.GENERATE_SOURCE_MAP):
        # remove any dwarf debug info sections, if the debug level is <3, as
        # we don't need them; also remove them if we use source maps (which are
        # implemented separately from dwarf).
        # note that we add this pass first, so that it doesn't interfere with
        # the final set of passes (which may generate stack IR, and nothing
        # should be run after that)
        # TODO: if lld can strip dwarf then we don't need this. atm though it can
        #       only strip all debug info or none, which includes the name section
        #       which we may need
        # TODO: once fastcomp is gone, either remove source maps entirely, or
        #       support them by emitting a source map at the end from the dwarf,
        #       and use llvm-objcopy to remove that final dwarf
        cmd += ['--strip-dwarf']
    cmd += args
    if infile:
        cmd += [infile]
    if outfile:
        cmd += ['-o', outfile]
        if settings.ERROR_ON_WASM_CHANGES_AFTER_LINK:
            # emit some extra helpful text for common issues
            extra = ''
            # a plain -O0 build *almost* doesn't need post-link changes, except for
            # legalization. show a clear error for those (as the flags the user passed
            # in are not enough to see what went wrong)
            if settings.LEGALIZE_JS_FFI:
                extra += '\nnote: to disable int64 legalization (which requires changes after link) use -s WASM_BIGINT'
            if settings.OPT_LEVEL > 0:
                extra += '\nnote: -O2+ optimizations always require changes, build with -O0 or -O1 instead'
            exit_with_error(f'changes to the wasm are required after link, but disallowed by ERROR_ON_WASM_CHANGES_AFTER_LINK: {cmd}{extra}')
    if debug:
        cmd += ['-g'] # preserve the debug info
    # if the features are not already handled, handle them
    cmd += get_binaryen_feature_flags()
    # if we are emitting a source map, every time we load and save the wasm
    # we must tell binaryen to update it
    if settings.GENERATE_SOURCE_MAP and outfile:
        cmd += [f'--input-source-map={infile}.map']
        cmd += [f'--output-source-map={outfile}.map']
    ret = check_call(cmd, stdout=stdout).stdout
    if outfile:
        save_intermediate(outfile, '%s.wasm' % tool)
    global binaryen_kept_debug_info
    binaryen_kept_debug_info = '-g' in cmd
    return ret
def run_wasm_opt(*args, **kwargs):
    """Convenience wrapper: run_binaryen_command with the wasm-opt tool."""
    return run_binaryen_command('wasm-opt', *args, **kwargs)
# Monotonic counter used to order the saved intermediate files by step.
save_intermediate_counter = 0
def save_intermediate(src, dst):
    """In DEBUG mode, copy `src` into the canonical temp dir as a numbered debug snapshot."""
    if DEBUG:
        global save_intermediate_counter
        dst = 'emcc-%d-%s' % (save_intermediate_counter, dst)
        save_intermediate_counter += 1
        dst = os.path.join(CANONICAL_TEMP_DIR, dst)
        logger.debug('saving debug copy %s' % dst)
        shutil.copyfile(src, dst)
| StarcoderdataPython |
93408 | # -*- coding: utf-8 -*-
from .java_data import NontermClass, StateClass, Method, StartingMethod
from .java_data import BOTTOM, CONTEXT, METHOD
def encode(table):
    """Encode a parse table into Java source files (one per nonterminal class).

    Returns a dict mapping file name -> file content, each prefixed with a
    `package` declaration derived from the start symbol's name.
    """
    # NOTE(review): dict((BOTTOM, CONTEXT, METHOD)) builds a dict from three
    # items, which only works if each of BOTTOM/CONTEXT/METHOD is a
    # (name, content) pair — confirm against java_data's definitions.
    files = dict((BOTTOM, CONTEXT, METHOD))
    # One NontermClass per symbol group; the start symbol is flagged.
    ntcs = {}
    for sym in table.groups:
        ntcs[sym] = NontermClass(sym.text, sym == table.start, table.eval)
    # One StateClass per state, registered with its owning nonterminal class.
    stcs = {}
    for st in table.states:
        stcs[st] = StateClass(st.sym.text, st.idx, st.is_fin)
        ntcs[st.sym].stcs.add(stcs[st])
    # Turn each table cell into a (possibly starting) method on its state.
    for c in table.cells:
        ret = [(d.sym.text, d.idx) for d in c.dst]
        name = c.sym.text
        arg = None
        is_native_arg = False
        if c.sym.is_term:
            # terminal symbols take no argument
            arg = ''
            is_native_arg = False
        elif c.sym.is_typed:
            # typed symbols take a value of a native Java type
            arg = c.sym.type
            is_native_arg = True
        else:
            # nonterminal symbols take the symbol's own class
            arg = c.sym.text
            is_native_arg = False
        if c.src.idx == 0:
            method = StartingMethod(ret, name, arg, is_native_arg)
            ntcs[c.src.sym].starts.add(method)
        # `repeat` is set when the first destination state accepts the same
        # symbol again (a self-continuing transition).
        repeat = False
        if 0 < len(c.dst):
            for _c in table.cells:
                if c.dst[0] == _c.src:
                    if _c.sym == c.sym:
                        repeat = True
                        break
        method = Method(ret, name, arg, is_native_arg, repeat)
        stcs[c.src].methods.add(method)
    # Ensure every nonterminal has a no-arg starting method named after itself,
    # unless one with that name already exists.
    for sym, ntc in ntcs.items():
        ret = [(sym.text, 0)]
        name = sym.text
        method = StartingMethod(ret, name)
        for m in ntc.starts:
            if m._name == name:
                break
        else:
            ntc.starts.add(method)
    # Render each class to source and prepend the package declaration.
    for c in ntcs.values():
        files[c.fname] = str(c)
    pkg = 'package {};\n\n'.format(table.start.text.lower())
    return {name: pkg + content for name, content in files.items()}
| StarcoderdataPython |
203854 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""My general server framework."""
import socketserver
import threading
from message import Message
import network_utils as nu
class GameMessageHandler(socketserver.StreamRequestHandler):
    """Per-connection handler: receive messages, update state, broadcast responses."""

    def handle(self) -> None:
        """Serve one client until it disconnects or the connection errors."""
        server = self.server # type: GameServer
        # Register this client's socket so broadcasts reach it.
        server.add_client(self.request, self.client_address)
        print(f'| [Server] connected to client {self.client_address}')
        while True:
            request = self.request
            try:
                data = nu.recv_message(request)
            except ConnectionError as e:
                print(f'| [Server] {e}')
                break
            if not data:
                # Reached EOF: the peer closed the connection cleanly.
                break
            # Decode, tag with the sender, apply to the shared state, and
            # broadcast the resulting response to all connected clients.
            msg = Message.from_bytes(data)
            msg.set_client_address(self.client_address)
            response = server.update_state(msg)
            resp_data = response.to_bytes()
            server.broadcast(resp_data)
        print(f'| [Server] client {self.client_address} disconnected')
        server.remove_client(self.client_address)
class GameServer(socketserver.ThreadingTCPServer):
    """Threaded TCP game server that broadcasts state updates to all clients.

    Each connection runs in its own handler thread, so the shared state is
    guarded by a lock and the client registry is snapshotted before
    iteration.
    """

    def __init__(self, address, state):
        super().__init__(address, GameMessageHandler)
        self._state = state
        # client_address -> connected socket, maintained by the handlers
        self._clients = {}
        self._lock = threading.Lock()

    def add_client(self, client_socket, client_address):
        """Register a newly connected client's socket for broadcasting."""
        self._clients[client_address] = client_socket

    def remove_client(self, client_address):
        """Forget a disconnected client."""
        del self._clients[client_address]

    def broadcast(self, msg_data: bytes):
        """Send `msg_data` to every connected client, skipping failed sockets."""
        # Iterate over a snapshot: handler threads add/remove clients
        # concurrently, and mutating a dict while iterating its view raises
        # RuntimeError.
        for client_socket in list(self._clients.values()):
            try:
                nu.send_message(client_socket, msg_data)
            except ConnectionError as e:
                print(f'| [Server] {e}')
                continue

    def update_state(self, msg) -> 'Message':
        """Apply an incoming message to the shared game state under the lock."""
        with self._lock:
            return self._state.update_server(msg)

    def serve_forever(self, poll_interval: float = 0.5) -> None:
        print('| [Server] starting serve ...')
        return super().serve_forever(poll_interval)
# TODO: Add support for a thread-pool server.
def spawn_server(address, state, return_thread=False, poll_interval=0.5):
    """Create a GameServer bound to `address` and serve it on a new thread.

    Returns the server, or a (server, thread) pair when `return_thread` is
    true so the caller can join the serving thread on shutdown.
    """
    server = GameServer(address, state)

    def _serve():
        server.serve_forever(poll_interval=poll_interval)

    server_thread = threading.Thread(target=_serve)
    server_thread.start()
    return (server, server_thread) if return_thread else server
1650344 | from .draw import draw_rectangle
from .grid import create_meshgrid, create_meshgrid3d
from .helpers import _extract_device_dtype
from .image import image_to_tensor, ImageToTensor, tensor_to_image
from .memory import batched_forward
from .one_hot import one_hot
from .pointcloud_io import load_pointcloud_ply, save_pointcloud_ply
# Names re-exported as this package's public API.
__all__ = [
    "batched_forward",
    "one_hot",
    "create_meshgrid",
    "create_meshgrid3d",
    "tensor_to_image",
    "image_to_tensor",
    "save_pointcloud_ply",
    "load_pointcloud_ply",
    "draw_rectangle",
    "_extract_device_dtype",
    "ImageToTensor",
]
| StarcoderdataPython |
3370044 | from django.urls import path
# from .views import SportsmanView
# URL namespace for this app (enables reversing as "network:<name>").
app_name = "network"
# No routes are registered yet; the commented entries show the intended views.
urlpatterns = [
    # path("", NetworkView.as_view(), name="index"),
    # path("", SportsmanView.as_view()),
]
| StarcoderdataPython |
91280 | <reponame>LI-Mingyu/GraphScope-MY<gh_stars>1000+
import networkx.algorithms.traversal.tests.test_bfs
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
# Re-run networkx's BFS test suite against graphscope.nx, binding every test
# to an active graphscope session via the pytest fixture.
import_as_graphscope_nx(networkx.algorithms.traversal.tests.test_bfs,
                        decorators=pytest.mark.usefixtures("graphscope_session"))
| StarcoderdataPython |
288975 | import numpy as np
import pandas as pd
import os
from datetime import datetime
from ..options.OptionPortfolio import OptionPortfolio
from matplotlib.backends.backend_pdf import PdfPages
from ..basicData.basicData import BasicData
from ..strategy import *
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
class BacktestFramework:
    """Delta-hedging backtest for an option portfolio against a stock hedge."""
    # Trading calendar shared by all backtests.
    ALL_TRADE_DATES = BasicData.ALL_TRADE_DATES
    # Transaction cost rate applied to stock trades (2 bps of traded value).
    TR = 0.0002
    def __init__(self):
        """Set up the result-column schema, summary-index names, and paths."""
        # Columns of the per-date backtest result frame.
        self.backtest_columns = ['option_value','sigma','stock_price','stock_position','stock_value','stock_pnl','trading_cost',
                                 'delta_nav', 'nav', 'cash_account','option_pnl','cash_delta','cash_gamma',
                                 'total_cash_delta','delta_pnl','gamma_pnl','other_pnl','delta_exposure_pnl',
                                 'exposure_direction','delta_value','gamma_value','other_value','trading_cum_cost',
                                 'trade_dummy','trade_type','delta_pnl_contribution']
        # Keys of the summary statistics produced by analysis().
        self.analysis_index = ['total_pnl','option_pnl','stock_pnl','trading_cost','delta_pnl','gamma_pnl',
                               'other_pnl','delta_pnl_part1','delta_pnl_part2','delta_pnl_part3',
                               'delta_pnl_part4','min_cash','max_drawdown']
        self.reset()
        self.CLASS_PATH = __file__
        # Reports are written three directories above this module, in 'report'.
        self.REPORT_FOLDER = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'report')
    def reset(self):
        """Start over with a fresh, empty option portfolio and no strategy."""
        self.option_portfolio = OptionPortfolio()
        self.set_strategy()
    def set_strategy(self, strategy_name=''):
        """Select the hedging strategy by class name; empty string clears it.

        NOTE(review): eval() instantiates the class from the given name —
        only call with trusted, known strategy class names.
        """
        self.strategy_name = strategy_name
        self.strategy = None if not strategy_name else eval(strategy_name)()
    def set_option(self,option_class,option_position,**option_paras):
        """Add an option (class, signed position, contract parameters) to the portfolio."""
        self.option_portfolio.add_option_by_dict(option_class,option_position,option_paras)
    def run_backtest(self):
        """Run the delta-hedging backtest and populate `backtest_df`.

        Pipeline: hedge positions from the strategy -> cash flows/NAV ->
        greek-based P&L attribution (delta/gamma/other) -> trade marking and
        per-regime delta-P&L contributions. Order of the assignments matters:
        later columns are derived from earlier ones.
        """
        self.init_backtest()
        # Hedge position per date, from the strategy and the portfolio greeks.
        self.backtest_df.loc[:, 'stock_position'] = self.strategy.get_hedging_position(self.option_portfolio.get_greeks(),self.stock_prices)
        self.backtest_df.loc[:, 'stock_value'] = self.backtest_df.loc[:, 'stock_position']*self.stock_prices
        # Prepend a zero position so the first trade is the full initial buy.
        augmented_stock_position = np.hstack((0,self.backtest_df.loc[:, 'stock_position']))
        self.backtest_df.loc[:, 'trading_cost'] = np.abs(np.diff(augmented_stock_position))*self.stock_prices*self.TR
        self.backtest_df.loc[:, 'option_pnl'] = self.backtest_df.loc[:, 'option_value'].diff().fillna(0)
        # Stock P&L uses yesterday's position against today's price move.
        self.backtest_df.loc[:, 'stock_pnl'] = (self.backtest_df.loc[:, 'stock_position'].shift(1)*self.stock_prices.diff()).fillna(0)
        self.backtest_df.loc[:, 'delta_nav'] = self.backtest_df.loc[:, 'stock_pnl']+self.backtest_df.loc[:, 'option_pnl']-self.backtest_df.loc[:, 'trading_cost']
        # NAV starts from the option fee plus the initial option value.
        self.backtest_df.loc[:, 'nav'] = np.cumsum(self.backtest_df.loc[:, 'delta_nav'])+self.option_portfolio.option_fee+self.backtest_df.loc[:, 'option_value'].iloc[0]
        self.backtest_df.loc[:, 'cash_account'] = self.backtest_df.loc[:, 'nav']-self.backtest_df.loc[:, 'stock_value']-self.backtest_df.loc[:, 'option_value']
        # Portfolio greeks (in cash terms) from the option portfolio.
        self.backtest_df.loc[:, 'cash_delta'] = self.option_portfolio.greek_df.loc[:,'cash_delta']
        self.backtest_df.loc[:, 'cash_gamma'] = self.option_portfolio.greek_df.loc[:,'cash_gamma']
        # Net delta of options plus the stock hedge.
        self.backtest_df.loc[:, 'total_cash_delta'] = self.backtest_df.loc[:, 'cash_delta']+self.backtest_df.loc[:, 'stock_position']*self.stock_prices
        pct_change = self.stock_prices.pct_change().fillna(0)
        # Taylor-expansion attribution of option P&L: delta, gamma, residual.
        self.backtest_df.loc[:, 'delta_pnl'] = (self.backtest_df.loc[:, 'cash_delta'].shift(1)*pct_change).fillna(0)
        self.backtest_df.loc[:, 'delta_exposure_pnl'] = (self.backtest_df.loc[:, 'total_cash_delta'].shift(1)*pct_change).fillna(0)
        self.backtest_df.loc[:, 'gamma_pnl'] = (0.5*self.option_portfolio.greek_df.loc[:,'cash_gamma'].shift(1)*100*np.power(pct_change,2)).fillna(0)
        self.backtest_df.loc[:, 'other_pnl'] = self.backtest_df.loc[:, 'option_pnl']-self.backtest_df.loc[:, 'delta_pnl']-self.backtest_df.loc[:, 'gamma_pnl']
        # Direction of yesterday's residual delta exposure (+1/-1, 0 on day 1).
        self.backtest_df.loc[self.backtest_df.loc[:, 'total_cash_delta']>=0, 'exposure_direction'] = 1
        self.backtest_df.loc[:, 'exposure_direction'].fillna(-1, inplace=True)
        self.backtest_df.loc[:, 'exposure_direction'] = self.backtest_df.loc[:,'exposure_direction'].shift(1)
        self.backtest_df.loc[:, 'exposure_direction'].fillna(0,inplace=True)
        # Cumulative attribution series.
        self.backtest_df.loc[:,'delta_value'] = self.backtest_df.loc[:, 'delta_exposure_pnl'].cumsum()
        self.backtest_df.loc[:, 'gamma_value'] = self.backtest_df.loc[:, 'gamma_pnl'].cumsum()
        self.backtest_df.loc[:, 'other_value'] = self.backtest_df.loc[:, 'other_pnl'].cumsum()
        self.backtest_df.loc[:, 'trading_cum_cost'] = self.backtest_df.loc[:, 'trading_cost'].cumsum()
        # Mark dates on which the hedge position actually changed.
        self.backtest_df.loc[self.backtest_df.loc[:, 'stock_position'].diff()==0, 'trade_dummy'] = 0
        self.backtest_df.loc[:, 'trade_dummy'].fillna(1,inplace=True)
        self.trade_ticks = self.backtest_df.loc[self.backtest_df.loc[:, 'trade_dummy']==1].index
        # Classify each date into 4 regimes by exposure direction x price move.
        self.backtest_df.loc[(self.backtest_df.loc[:, 'exposure_direction']>0)&(self.backtest_df.loc[:, 'stock_price'].diff()>0), 'trade_type'] = 1
        self.backtest_df.loc[(self.backtest_df.loc[:, 'exposure_direction']<0)&(self.backtest_df.loc[:, 'stock_price'].diff()>0), 'trade_type'] = 2
        self.backtest_df.loc[(self.backtest_df.loc[:, 'exposure_direction']<0)&(self.backtest_df.loc[:, 'stock_price'].diff()<0), 'trade_type'] = 3
        self.backtest_df.loc[(self.backtest_df.loc[:, 'exposure_direction']>0)&(self.backtest_df.loc[:, 'stock_price'].diff()<0), 'trade_type'] = 4
        self.backtest_df.loc[:, 'trade_type'].fillna(0, inplace=True)
        # Within each regime, each date's share of that regime's delta P&L.
        for trade_type in [1,2,3,4]:
            self.backtest_df.loc[self.backtest_df.trade_type==trade_type,'delta_pnl_contribution'] = self.backtest_df.loc[self.backtest_df.trade_type==trade_type,'delta_exposure_pnl']/self.backtest_df.loc[self.backtest_df.trade_type==trade_type,'delta_exposure_pnl'].sum()
        self.analysis()
    def analysis(self):
        """Summarize backtest results into `analysis_dict` (see `analysis_index`)."""
        self.init_analysis()
        self.analysis_dict['total_pnl'] = self.backtest_df['nav'].iloc[-1]
        # Option P&L = terminal option value plus the premium received/paid.
        self.analysis_dict['option_pnl'] = self.backtest_df['option_value'].iloc[-1]+self.option_portfolio.option_fee
        self.analysis_dict['stock_pnl'] = self.backtest_df['stock_pnl'].sum()
        self.analysis_dict['trading_cost'] = self.backtest_df['trading_cum_cost'].iloc[-1]
        # Attribution totals: cumulative delta/gamma/residual P&L at the end.
        self.analysis_dict['delta_pnl'] = self.backtest_df.loc[:,'delta_value'].iloc[-1]
        self.analysis_dict['gamma_pnl'] = self.backtest_df['gamma_value'].iloc[-1]
        self.analysis_dict['other_pnl'] = self.backtest_df['other_value'].iloc[-1]
        # Delta P&L split by the 4 exposure-direction x price-move regimes.
        self.analysis_dict['delta_pnl_part1'] = self.backtest_df.loc[self.backtest_df.trade_type == 1, 'delta_exposure_pnl'].sum()
        self.analysis_dict['delta_pnl_part2'] = self.backtest_df.loc[self.backtest_df.trade_type == 2, 'delta_exposure_pnl'].sum()
        self.analysis_dict['delta_pnl_part3'] = self.backtest_df.loc[self.backtest_df.trade_type == 3, 'delta_exposure_pnl'].sum()
        self.analysis_dict['delta_pnl_part4'] = self.backtest_df.loc[self.backtest_df.trade_type == 4, 'delta_exposure_pnl'].sum()
        # Largest cash deficit and worst peak-to-trough NAV drawdown.
        self.analysis_dict['min_cash'] = self.backtest_df['cash_account'].min()
        self.analysis_dict['max_drawdown'] = self.cal_MDD(self.backtest_df.loc[:, 'nav'])
    def visualize(self,report=False):
        """Plot the full set of diagnostic charts for the hedging backtest.

        Produces 12 figures (hedge overview, cash_delta, cash_gamma, P&L
        decomposition, per-bucket bars, exposure/price scatter, four
        per-bucket contribution series, gamma and other daily P&L).  When
        *report* is True, all figures are additionally bundled into a
        timestamped PDF under ``self.REPORT_FOLDER``.
        """
        # Hedge overview: hedge portfolio value vs. option value vs. total NAV.
        fig1,ax1 = self.init_canvas([0.08,0.05,0.85,0.81])
        ax1.plot(self.trade_dates,self.backtest_df['stock_value']+self.backtest_df['cash_account'],
                 linewidth=0.5,color='blue',label='hedge_value')
        ax1.plot(self.trade_dates,self.backtest_df['option_value'],
                 linewidth=0.5,color='orange',label='option_value')
        ax1.plot(self.trade_dates,self.backtest_df['nav'],
                 linewidth=0.5,color='red',label='total_value')
        t_data = self.backtest_df.loc[:,['stock_value','option_value','nav']]
        min_value = t_data.min().min()
        max_value = t_data.max().max()
        # Vertical lines mark the ticks on which hedge trades were executed.
        ax1.vlines(self.trade_ticks,min_value,max_value,label='hedge_tradings',
                   color='black',linewidths=0.1)
        ax2 = ax1.twinx()
        ax2.plot(self.trade_dates,self.backtest_df['stock_position'],
                 linewidth=0.5,color='green',label='stock_position(右轴)')
        ax1.set_title('股票期权对冲总览,对冲策略:{0:s}\n{1:s}\n整体盈利:{2:,.0f},期权损益:{3:,.0f},对冲损益:{4:,.0f},交易成本:{5:,.0f}\n现金最大占用量:{6:,.0f},组合最大回撤:{7:,.0f}'.format(\
            self.strategy_name,self.option_portfolio.option_info,self.analysis_dict['total_pnl'],
            self.analysis_dict['option_pnl'],self.analysis_dict['stock_pnl'],
            self.analysis_dict['trading_cost'],-self.analysis_dict['min_cash'],
            self.analysis_dict['max_drawdown']))
        fig1.legend(loc='right')
        ax1.set_ylabel('金额/元')
        ax2.set_ylabel('股票头寸/股')
        plt.show()
        # cash_delta view: delta exposure plotted against the underlying price path.
        fig2, ax1 = self.init_canvas([0.08, 0.05, 0.85, 0.86])
        ax1.plot(self.trade_dates,self.backtest_df['cash_delta'],linewidth=0.5,
                 color='orange',label='cash_delta')
        ax2 = ax1.twinx()
        ax2.plot(self.trade_dates,self.backtest_df['stock_price'],linewidth=1,
                 color='red',label='stock_price(右轴)')
        t_data = self.backtest_df['cash_delta']
        min_value = t_data.min()
        max_value = t_data.max()
        ax1.vlines(self.trade_ticks,min_value,max_value,label='hedge_tradings',
                   color='black',linewidths=0.1)
        ax1.set_ylabel('cash_delta')
        ax2.set_ylabel('stock_price')
        ax1.set_title('cash_delta分析,,对冲策略:{0:s}\n{1:s}'.format(self.strategy_name,self.option_portfolio.option_info))
        fig2.legend(loc='right')
        plt.show()
        # cash_gamma view: gamma exposure plotted against the underlying price path.
        fig3, ax1 = self.init_canvas([0.08, 0.05, 0.85, 0.86])
        ax1.plot(self.trade_dates,self.backtest_df['cash_gamma'],linewidth=0.5,
                 color='green',label='cash_gamma')
        ax2 = ax1.twinx()
        ax2.plot(self.trade_dates,self.backtest_df['stock_price'],linewidth=1,
                 color='red',label='stock_price(右轴)')
        t_data = self.backtest_df['cash_gamma']
        min_value = t_data.min()
        max_value = t_data.max()
        ax1.vlines(self.trade_ticks,min_value,max_value,label='hedge_tradings',
                   color='black',linewidths=0.1)
        ax1.set_ylabel('cash_gamma')
        ax2.set_ylabel('stock_price')
        ax1.set_title('cash_gamma分析,,对冲策略:{0:s}\n{1:s}'.format(self.strategy_name,self.option_portfolio.option_info))
        fig3.legend(loc='right')
        plt.show()
        # P&L decomposition: NAV split into delta / gamma / other / cost legs.
        fig4,ax1 = self.init_canvas([0.09,0.05,0.88,0.81])
        ax1.plot(self.trade_dates,self.backtest_df['nav'],linewidth=1,
                 color='red',label='整体盈利')
        ax1.plot(self.trade_dates,self.backtest_df['delta_value'],linewidth=0.5,
                 color='orange',label='delta敞口盈利')
        ax1.plot(self.trade_dates,self.backtest_df['gamma_value'],linewidth=0.5,
                 color='green',label='gamma盈利')
        ax1.plot(self.trade_dates,self.backtest_df['other_value'],linewidth=0.5,
                 color='blue',label='其他盈利')
        ax1.plot(self.trade_dates,-self.backtest_df['trading_cum_cost'],linewidth=0.5,
                 color='grey',label='交易成本')
        t_data = self.backtest_df.loc[:,['delta_value','gamma_value','other_value','trading_cum_cost','nav']]
        min_value = t_data.min().min()
        max_value = t_data.max().max()
        ax1.vlines(self.trade_ticks,min_value,max_value,label='hedge_tradings',
                   color='black',linewidths=0.1)
        ax1.legend(loc='best')
        ax1.set_ylabel('金额/元')
        ax1.set_title('对冲收益分解,对冲策略:{0:s}\n{1:s}\n整体盈利:{2:,.0f},期权初始价值:{7:,.0f},delta敞口盈利:{3:,.0f},gamma盈利:{4:,.0f}\n其他盈利:{5:,.0f},交易成本:{6:,.0f}'.format(
            self.strategy_name,self.option_portfolio.option_info,self.analysis_dict['total_pnl'],
            self.analysis_dict['delta_pnl'],self.analysis_dict['gamma_pnl'],self.analysis_dict['other_pnl'],
            self.analysis_dict['trading_cost'],self.backtest_df.option_value.iloc[0]))
        plt.show()
        # Delta-exposure P&L per trade-type bucket, plus gamma / other / cost bars.
        fig5,ax1 = self.init_canvas([0.09,0.05,0.88,0.81])
        ax1.bar(2,self.analysis_dict['delta_pnl_part1'],color='red')
        ax1.bar(4,self.analysis_dict['delta_pnl_part2'],color='orange')
        ax1.bar(6,self.analysis_dict['delta_pnl_part3'],color='green')
        ax1.bar(8,self.analysis_dict['delta_pnl_part4'],color='blue')
        ax1.bar(10,self.analysis_dict['gamma_pnl'],color='purple')
        ax1.bar(12,self.analysis_dict['other_pnl'],color='brown')
        ax1.bar(14,-self.analysis_dict['trading_cost'],color='grey')
        ax1.set_xticks([2,4,6,8,10,12,14])
        ax1.set_xticklabels(['cash_delta>0&△S>0','cash_delta<0&△S>0','cash_delta<0&△S<0','cash_delta>0&△S<0',
                             'gamma_pnl','other_pnl','trading_cost'])
        ax1.set_ylabel('金额/元')
        ax1.set_title('收益拆解,对冲策略:{0:s}\n{1:s}\n 四个累计收益:{2:,.0f} {3:,.0f} {4:,.0f} {5:,.0f}\ngamma收益:{6:,.0f},其他盈利:{7:,.0f},交易成本:{8:,.0f}'.format(
            self.strategy_name,self.option_portfolio.option_info,self.analysis_dict['delta_pnl_part1'],
            self.analysis_dict['delta_pnl_part2'],self.analysis_dict['delta_pnl_part3'],self.analysis_dict['delta_pnl_part4'],
            self.analysis_dict['gamma_pnl'],self.analysis_dict['other_pnl'],self.analysis_dict['trading_cost']))
        plt.show()
        # Scatter of prior-tick delta exposure vs. the subsequent price move,
        # coloured by trade-type bucket.
        fig6,ax1 = self.init_canvas([0.09,0.08,0.88,0.84])
        t_data = self.backtest_df.loc[:,['trade_type','delta_pnl_contribution','gamma_pnl','other_pnl']].copy()
        t_data['delta_price'] = self.backtest_df.loc[:,'stock_price'].diff()
        # Shift by one tick: the exposure held *before* the move is what earns the P&L.
        t_data['total_cash_delta'] = self.backtest_df.loc[:,'total_cash_delta'].shift(1)
        t_data['delta_pnl_contribution'] = t_data['delta_pnl_contribution'].astype(float)
        x_min = t_data['total_cash_delta'].min()
        x_max = t_data['total_cash_delta'].max()
        y_min = t_data['delta_price'].min()
        y_max = t_data['delta_price'].max()
        ax1.scatter(t_data.loc[t_data.trade_type==1,'total_cash_delta'],
                    t_data.loc[t_data.trade_type==1,'delta_price'],color='red',
                    label='cash_delta>0&△S>0')
        ax1.scatter(t_data.loc[t_data.trade_type==2,'total_cash_delta'],
                    t_data.loc[t_data.trade_type==2,'delta_price'],color='orange',
                    label='cash_delta<0&△S>0')
        ax1.scatter(t_data.loc[t_data.trade_type==3,'total_cash_delta'],
                    t_data.loc[t_data.trade_type==3,'delta_price'],color='green',
                    label='cash_delta<0&△S<0')
        ax1.scatter(t_data.loc[t_data.trade_type==4,'total_cash_delta'],
                    t_data.loc[t_data.trade_type==4,'delta_price'],color='blue',
                    label='cash_delta>0&△S<0')
        ax1.vlines(0,y_min,y_max)
        ax1.hlines(0,x_min,x_max)
        ax1.set_xlabel('cash_delta敞口')
        ax1.set_ylabel('价格变动')
        ax1.legend(loc='best')
        ax1.set_title('delta敞口收益散点图,对冲策略:{0:s}\n{1:s}'.format(
            self.strategy_name,self.option_portfolio.option_info))
        plt.show()
        # Contribution share of bucket-1 (cash_delta>0 & price up) delta P&L.
        fig7,ax1 = self.init_canvas([0.09,0.08,0.88,0.84])
        ax1.plot(t_data.loc[t_data.trade_type==1].index,
                 t_data.loc[t_data.trade_type==1,'delta_pnl_contribution'],
                 color='red',label='cash_delta>0&△S>0类收益贡献比例')
        ax1.set_ylabel('delta收益贡献比例')
        ax1.legend(loc='best')
        ax1.set_title('cash_delta>0&△S>0类收益贡献比例分析,对冲策略:{0:s}\n{1:s}'.format(
            self.strategy_name,self.option_portfolio.option_info))
        plt.show()
        # Contribution share of bucket-2 (cash_delta<0 & price up) delta P&L.
        fig8,ax1 = self.init_canvas([0.09,0.08,0.88,0.84])
        ax1.plot(t_data.loc[t_data.trade_type==2].index,
                 t_data.loc[t_data.trade_type==2,'delta_pnl_contribution'],
                 color='orange',label='cash_delta<0&△S>0类收益贡献比例')
        ax1.set_ylabel('delta收益贡献比例')
        ax1.legend(loc='best')
        ax1.set_title('cash_delta<0&△S>0类收益贡献比例分析,对冲策略:{0:s}\n{1:s}'.format(
            self.strategy_name,self.option_portfolio.option_info))
        plt.show()
        # Contribution share of bucket-3 (cash_delta<0 & price down) delta P&L.
        fig9,ax1 = self.init_canvas([0.09,0.08,0.88,0.84])
        ax1.plot(t_data.loc[t_data.trade_type==3].index,
                 t_data.loc[t_data.trade_type==3,'delta_pnl_contribution'],
                 color='green',label='cash_delta<0&△S<0类收益贡献比例')
        ax1.set_ylabel('delta收益贡献比例')
        ax1.legend(loc='best')
        ax1.set_title('cash_delta<0&△S<0类收益贡献比例分析,对冲策略:{0:s}\n{1:s}'.format(
            self.strategy_name,self.option_portfolio.option_info))
        plt.show()
        # Contribution share of bucket-4 (cash_delta>0 & price down) delta P&L.
        fig10,ax1 = self.init_canvas([0.09,0.08,0.88,0.84])
        ax1.plot(t_data.loc[t_data.trade_type==4].index,
                 t_data.loc[t_data.trade_type==4,'delta_pnl_contribution'],
                 color='blue',label='cash_delta>0&△S<0类收益贡献比例')
        ax1.set_ylabel('delta收益贡献比例')
        ax1.legend(loc='best')
        ax1.set_title('cash_delta>0&△S<0类收益贡献比例分析,对冲策略:{0:s}\n{1:s}'.format(
            self.strategy_name,self.option_portfolio.option_info))
        plt.show()
        # Daily gamma P&L series.
        fig11,ax1 = self.init_canvas([0.09,0.08,0.88,0.84])
        ax1.plot(t_data.index,t_data.loc[:,'gamma_pnl'],
                 color='purple',label='gamma日收益')
        ax1.set_ylabel('收益/元')
        ax1.legend(loc='best')
        ax1.set_title('gamma日收益,对冲策略:{0:s}\n{1:s}'.format(
            self.strategy_name,self.option_portfolio.option_info))
        plt.show()
        # Daily "other" (residual) P&L series.
        fig12,ax1 = self.init_canvas([0.09,0.08,0.88,0.84])
        ax1.plot(t_data.index,t_data.loc[:,'other_pnl'],
                 color='brown',label='其他日收益')
        ax1.set_ylabel('收益/元')
        ax1.legend(loc='best')
        ax1.set_title('其他日收益,对冲策略:{0:s}\n{1:s}'.format(
            self.strategy_name,self.option_portfolio.option_info))
        plt.show()
        # Optionally bundle every figure into a timestamped PDF report.
        if report:
            # NOTE(review): this replace() looks like a no-op (both arguments
            # render as ':'); it was presumably meant to swap the ':' that is
            # illegal in Windows filenames for a full-width colon -- confirm.
            current_time = str(datetime.now()).replace(':', ':')
            self.check_folder(self.REPORT_FOLDER)
            report_name = os.path.join(self.REPORT_FOLDER,'期权类型:{0:s},对冲策略:{1:s}_{2:s}.pdf'.format(self.option_portfolio.option_name,self.strategy_name,current_time))
            with PdfPages(report_name) as pdf:
                pdf.savefig(fig1)
                pdf.savefig(fig2)
                pdf.savefig(fig3)
                pdf.savefig(fig4)
                pdf.savefig(fig5)
                pdf.savefig(fig6)
                pdf.savefig(fig7)
                pdf.savefig(fig8)
                pdf.savefig(fig9)
                pdf.savefig(fig10)
                pdf.savefig(fig11)
                pdf.savefig(fig12)
def init_analysis(self):
self.analysis_dict = dict().fromkeys(self.analysis_index)
@staticmethod
def init_canvas(rect=[0.05, 0.05, 0.9, 0.9]):
fig = plt.figure(figsize=(10, 5.7), dpi=300)
ax = fig.add_axes(rect=rect)
return fig,ax
def init_backtest(self):
self.stock_prices = self.option_portfolio.public_df.loc[:, 'stock_price']
self.trade_dates = self.option_portfolio.trade_dates
self.backtest_df = pd.DataFrame(index=self.trade_dates,columns=self.backtest_columns)
self.backtest_df.loc[:, 'option_value'] = self.option_portfolio.greek_df.loc[:, 'option_value']
self.backtest_df.loc[:, 'stock_price'] = self.stock_prices
self.backtest_df.loc[:, 'sigma'] = self.option_portfolio.public_df.loc[:,'sigma']
@staticmethod
def cal_MDD(series):
return np.max(series.cummax()-series)
@staticmethod
def check_folder(temp_folder):
if not os.path.isdir(temp_folder):
BacktestFramework.make_folder(temp_folder)
@staticmethod
def make_folder(temp_folder):
if not os.path.isdir(os.path.dirname(temp_folder)):
BacktestFramework.make_folder(os.path.dirname(temp_folder))
os.makedirs(temp_folder)
| StarcoderdataPython |
6698850 | <gh_stars>0
# import theano
# import theano.tensor as T
from scipy.optimize import fmin_l_bfgs_b
import deepvis.proximal_alg
import numpy as np
import tensorflow as tf
class TensorFlowTheanoFunction(object):
    """Adapter exposing a TensorFlow fetch list through a Theano-function-like call."""

    def __init__(self, inputs, outputs):
        # Placeholders to feed (positionally) and the tensors to evaluate.
        self._inputs = inputs
        self._outputs = outputs

    def __call__(self, *args, **kwargs):
        # Map each positional argument onto the placeholder at the same index.
        # (Keyword arguments are accepted but ignored, as before.)
        feeds = {self._inputs[position]: value
                 for position, value in enumerate(args)}
        return tf.get_default_session().run(self._outputs, feeds)
class Visualizer():
    def __init__(self, calcGrad, calcCost, input):
        """
        Visualizer for Deep Neural Networks. Solves an inverse problem to find a suited input
        that minimizes the cost function given in calcCost.
        Parameters:
        -----------
        calcCost : function handle that computes the cost function for a given input
        calcGrad : function handle that computes the gradient of the cost function
        input : an input image (used for regularization or just to get the shape of the input)
        """
        self.calcGrad = calcGrad
        self.calcCost = calcCost
        # Input is coerced to float32; shape is taken from the *original* input object.
        self.input = np.asarray(input, dtype=np.float32)
        self.inp_shape = input.shape

    def optimize(self, x0, cost):
        # Base-class stub: subclasses override this with a real solver.
        return 0

    def map(self, x0):
        # NOTE(review): ``self.cost`` is never assigned anywhere in this class,
        # so calling map() on the base class raises AttributeError unless a
        # subclass defines it -- confirm the intended contract.
        return self.optimize(x0, self.cost)
class DeepVisualizer(Visualizer):
    def __init__(self, calcGrad, calcCost, input, alpha=0.01):
        """
        Deep Visualization for Deep Neural Networks. Solves an inverse problem to find a suited
        input that minimizes the cost function given in calcCost.

        Parameters:
        -----------
        calcCost : function handle that computes the cost function for a given input
        calcGrad : function handle that computes the gradient of the cost function
        input : an input image (used for regularization or just to get the shape of the input)
        alpha : l2-regularization on the wanted input image to obtain feasible results
        """
        Visualizer.__init__(self, calcGrad, calcCost, input)
        self.alpha = alpha

    def costFun(self, x):
        """Objective at the flat vector *x*: network cost plus alpha * ||x||^2."""
        reshaped = np.asarray(x.reshape(self.inp_shape), dtype=np.float32)
        data_term = np.float64(self.calcCost(reshaped))
        return data_term + self.alpha * np.dot(x.T, x)

    def gradFun(self, x):
        """Gradient of :meth:`costFun` at the flat vector *x*."""
        reshaped = np.asarray(x.reshape(self.inp_shape), dtype=np.float32)
        grad = np.asarray(self.calcGrad(reshaped), dtype=np.float64)
        return np.ravel(grad) + 2 * self.alpha * x

    def optimize(self, x0):
        """
        Solves the inverse problem with L-BFGS-B starting from *x0*
        and returns the solution reshaped to the input shape.
        """
        result, f, d = fmin_l_bfgs_b(self.costFun, np.ravel(x0), self.gradFun)
        print("optimization completed with cost: " + str(f))
        return result.reshape(self.inp_shape)
class SubsetSelection(Visualizer):
    def __init__(self, calcGrad, calcCost, input, alpha=0.01, gamma=0.1):
        """
        Subset selection for Deep Neural Networks. Solves an inverse problem to find a
        mask S so that the masked input S * input minimizes the cost function in calcCost.

        Parameters:
        -----------
        calcCost : function handle that computes the cost function for a given input
        calcGrad : function handle that computes the gradient of the cost function
        input : an input image (used for regularization or just to get the shape of the input)
        alpha : weight of the l1 term handed to the proximal solver (sparsity of the mask)
        gamma : step size for the proximal gradient algorithm
        """
        Visualizer.__init__(self, calcGrad, calcCost, input)
        self.alpha = alpha
        self.gamma = gamma

    def costFun(self, S, x):
        """Data term: network cost of the masked input S * x.

        BUG FIX: the cost was previously evaluated twice (once into a
        discarded local); the redundant call is removed.
        """
        return self.calcCost(S * x)

    def gradFun(self, S, x):
        """Gradient of the data term w.r.t. the mask S (chain rule: dC/dS = dC/d(Sx) * x)."""
        grad = self.calcGrad(S * x) * x
        # calcGrad returns a leading axis of size 1; drop it.
        return np.squeeze(grad, axis=0)

    def optimize(self, x0, n_iter=50):
        """
        Solves the inverse problem with proximal gradient descent.

        Parameters:
        -----------
        x0 : initial solution (mask)
        n_iter : number of proximal gradient steps used for optimization
        """
        x0 = np.asarray(x0, dtype=np.float32)
        # BUG FIX: this module does ``import deepvis.proximal_alg``, which only
        # binds the name ``deepvis`` -- the previous bare ``proximal_alg``
        # reference raised NameError at runtime.
        opt = deepvis.proximal_alg.ProximalGradSolver(
            self.gamma, self.alpha,
            lambda x: self.costFun(x, self.input),
            lambda x: np.sum(np.abs(x)),
            lambda x: self.gradFun(x, self.input),
            deepvis.proximal_alg.prox_l1_01)
        result = opt.minimize(x0, n_iter=n_iter)
        return result
| StarcoderdataPython |
393062 | import pytest
from guardian.shortcuts import assign_perm
from tests.factories import UserFactory
from tests.hanging_protocols_tests.factories import HangingProtocolFactory
from tests.utils import get_view_for_user
@pytest.mark.django_db
def test_permission_required_views(client):
    """Hanging-protocol views: list/detail are public, create needs the
    ``add_hangingprotocol`` permission, update is creator-only."""
    user = UserFactory()
    hp = HangingProtocolFactory()

    # anyone can view a hanging protocol and the list view
    response = get_view_for_user(
        viewname="hanging-protocols:list",
        client=client,
        user=user,
    )
    assert response.status_code == 200

    response = get_view_for_user(
        viewname="hanging-protocols:detail",
        client=client,
        user=user,
        reverse_kwargs={"slug": hp.slug},
    )
    assert response.status_code == 200

    # only users with the add.hangingprotocol permision can create one
    response = get_view_for_user(
        viewname="hanging-protocols:create",
        client=client,
        user=user,
    )
    assert response.status_code == 403

    assign_perm("hanging_protocols.add_hangingprotocol", user)

    response = get_view_for_user(
        viewname="hanging-protocols:create",
        client=client,
        user=user,
    )
    assert response.status_code == 200

    # only the creator can edit a hanging protocol
    response = get_view_for_user(
        viewname="hanging-protocols:update",
        client=client,
        user=hp.creator,
        reverse_kwargs={"slug": hp.slug},
    )
    assert response.status_code == 200

    response = get_view_for_user(
        viewname="hanging-protocols:update",
        client=client,
        user=user,
        reverse_kwargs={"slug": hp.slug},
    )
    assert response.status_code == 403
| StarcoderdataPython |
67638 | <reponame>e-dang/cookiecutter-django
from rest_framework import serializers
class DetailResponseSerializer(serializers.Serializer):
    # Shapes/documents the standard DRF ``{"detail": "..."}`` message payload.
    detail = serializers.CharField(read_only=True)
class NonFieldErrorResponseSerializer(serializers.Serializer):
    # Shapes/documents the DRF ``{"non_field_errors": "..."}`` validation payload.
    non_field_errors = serializers.CharField(read_only=True)
| StarcoderdataPython |
363648 | # !/usr/bin/python
class Monday:
    """Small demo class exercising instance, static and class methods."""

    def __init__(self, name):
        # BUG FIX: the original bound the parameter to a *local* variable
        # (``name = name``), so the instance attribute was never set; it also
        # created a dead local ``xx`` which is dropped here.
        self.name = name

    @staticmethod
    def print_word(w):
        # Plain helper: no access to class or instance state.
        print(w)

    @classmethod
    def class_method(cls, x):
        print(x)

    def fn(self, y):
        print(y)
# Demo calls.  BUG FIX: the originals crashed -- ``Monday()`` omitted the
# required ``name`` argument, and ``Monday.fn('x')`` called an instance
# method without an instance (supplying only one of its two arguments).
Monday('demo').print_word('hhh')
Monday.print_word('hhhxxx')
Monday('demo').fn('x')
| StarcoderdataPython |
5156030 | from django.conf import settings
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from wagtail.core import views
from wagtail.core.utils import WAGTAIL_APPEND_SLASH
if WAGTAIL_APPEND_SLASH:
    # If WAGTAIL_APPEND_SLASH is True (the default value), we match a
    # (possibly empty) list of path segments ending in slashes.
    # CommonMiddleware will redirect requests without a trailing slash to
    # a URL with a trailing slash
    serve_pattern = r'^((?:[\w\-]+/)*)$'
else:
    # If WAGTAIL_APPEND_SLASH is False, allow Wagtail to serve pages on URLs
    # with and without trailing slashes
    serve_pattern = r'^([\w\-/]*)$'

# Template for the front-end login view; projects may override it via the
# WAGTAIL_FRONTEND_LOGIN_TEMPLATE setting.
WAGTAIL_FRONTEND_LOGIN_TEMPLATE = getattr(
    settings, 'WAGTAIL_FRONTEND_LOGIN_TEMPLATE', 'wagtailcore/login.html'
)

# Order matters: the catch-all ``serve_pattern`` must stay last so the
# utility views above are matched first.
urlpatterns = [
    url(r'^_util/authenticate_with_password/(\d+)/(\d+)/$', views.authenticate_with_password,
        name='wagtailcore_authenticate_with_password'),
    url(r'^_util/login/$', auth_views.LoginView.as_view(template_name=WAGTAIL_FRONTEND_LOGIN_TEMPLATE),
        name='wagtailcore_login'),

    # Front-end page views are handled through Wagtail's core.views.serve
    # mechanism
    url(serve_pattern, views.serve, name='wagtail_serve')
]
| StarcoderdataPython |
6652071 | import sys
import math
import posix
from orbit.lattice import AccLattice, AccNode, AccActionsContainer
# Build a small test lattice of three elements with lengths 1.1, 2.1, 3.1.
lattice = AccLattice("test_lattice")
elem1 = AccNode("el-1")
elem2 = AccNode("el-2")
elem3 = AccNode("el-3")
elem1.setLength(1.1)
elem2.setLength(2.1)
elem3.setLength(3.1)
lattice.addNode(elem1)
lattice.addNode(elem2)
lattice.addNode(elem3)
# Attach a two-part child node to elem1's entrance, with its own children
# placed at its entrance, in each of its two body parts, and at its exit.
elem1_1 = AccNode("el-1-1")
elem1_1.setnParts(2)
elem1_1_1 = AccNode("el-1-1-1")
elem1_1_2 = AccNode("el-1-1-2")
elem1_1_3 = AccNode("el-1-1-3")
elem1_1_4 = AccNode("el-1-1-4")
elem1.addChildNode(elem1_1,AccNode.ENTRANCE)
elem1_1.addChildNode(elem1_1_1,AccNode.ENTRANCE)
elem1_1.addChildNode(elem1_1_2,AccNode.BODY,0)
elem1_1.addChildNode(elem1_1_3,AccNode.BODY,1)
elem1_1.addChildNode(elem1_1_4,AccNode.EXIT)
# A second child attached to elem2's exit, plus the actions container that
# will receive the tracking callbacks defined below.
elem1_2 = AccNode("el-1-2")
elem2.addChildNode(elem1_2,AccNode.EXIT)
acts = AccActionsContainer()
def Blanks(n):
    """Return a string of *n* spaces (used to indent trace output by depth).

    Replaces a quadratic character-by-character concatenation loop built with
    the Python-2-only ``xrange`` by the linear-time string-repetition
    operator, which behaves identically on Python 2 and 3.
    """
    return " " * n
# Mutable one-element lists so the tracking callbacks below can update these
# counters in place (this Python 2 script has no ``nonlocal``).
nLevel = [0]
nElems = [0]
def funcEntrance(paramsDict):
    # Entrance callback: descend one nesting level, optionally echo the event
    # (when paramsDict carries "print": True), and count every node visited.
    nLevel[0] += 1
    node = paramsDict["node"]
    if(paramsDict.has_key("print") and paramsDict["print"] == True):
        print Blanks(nLevel[0]),"ENTER level=",nLevel[0]," node=",node.getName()
    nElems[0] += 1
def funcExit(paramsDict):
    # Exit callback: optionally echo the event, then ascend one nesting level.
    node = paramsDict["node"]
    if(paramsDict.has_key("print") and paramsDict["print"] == True):
        print Blanks(nLevel[0]),"EXIT level=",nLevel[0]," node=",node.getName()
    nLevel[0] -= 1
def funcTrack(paramsDict):
    # Body callback: optionally echo the node whose body is being tracked.
    node = paramsDict["node"]
    if(paramsDict.has_key("print") and paramsDict["print"] == True):
        print Blanks(nLevel[0]),"BODY TRACK through node =",node.getName()," level=",nLevel[0]
# Register the callbacks for the entrance, body and exit stages of tracking.
acts.addAction(funcEntrance,AccActionsContainer.ENTRANCE)
acts.addAction(funcTrack,AccActionsContainer.BODY)
acts.addAction(funcExit,AccActionsContainer.EXIT)
lattice.initialize()
print "Total length=",lattice.getLength()
nodes = lattice.getNodes()
for node in nodes:
    print "node=",node.getName()," s start,stop = %4.3f %4.3f "%lattice.getNodePositionsDict()[node]
# One verbose tracking pass ("print": True enables the echo in the callbacks).
d = {"print":True}
lattice.trackActions(acts,d)
print "Total number of nodes=",nElems[0]
#========Speed test==========================
# Repeated silent tracking passes; every 10000 iterations print the average
# CPU time per pass (posix.times()[0] is cumulative user time).
count = 1
while(count <100000):
    #lattice.initialize()
    lattice.trackActions(acts)
    if( count % 10000 == 0):
        print "i=",count, " time= %9.8f "%(posix.times()[0]/count)
    count += 1
print "====STOP==="
sys.exit(0)
| StarcoderdataPython |
3591686 | #!/usr/bin/python
import sys, argparse
from subprocess import PIPE, Popen
from multiprocessing import cpu_count, Pool, Lock
glock = Lock()
of = None
def main():
    """Run ``du -hs`` on every input path, optionally in a process pool.

    Results are written (via the ``do_out`` callback) to ``--output``,
    or to stdout when no output file is given.
    """
    parser = argparse.ArgumentParser(description="",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input',nargs='+',help="Files to do")
    parser.add_argument('-o','--output',help="output or - for stdout")
    parser.add_argument('--threads',type=int,default=cpu_count(),help="number of processes")
    args = parser.parse_args()
    global of
    of = sys.stdout
    if args.output: of = open(args.output,'w')
    if args.threads > 1:
        p = Pool(processes=args.threads)
    for file in args.input:
        if args.threads > 1:
            # Asynchronous fan-out; do_out serialises the writes.
            p.apply_async(do_file,args=(file,),callback=do_out)
        else:
            r = do_file(file)
            do_out(r)
    if args.threads > 1:
        p.close()
        p.join()
    # BUG FIX: only close the handle we opened ourselves -- the original
    # closed ``of`` unconditionally, which closed ``sys.stdout`` whenever
    # no --output file was given.
    if args.output:
        of.close()
def do_out(r):
    """Callback: append one result line to the shared output handle.

    Serialised with the global lock because pool callbacks may run
    concurrently with the main thread.  BUG FIX: the lock is now held via a
    ``with`` block, so it is released even if the write raises -- the
    original's explicit acquire/release left the lock held on error.
    """
    global glock, of
    with glock:
        of.write(r + "\n")
def do_file(file):
    """Run ``du -hs`` on *file* and return its trimmed stdout.

    BUG FIX: the command is passed as an argument list instead of
    ``("du -hs " + file).split()``, which silently split filenames
    containing whitespace into multiple arguments.
    """
    p = Popen(["du", "-hs", file], stdout=PIPE)
    return p.communicate()[0].rstrip()
# Standard script entry point.
if __name__=="__main__":
    main()
| StarcoderdataPython |
1863767 | <gh_stars>1-10
from twitter import *
import requests
import os
import config
# Load the app's consumer credentials from the project config.
config.ensure()
TT_CONSUMER = config.get("TWITTER_CAPYBARA_CONSUMER_KEY")
TT_SECRET = config.get("TWITTER_CAPYBARA_SECRET")

# User OAuth token is cached on disk; on first run an interactive OAuth
# "dance" is performed and the resulting token written to this file.
MY_TWITTER_CREDS = os.path.expanduser('.capybara-credentials')
if not os.path.exists(MY_TWITTER_CREDS):
    oauth_dance("Capybara", TT_CONSUMER, TT_SECRET, MY_TWITTER_CREDS)

# Module-level authenticated client used by tweet() below.
oauth_token, oauth_secret = read_token_file(MY_TWITTER_CREDS)
auth = OAuth(oauth_token, oauth_secret, TT_CONSUMER, TT_SECRET)
tt = Twitter(auth=auth)
def tweet(img_url):
    """Download the image at *img_url*, upload it to Twitter, and post an
    empty-text status carrying that image.  Returns the API response."""
    image_bytes = requests.get(img_url).content
    # Media uploads go through the separate upload.twitter.com endpoint.
    uploader = Twitter(domain="upload.twitter.com", auth=auth)
    media_id = uploader.media.upload(media=image_bytes)["media_id_string"]
    return tt.statuses.update(status="", media_ids=media_id)
| StarcoderdataPython |
1676719 | # -*- coding: utf-8 -*-
from ThymeBoost.trend_models.trend_base_class import TrendBaseModel
import numpy as np
from statsmodels.tsa.api import SimpleExpSmoothing, Holt
class EtsModel(TrendBaseModel):
    """Several ETS methods from Statsmodels including:

        'ses': Simple Exponential Smoother
        'des': Double Exponential Smoother
        'damped_des': Damped Double Exponential Smoother

    These are to be passed as the 'trend_estimator' parameter in the ThymeBoost fit method.
    If alpha or beta are not given then it will follow Statsmodels optimization.
    For more info: https://www.statsmodels.org/stable/examples/notebooks/generated/exponential_smoothing.html
    """
    model = 'ets'

    def __init__(self):
        # Set by fit(): (fitted statsmodels results object, bias, series length).
        self.model_params = None
        # In-sample fitted values from the most recent fit().
        self.fitted = None

    def __str__(self):
        return f'{self.model}()'

    def simple_exponential_smoothing(self, y, bias, alpha):
        """Fit a level-only (SES) smoother to the de-biased series; return fitted values."""
        smoother = SimpleExpSmoothing(y - bias)
        fit_model = smoother.fit(smoothing_level=alpha)
        fitted = fit_model.fittedvalues
        # Stash (results, bias, n) so predict() can extrapolate later.
        self.model_params = (fit_model, bias, len(y))
        return fitted

    def double_exponential_smoothing(self, y, bias, alpha, beta):
        """Fit a Holt level+trend smoother to the de-biased series; return fitted values."""
        smoother = Holt(y - bias)
        fit_model = smoother.fit(smoothing_level=alpha, smoothing_trend=beta)
        fitted = fit_model.fittedvalues
        self.model_params = (fit_model, bias, len(y))
        return fitted

    def damped_double_exponential_smoothing(self, y, bias, alpha, beta):
        """Fit a damped-trend Holt smoother to the de-biased series; return fitted values."""
        smoother = Holt(y - bias, damped_trend=True)
        fit_model = smoother.fit(smoothing_level=alpha,
                                 smoothing_trend=beta)
        fitted = fit_model.fittedvalues
        self.model_params = (fit_model, bias, len(y))
        return fitted

    def fit(self, y, **kwargs):
        """
        Fit the trend component in the boosting loop for an ets model.

        Parameters
        ----------
        y : np.ndarray
            The series (current boosting residual) to smooth.
        **kwargs :
            Key 'model': one of 'ses', 'des', 'damped_des'.
            Key 'bias': constant subtracted from y before smoothing.
            Key 'alpha': the level smoothing parameter; optimized by
                statsmodels when None.
            Key 'beta': the trend smoothing parameter (des/damped_des only);
                optimized by statsmodels when None.

        Returns
        -------
        Fitted array (also stored on ``self.fitted``).
        """
        self.model = kwargs['model']
        bias = kwargs['bias']
        if self.model == 'ses':
            self.fitted = self.simple_exponential_smoothing(y, bias, kwargs['alpha'])
        elif self.model == 'des':
            self.fitted = self.double_exponential_smoothing(y, bias, kwargs['alpha'], kwargs['beta'])
        elif self.model == 'damped_des':
            self.fitted = self.damped_double_exponential_smoothing(y, bias, kwargs['alpha'], kwargs['beta'])
        else:
            raise ValueError('That model type is not implemented!')
        return self.fitted

    def predict(self, forecast_horizon, model_params):
        """Forecast ``forecast_horizon`` steps past the fitted sample,
        adding back the bias stored in ``model_params`` (see fit())."""
        _start = model_params[2]
        _end = _start + forecast_horizon - 1
        prediction = model_params[0].predict(start=_start, end=_end) + model_params[1]
        return prediction
| StarcoderdataPython |
11321016 | <reponame>iLordTony/django-angular-example<gh_stars>0
from django.conf.urls import url, include
from api.views import UserList, PostDetail, PostList, UserPostList, UserDetail, PostPhotoList, PhotoList, PhotoDetail
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
# User endpoints: list, detail by username, and a user's posts.
users_urls = [
    url(r'^$', UserList.as_view(), name='user-list'),
    url(r'^(?P<username>[0-9a-zA-Z_-]+)/$', UserDetail.as_view(), name='user-detail'),
    url(r'^(?P<username>[0-9a-zA-Z_-]+)/posts/$', UserPostList.as_view(), name='userpost-list'),
]

# Post endpoints: list, a post's photos, and detail by primary key.
posts_urls = [
    url(r'^$', PostList.as_view(), name='post-list'),
    url(r'^(?P<pk>\d+)/photos/$', PostPhotoList.as_view(), name='postphoto-list'),
    url(r'^(?P<pk>\d+)/$', PostDetail.as_view(), name='post-detail'),
]

# Photo endpoints: detail by primary key and list.
photos_urls = [
    url(r'^(?P<pk>\d+)/$', PhotoDetail.as_view(), name='photo-detail'),
    url(r'^$', PhotoList.as_view(), name='photo-list'),
]

# Top-level API routing, delegating to the per-resource url lists above.
urlpatterns = [
    url(r'^users/', include(users_urls)),
    url(r'^posts/', include(posts_urls)),
    url(r'^photos/', include(photos_urls)),
]
| StarcoderdataPython |
4855042 | <reponame>fareszr/app
from flask import url_for
from app.db import Session
from app.models import User, ApiKey
from tests.utils import login
def test_create_delete_api_key(flask_client):
    """A logged-in user can create an API key via the dashboard form and
    then delete it again."""
    user = login(flask_client)
    Session.commit()

    # create api_key
    create_r = flask_client.post(
        url_for("dashboard.api_key"),
        data={"form-name": "create", "name": "for test"},
        follow_redirects=True,
    )
    assert create_r.status_code == 200
    api_key = ApiKey.get_by(user_id=user.id)
    assert ApiKey.count() == 1
    assert api_key.name == "for test"

    # delete api_key
    delete_r = flask_client.post(
        url_for("dashboard.api_key"),
        data={"form-name": "delete", "api-key-id": api_key.id},
        follow_redirects=True,
    )
    assert delete_r.status_code == 200
    assert ApiKey.count() == 0
def test_delete_all_api_keys(flask_client):
    """The "delete-all" dashboard action removes only the requesting user's
    API keys, leaving other users' keys untouched."""
    # create two test users
    user_1 = login(flask_client)
    user_2 = User.create(
        email="<EMAIL>", password="password", name="Test User 2", activated=True
    )
    Session.commit()

    # create api_key for both users
    ApiKey.create(user_1.id, "for test")
    ApiKey.create(user_1.id, "for test 2")
    ApiKey.create(user_2.id, "for test")
    Session.commit()
    assert (
        ApiKey.count() == 3
    )  # assert that the total number of API keys for all users is 3.
    # assert that each user has the API keys created
    assert ApiKey.filter(ApiKey.user_id == user_1.id).count() == 2
    assert ApiKey.filter(ApiKey.user_id == user_2.id).count() == 1

    # delete all of user 1's API keys
    r = flask_client.post(
        url_for("dashboard.api_key"),
        data={"form-name": "delete-all"},
        follow_redirects=True,
    )
    assert r.status_code == 200
    assert (
        ApiKey.count() == 1
    )  # assert that the total number of API keys for all users is now 1.
    assert (
        ApiKey.filter(ApiKey.user_id == user_1.id).count() == 0
    )  # assert that user 1 now has 0 API keys
    assert (
        ApiKey.filter(ApiKey.user_id == user_2.id).count() == 1
    )  # assert that user 2 still has 1 API key
| StarcoderdataPython |
class prime():
    """Prime-factor utilities for a fixed integer ``n`` (Project Euler #3)."""

    def __init__(self, n):
        self.n = n

    def get_factor(self):
        """Yield every divisor of ``n`` in ``[2, n]`` (including n itself)."""
        for i in range(2, self.n + 1):
            if self.n % i == 0:
                yield i

    def prime_or_not(self, n):
        """Return True iff *n* is prime.

        BUG FIX: the original returned ``True`` as soon as the *first*
        candidate failed to divide ``n`` (so e.g. 9 was reported prime) and
        fell off the empty loop returning ``None`` for 2 and 3, excluding
        the smallest primes.
        """
        if n < 2:
            return False
        for i in range(2, int(n / 2) + 1):
            if n % i == 0:
                return False
        return True

    def get_prime_factor(self):
        """Yield the distinct prime divisors of ``n`` in increasing order.

        BUG FIX / PERF: trial division now divides each found factor out of
        the remainder, giving O(sqrt(n)) work instead of filtering every
        divisor up to ``n`` (hopeless for n ~ 6e11) with the broken
        primality test.
        """
        remainder = self.n
        i = 2
        while i * i <= remainder:
            if remainder % i == 0:
                yield i
                while remainder % i == 0:
                    remainder //= i
            i += 1
        if remainder > 1:
            # Whatever is left is itself prime.
            yield remainder

    def get_largest_prime_factor(self):
        """Return the largest prime factor of ``n``."""
        return max(self.get_prime_factor())
def main():
    """Print the largest prime factor of the Project Euler #3 target number."""
    n = 600851475143
    p = prime(n)
    # The original also created (and immediately discarded) the
    # get_factor()/get_prime_factor() generators here; those dead calls
    # are removed.
    print(p.get_largest_prime_factor())
# Standard script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3559827 | #!/usr/bin/env python
#
# Original Version: bjian 2008/10/27
# 3-D extension: PJackson 2013/06/06
# More datatypes, Multiple Channels, Python 3, ...: <NAME>
#
from __future__ import division, print_function
import os
import numpy as np
import array
import zlib
def read_meta_header(filename):
    """Return a dictionary of meta data from a MetaImage (.mhd) header file.

    Only the first occurrence of each recognised tag is kept; values are
    coerced per tag (int, float list, int list, bool, or raw string).
    Unrecognised lines are ignored.
    """
    meta_dict = {}
    # Recognised MetaImage tags.
    tag_set = []
    tag_set.extend(['ObjectType', 'NDims', 'DimSize', 'ElementType', 'ElementDataFile', 'ElementNumberOfChannels'])
    tag_set.extend(['BinaryData', 'BinaryDataByteOrderMSB', 'CompressedData', 'CompressedDataSize'])
    tag_set.extend(['Offset', 'CenterOfRotation', 'AnatomicalOrientation', 'ElementSpacing', 'TransformMatrix'])
    tag_set.extend(['Comment', 'SeriesDescription', 'AcquisitionDate', 'AcquisitionTime', 'StudyDate', 'StudyTime'])
    # Per-tag "already seen" flags: only the first occurrence wins.
    tag_flag = [False] * len(tag_set)
    # BUG FIX: the file handle is now closed even if parsing raises (the
    # original used explicit open/close with no try/finally).
    with open(filename, "r") as fileIN:
        line = fileIN.readline()
        while line:
            tags = str.split(line, '=')
            for i in range(len(tag_set)):
                tag = tag_set[i]
                if (str.strip(tags[0]) == tag) and (not tag_flag[i]):
                    content = str.strip(tags[1])
                    if tag in ['ElementSpacing', 'Offset', 'CenterOfRotation', 'TransformMatrix']:
                        meta_dict[tag] = [float(s) for s in content.split()]
                    elif tag in ['NDims', 'ElementNumberOfChannels']:
                        meta_dict[tag] = int(content)
                    elif tag in ['DimSize']:
                        meta_dict[tag] = [int(s) for s in content.split()]
                    elif tag in ['BinaryData', 'BinaryDataByteOrderMSB', 'CompressedData']:
                        # Anything other than the literal "True" is treated as False.
                        meta_dict[tag] = (content == "True")
                    else:
                        meta_dict[tag] = content
                    tag_flag[i] = True
            line = fileIN.readline()
    return meta_dict
def load_raw_data_with_mhd(filename):
    """Load a MetaImage volume (.mhd header plus raw or zlib-compressed data).

    Returns ``(data, meta_dict)``; ``data`` carries the volume with axis 0
    rolled to position 2 (see the "3D fix" below), with a trailing channel
    axis when ElementNumberOfChannels > 1.
    """
    meta_dict = read_meta_header(filename)
    dim = int(meta_dict['NDims'])
    if "ElementNumberOfChannels" in meta_dict:
        element_channels = int(meta_dict["ElementNumberOfChannels"])
    else:
        element_channels = 1

    # Map the MetaImage element type onto the matching numpy dtype.
    if meta_dict['ElementType'] == 'MET_FLOAT':
        np_type = np.float32
    elif meta_dict['ElementType'] == 'MET_DOUBLE':
        np_type = np.float64
    elif meta_dict['ElementType'] == 'MET_CHAR':
        np_type = np.byte
    elif meta_dict['ElementType'] == 'MET_UCHAR':
        np_type = np.ubyte
    elif meta_dict['ElementType'] == 'MET_SHORT':
        np_type = np.short
    elif meta_dict['ElementType'] == 'MET_USHORT':
        np_type = np.ushort
    elif meta_dict['ElementType'] == 'MET_INT':
        np_type = np.int32
    elif meta_dict['ElementType'] == 'MET_UINT':
        np_type = np.uint32
    else:
        raise NotImplementedError("ElementType " + meta_dict['ElementType'] + " not understood.")

    compressed = meta_dict['CompressedData']
    arr = list(meta_dict['DimSize'])
    # Number of elements per slice along the last dimension.
    volume = np.prod(arr[0:dim - 1])

    # The data file is referenced relative to the header's directory.
    pwd = os.path.split(filename)[0]
    if pwd:
        data_file = pwd + '/' + meta_dict['ElementDataFile']
    else:
        data_file = meta_dict['ElementDataFile']

    shape = (arr[dim - 1], volume, element_channels)
    if not compressed:
        with open(data_file, 'rb') as fid:
            data = np.fromfile(fid, dtype=np_type)
    else:
        with open(data_file, 'rb') as fid:
            f_dec = zlib.decompress(fid.read())
        # BUG FIX: np.fromstring is deprecated (and rejects binary input in
        # modern numpy); np.frombuffer is the supported equivalent.  The
        # np.array(...) below copies, so the read-only view is harmless.
        data = np.frombuffer(f_dec, dtype=np_type, count=-1)
    data = np.array(data, np_type).reshape(shape)

    # Begin 3D fix
    arr.reverse()
    if element_channels > 1:
        data = data.reshape(arr + [element_channels])
    else:
        data = data.reshape(arr)
    # End 3D fix
    data = np.rollaxis(data, 0, 3)
    return (data, meta_dict)
def write_meta_header(filename, meta_dict):
    """Write a MetaImage (.mhd) header file.

    Emits one ``tag = value`` line per known tag present in
    ``meta_dict``.  Tags are written in the canonical MetaImage order
    (not dictionary order), which some readers depend on.
    """
    # Do not use meta_dict.keys(): the order of tags matters.
    tags = ['ObjectType', 'NDims', 'BinaryData',
            'BinaryDataByteOrderMSB', 'CompressedData', 'CompressedDataSize',
            'TransformMatrix', 'Offset', 'CenterOfRotation',
            'AnatomicalOrientation', 'ElementSpacing',
            'DimSize', 'ElementNumberOfChannels', 'ElementType', 'ElementDataFile',
            'Comment', 'SeriesDescription', 'AcquisitionDate',
            'AcquisitionTime', 'StudyDate', 'StudyTime']
    header = ''.join('%s = %s\n' % (tag, meta_dict[tag])
                     for tag in tags if tag in meta_dict)
    # Use a context manager so the file is closed even if write() fails.
    with open(filename, 'w') as f:
        f.write(header)
def dump_raw_data(filename, data, dsize, element_channels=1):
    """Write numpy array ``data`` to ``filename`` as raw binary.

    Data is written in the machine's native byte order: the original
    docstring claimed "big endian is always used", but the byteswap was
    commented out, so native order is what was (and is) produced.

    Parameters
    ----------
    filename : str
        Destination .raw file path.
    data : numpy.ndarray
        Array to dump; must have one of the supported element dtypes.
    dsize : sequence of int
        Reversed dimension sizes; only ``dsize[0]`` (slice count) is used.
    element_channels : int
        Number of channels per voxel (trailing axis).

    Raises
    ------
    NotImplementedError
        If ``data.dtype`` is not one of the supported element types.
    """
    # Same supported set as the original array.array type codes
    # ('f', 'd', 'h', 'H', 'i', 'I').
    supported = (np.float32, np.float64, np.short, np.ushort,
                 np.int32, np.uint32)
    if not any(data.dtype == t for t in supported):
        raise NotImplementedError("ElementType " + str(data.dtype) + " not implemented.")
    # Normalise the layout (slices, voxels-per-slice, channels) before
    # flattening, matching the reader's expectations.
    data = data.reshape(dsize[0], -1, element_channels)
    with open(filename, 'wb') as rawfile:
        # numpy writes the C-ordered buffer directly; this avoids the
        # slow array.array/fromlist round trip and produces identical bytes.
        data.ravel().tofile(rawfile)
def write_mhd_file(mhdfile, data, **meta_dict):
    """Write ``data`` as a MetaImage pair: a .mhd header plus a .raw file.

    Fills in the mandatory header tags, derives the MetaImage element
    type from ``data.dtype``, reverses the dimension order, then writes
    the header with ``write_meta_header`` and the binary payload with
    ``dump_raw_data``.
    """
    assert (mhdfile[-4:] == '.mhd')
    meta_dict['ObjectType'] = 'Image'
    meta_dict['BinaryData'] = 'True'
    meta_dict['BinaryDataByteOrderMSB'] = 'False'
    # Pick the MetaImage element type for the array dtype; the pairs are
    # checked in order, mirroring the original elif chain.
    type_table = [
        ((np.float32,), 'MET_FLOAT'),
        ((np.double, np.float64), 'MET_DOUBLE'),
        ((np.byte,), 'MET_CHAR'),
        ((np.uint8, np.ubyte), 'MET_UCHAR'),
        ((np.short, np.int16), 'MET_SHORT'),
        ((np.ushort, np.uint16), 'MET_USHORT'),
        ((np.int32,), 'MET_INT'),
        ((np.uint32,), 'MET_UINT'),
    ]
    for dtypes, met_name in type_table:
        if any(data.dtype == t for t in dtypes):
            meta_dict['ElementType'] = met_name
            break
    else:
        raise NotImplementedError("ElementType " + str(data.dtype) + " not implemented.")
    dsize = list(data.shape)
    if 'ElementNumberOfChannels' in meta_dict.keys():
        element_channels = int(meta_dict['ElementNumberOfChannels'])
        assert (dsize[-1] == element_channels)
        dsize = dsize[:-1]
    else:
        element_channels = 1
    # MetaImage stores dimensions fastest-axis first.
    dsize.reverse()
    meta_dict['NDims'] = str(len(dsize))
    meta_dict['DimSize'] = dsize
    meta_dict['ElementDataFile'] = os.path.split(mhdfile)[1].replace('.mhd', '.raw')
    # These tags may arrive as sequences; the header wants space-joined text.
    for tag in ('ElementSpacing', 'Offset', 'DimSize', 'CenterOfRotation', 'TransformMatrix'):
        if tag in meta_dict.keys() and not isinstance(meta_dict[tag], str):
            meta_dict[tag] = ' '.join(str(i) for i in meta_dict[tag])
    write_meta_header(mhdfile, meta_dict)
    pwd = os.path.split(mhdfile)[0]
    data_file = pwd + '/' + meta_dict['ElementDataFile'] if pwd else meta_dict['ElementDataFile']
    dump_raw_data(data_file, data, dsize, element_channels=element_channels)
| StarcoderdataPython |
3360383 | from typing import Sequence
from uuid import UUID
from hexbytes import HexBytes
from web3 import Web3
def calculate_device_registration_hash(
    timestamp: int,
    identifier: UUID,
    cloud_messaging_token: str,
    safes: Sequence[str],
    prefix: str = "gnosis-safe",
) -> HexBytes:
    """Return the keccak-256 hash of the device-registration payload.

    The safe addresses are sorted before concatenation so the hash is
    independent of the order in which the caller supplies them.
    """
    payload = "".join(
        [prefix, str(timestamp), str(identifier), cloud_messaging_token]
        + sorted(safes)
    )
    return Web3.keccak(text=payload)
| StarcoderdataPython |
1981333 | <filename>kolibri/core/auth/constants/role_kinds.py
"""
This module contains constants representing the kinds of "roles" that a user can have with respect to a Collection.
"""
from __future__ import unicode_literals
# Role kind identifiers persisted on Role objects for a Collection.
ADMIN = "admin"
COACH = "coach"
# A coach who can be assigned to individual classrooms.
ASSIGNABLE_COACH = "classroom assignable coach"
# (value, human-readable label) pairs for use as model field choices.
choices = (
    (ADMIN, "Admin"),
    (COACH, "Coach"),
    (ASSIGNABLE_COACH, "Classroom Assignable Coach"),
)
| StarcoderdataPython |
378795 | import pathlib
import tempfile
from unittest import mock
from docker.errors import ImageNotFound
from docker.errors import NotFound
from teststack import cli
def test_render(runner, tag):
    """Rendering to an explicit dockerfile writes the templated content."""
    with tempfile.NamedTemporaryFile() as tmpfile:
        result = runner.invoke(cli, ['render', f'--dockerfile={tmpfile.name}'])
        assert result.exit_code == 0
        with open(tmpfile.name, 'r') as fh_:
            assert fh_.readline() == 'FROM python:slim\n'
            assert fh_.readline() == 'ENV PYTHON=True\n'
            assert fh_.readline() == 'WORKDIR /srv\n'
            assert fh_.readline() == '\n'
            # The rendered file embeds build metadata and the commit tag.
            assert 'docker-metadata' in fh_.readline()
            assert tag['commit'] in fh_.readline()
def test_render_isolated(runner):
    """Rendering with --path in an isolated tree writes a plain Dockerfile."""
    with open('Dockerfile.j2') as fh_, runner.isolated_filesystem() as th_:
        # Copy the template into the isolated filesystem before rendering.
        with open('Dockerfile.j2', 'w') as wh_:
            wh_.write(fh_.read())
        result = runner.invoke(cli, [f'--path={th_}', 'render'])
        assert result.exit_code == 0
        with open('Dockerfile', 'r') as fh_:
            assert fh_.readline() == 'FROM python:slim\n'
            assert fh_.readline() == 'ENV PYTHON=True\n'
            assert fh_.readline() == 'WORKDIR /srv\n'
            assert not fh_.readline()
def test_container_start_no_tests(runner, attrs):
    """'start -n' with containers already running starts nothing new."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['start', '-n'])
    assert client.containers.get.call_count == 4
    assert client.containers.run.called is False
    assert result.exit_code == 0
def test_container_start_no_tests_not_started(runner, attrs):
    """'start -n' launches service containers when none exist yet."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    # Simulate that no containers exist so every get() misses.
    client.containers.get.side_effect = NotFound('container not found')
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['start', '-n'])
    assert client.containers.get.call_count == 2
    assert client.containers.run.call_count == 2
    assert result.exit_code == 0
def test_container_start_with_tests(runner, attrs):
    """'start' reuses the test container when its image is current."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    # Image id matches the running container's image, so no restart needed.
    client.images.get.return_value.id = client.containers.get.return_value.image.id
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['start'])
    assert client.containers.get.call_count == 11
    assert client.containers.run.called is False
    assert result.exit_code == 0
def test_container_start_with_tests_old_image(runner, attrs):
    """'start' replaces the test container when its image is stale."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['start'])
    assert client.containers.get.call_count == 11
    assert client.containers.run.called is True
    # Old container must be stopped, waited on and removed with its volumes.
    assert client.containers.get.return_value.stop.called is True
    assert client.containers.get.return_value.wait.called is True
    client.containers.get.return_value.remove.assert_called_with(v=True)
    assert result.exit_code == 0
def test_container_start_with_tests_not_started(runner, attrs):
    """'start' launches services and the test container when none exist."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    client.containers.get.side_effect = NotFound('container not found')
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['start'])
    assert client.containers.get.call_count == 6
    assert client.containers.run.call_count == 3
    assert result.exit_code == 0
def test_container_stop(runner, attrs):
    """'stop' ends every running container in the stack."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    with mock.patch('docker.from_env', return_value=client), mock.patch(
        'teststack.containers.docker.Client.end_container'
    ) as end_container:
        result = runner.invoke(cli, ['stop'])
    assert client.containers.get.call_count == 3
    assert end_container.call_count == 3
    assert result.exit_code == 0
def test_container_stop_without_containers(runner, attrs):
    """'stop' is a no-op (and succeeds) when no containers exist."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    client.containers.get.side_effect = NotFound('container not found')
    with mock.patch('docker.from_env', return_value=client), mock.patch(
        'teststack.containers.docker.Client.end_container'
    ) as end_container:
        result = runner.invoke(cli, ['stop'])
    assert client.containers.get.call_count == 3
    assert end_container.called is False
    assert result.exit_code == 0
def test_container_build(runner, build_output):
    """'build --tag' forwards the expected arguments to the docker API."""
    client = mock.MagicMock()
    client.api.build.return_value = build_output
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['build', '--tag=blah'])
    client.api.build.assert_called_with(path='.', dockerfile='Dockerfile', tag='blah', nocache=False, rm=True)
    assert result.exit_code == 0
def test_container_start_with_tests_without_image(runner, attrs):
    """'start' builds/runs the test container when its image is missing."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    image = mock.MagicMock()
    # First image lookup misses; subsequent lookups return the built image.
    client.images.get.side_effect = [ImageNotFound('image not found'), image, image, image]
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['start'])
    assert client.containers.get.call_count == 11
    assert client.containers.run.called is True
    assert client.images.get.call_count == 4
    assert result.exit_code == 0
def test_container_run(runner, attrs):
    """'run' executes all steps and streams exec output to the console."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    client.images.get.return_value.id = client.containers.get.return_value.image.id
    # Fake streamed output from the exec'd command.
    client.containers.get.return_value.exec_run.return_value.output = [
        'foo',
        'bar',
        'baz',
    ]
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['run'])
    assert client.containers.get.call_count == 14
    assert client.containers.run.called is False
    assert result.exit_code == 0
    assert 'foobarbaz' in result.output
    assert 'Run Command: env' in result.output
def test_container_run_step(runner, attrs):
    """'run --step' executes only the named step, skipping the others."""
    client = mock.MagicMock()
    client.containers.get.return_value.attrs = attrs
    client.images.get.return_value.id = client.containers.get.return_value.image.id
    client.containers.get.return_value.exec_run.return_value.output = [
        'foo',
        'bar',
        'baz',
    ]
    with mock.patch('docker.from_env', return_value=client):
        result = runner.invoke(cli, ['run', '--step=install'])
    assert client.containers.get.call_count == 13
    assert client.containers.run.called is False
    assert result.exit_code == 0
    assert 'foobarbaz' in result.output
    # Only the 'install' step ran; the 'env' step must not appear.
    assert 'Run Command: env' not in result.output
    assert 'Run Command: python -m pip install' in result.output
126452 | <reponame>jessicachung/rna_seq_pipeline
#---------------------------------
# BATCH DEFAULTS
#---------------------------------
# The default options applied to each stage in the shell script. These options
# are overwritten by the options provided by the individual stages in the next
# section.
# Stage options which Rubra will recognise are:
# - distributed: a boolean determining whether the task should be submitted to
# a cluster job scheduling system (True) or run on the system local to Rubra
# (False).
# - walltime: for a distributed PBS job, gives the walltime requested from the
# job queue system; the maximum allowed runtime. For local jobs has no
# effect.
# - memInGB: for a distributed PBS job, gives the memory in gigabytes requested
# from the job queue system. For local jobs has no effect.
# - queue: for a distributed PBS job, this is the name of the queue to submit
# the job to. For local jobs has no effect.
# - modules: the modules to be loaded before running the task. This is intended
# for systems with environment modules installed. Rubra will call module load
# on each required module before running the task. Note that defining modules
# for individual stages will override (not add to) any modules listed here.
# This currently only works for distributed jobs.
# Defaults applied to every pipeline stage; individual stages override
# (not extend) these keys in the 'stages' dict below.
stageDefaults = {
    "distributed": True,
    "walltime": "01:00:00",
    "memInGB": 4,
    "queue": "main",
    # Environment modules loaded before every stage unless the stage
    # defines its own "modules" list (which replaces this one entirely).
    "modules": [
        "perl/5.18.0",
        "java/1.7.0_25",
        "samtools-intel/0.1.19",
        "python-gcc/2.7.5",
        "fastqc/0.10.1",
        "bowtie2-intel/2.1.0",
        "tophat-gcc/2.0.8",
        "cufflinks-gcc/2.1.1",
        "bwa-intel/0.7.5a",
    ],
    "manager": "slurm",
}
#---------------------------------
# PBS STAGES
#---------------------------------
# The configuration options for individual stages.
# Per-stage configuration. %-prefixed tokens inside each "command"
# (e.g. %output, %bam) are substituted by the pipeline before the command
# is submitted to the scheduler.
stages = {
    "makeIndexHtml": {
        "command": "python %html_index %name %samp %comp %results %output",
        "walltime": "10:00",
    },
    "fastQC": {
        "command": "fastqc -o %outdir -f fastq %pair1 %pair2",
        "walltime": "30:00"
    },
    "trimReads": {
        "command": "java -Xmx6g %tmp -jar %trimmomatic %paired -threads 1 " \
                   "-phred33 %log %parameters",
        "walltime": "10:00:00",
        "memInGB": 10,
    },
    "fastQCSummary": {
        "command": "python %script %fastqc_dir %fastqc_post_trim_dir " \
                   "%qc_summary %fastqc_summary %paired > %summary_txt",
        "walltime": "10:00",
    },
    "buildTranscriptomeIndex": {
        "command": "sh %buildIndex %seq %tmp_dir %index_dir %index %gene_ref " \
                   "%genome_ref",
        "walltime": "2:00:00",
        "queue": "smp"
    },
    "tophatAlign": {
        "command": "sh %tophat %pair1 %pair2 %out_dir %gene_ref %genome_ref " \
                   "%known %rgsm %rglb %rgid %rgpl %link",
        "walltime": "10:00:00",
        "memInGB": 32,
        "queue": "smp"
    },
    "sortBam": {
        "command": "samtools sort %bam %output",
        "walltime": "2:00:00"
    },
    "indexBam": {
        "command": "samtools index %bam",
        "walltime": "1:00:00"
    },
    "mergeTophat" : {
        "command": "sh %merge %fix_reads %samp_dir %output",
        "walltime": "1:00:00",
        "memInGB": 24
    },
    "reorderBam": {
        "command": "java -Xmx2g %tmp -jar %reorder_sam INPUT=%input " \
                   "OUTPUT=%output REFERENCE=%genome_ref",
        "walltime": "1:00:00"
    },
    "addRG": {
        "command": "java -Xmx2g %tmp -jar %add_rg INPUT=%bam OUTPUT=%output " \
                   "RGSM=%rgsm RGLB=%rglb RGID=%rgid RGPL=%rgpl RGPU=%rgpu",
        "walltime": "1:00:00"
    },
    "markDuplicates": {
        "command": "java -Xmx10g %tmp -jar %mark_dup INPUT=%input " \
                   "REMOVE_DUPLICATES=false VALIDATION_STRINGENCY=LENIENT " \
                   "AS=true METRICS_FILE=%log OUTPUT=%output",
        "walltime": "1:00:00",
        "memInGB": 12
    },
    "rnaSeQC": {
        "command": "java -Xmx10g %tmp -jar %rnaseqc %paired -n 1000 -s %samp " \
                   "-r %genome_ref -t %gene_ref %rrna_ref -o %outDir",
        "walltime": "1:00:00",
        "memInGB": 16
    },
    "cufflinksAssembly": {
        "command": "cufflinks -p 8 -o %outdir %in",
        "walltime": "2:00:00",
        "memInGB": 32,
        "queue": "smp"
    },
    "cuffmerge": {
        "command": "cuffmerge -g %gene_ref -s %genome_ref -p 8 -o %outdir %in",
        "walltime": "2:00:00",
        "memInGB": 32,
        "queue": "smp"
    },
    "cuffdiff": {
        "command": "cuffdiff %mask -o %outdir -p 8 -L %labels -u %merged_gtf " \
                   "%samples1 %samples2",
        "walltime": "8:00:00",
        "memInGB": 40,
        "queue": "smp"
    },
    "sortBamByName": {
        "command": "samtools sort -n %bam_file %sorted_file",
        "walltime": "1:00:00",
    },
    "alignmentStats": {
        "command": "sh %script %bam %unmapped %output %paired",
        "walltime": "1:00:00",
    },
    "qcSummary": {
        "command": "python %qc_script %fastqc_dir %fastqc_post_dir " \
                   "%alignment_stats_dir %rnaqc_dir %qc_summary %paired",
        "walltime": "10:00",
    },
    "countReads": {
        "command": "sh %htseq %bam %gene_ref %union %strict %stranded",
        "walltime": "5:00:00",
    },
    "combineAndAnnotate": {
        "command": "R --no-save --args %samples %comparisons %plain_text " \
                   "%rdata %annotate < %combine > %stdout 2> %stderr",
        "walltime": "30:00",
        "modules": ["R-intel/2.15.3"],
    },
    "voom": {
        "command": "R --no-save --args %rdata %outdir %voom < %script " \
                   "> %stdout 2> %stderr",
        "walltime": "30:00",
        "modules": ["R-intel/2.15.3"]
    },
    "edgeR": {
        "command": "R --no-save --args %rdata %outdir %edgeR < %script " \
                   "> %stdout 2> %stderr",
        "walltime": "30:00",
        "modules": ["R-intel/2.15.3"]
    },
}
| StarcoderdataPython |
1734770 | <filename>__init__.py
import logging
import asyncio
import collections
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.helpers import aiohttp_client
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'grohe_sense'
# Config key for the OAuth refresh token (the literal value here was
# anonymised by the dataset; it is read from configuration.yaml at runtime).
CONF_REFRESH_TOKEN = '<PASSWORD>'
# configuration.yaml schema: the integration requires a refresh token.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema({
            vol.Required(CONF_REFRESH_TOKEN): cv.string,
        }),
    },
    extra=vol.ALLOW_EXTRA,
)
# Base URL of the Grohe IoT API used by all requests below.
BASE_URL = 'https://idp2-apigw.cloud.grohe.com/v3/iot/'
GROHE_SENSE_TYPE = 101 # Type identifier for the battery powered water detector
GROHE_SENSE_GUARD_TYPE = 103 # Type identifier for sense guard, the water guard installed on your water pipe
# Lightweight record describing one discovered appliance.
GroheDevice = collections.namedtuple('GroheDevice', ['locationId', 'roomId', 'applianceId', 'type', 'name'])
async def async_setup(hass, config):
    """Set up the grohe_sense integration: discover devices, load platforms."""
    _LOGGER.debug("Loading Grohe Sense")
    await initialize_shared_objects(hass, config.get(DOMAIN).get(CONF_REFRESH_TOKEN))
    # Forward setup to the sensor and switch platforms of this integration.
    await hass.helpers.discovery.async_load_platform('sensor', DOMAIN, {}, config)
    await hass.helpers.discovery.async_load_platform('switch', DOMAIN, {}, config)
    return True
async def initialize_shared_objects(hass, refresh_token):
    """Create the shared OAuth session and enumerate all Grohe appliances.

    Walks locations -> rooms -> appliances via the Grohe API and stores
    the session plus discovered devices in ``hass.data[DOMAIN]`` for the
    sensor/switch platforms to consume.
    """
    session = aiohttp_client.async_get_clientsession(hass)
    auth_session = OauthSession(session, refresh_token)
    devices = []
    hass.data[DOMAIN] = { 'session': auth_session, 'devices': devices }
    locations = await auth_session.get(BASE_URL + f'locations')
    for location in locations:
        _LOGGER.debug('Found location %s', location)
        locationId = location['id']
        rooms = await auth_session.get(BASE_URL + f'locations/{locationId}/rooms')
        for room in rooms:
            _LOGGER.debug('Found room %s', room)
            roomId = room['id']
            appliances = await auth_session.get(BASE_URL + f'locations/{locationId}/rooms/{roomId}/appliances')
            for appliance in appliances:
                _LOGGER.debug('Found appliance %s', appliance)
                applianceId = appliance['appliance_id']
                devices.append(GroheDevice(locationId, roomId, applianceId, appliance['type'], appliance['name']))
class OauthException(Exception):
    """Raised when the Grohe OAuth endpoint rejects a request.

    Attributes:
        error_code: HTTP status code returned by the server.
        reason: response body text explaining the failure.
    """
    def __init__(self, error_code, reason):
        # Pass a message to Exception so str(exc) and log output are
        # informative (previously str(exc) was an empty string).
        super().__init__('OAuth error %s: %s' % (error_code, reason))
        self.error_code = error_code
        self.reason = reason
class OauthSession:
    """aiohttp session wrapper that handles Grohe OAuth token refresh.

    Requests made through get()/post() automatically attach a Bearer
    token and transparently refresh it on HTTP 401 responses.
    """
    def __init__(self, session, refresh_token):
        # Underlying aiohttp ClientSession (shared with Home Assistant).
        self._session = session
        self._refresh_token = refresh_token
        # Cached 'Bearer ...' header value; None until first refresh.
        self._access_token = None
        # asyncio.Event set while a refresh is in flight, so concurrent
        # callers wait for one refresh instead of issuing several.
        self._fetching_new_token = None
    @property
    def session(self):
        return self._session
    async def token(self, old_token=None):
        """ Returns an authorization header. If one is supplied as old_token, invalidate that one """
        if self._access_token not in (None, old_token):
            return self._access_token
        if self._fetching_new_token is not None:
            # Another coroutine is already refreshing; wait for it.
            await self._fetching_new_token.wait()
            return self._access_token
        self._access_token = None
        self._fetching_new_token = asyncio.Event()
        data = { 'refresh_token': self._refresh_token }
        headers = { 'Content-Type': 'application/json' }
        refresh_response = await self._http_request(BASE_URL + 'oidc/refresh', 'post', headers=headers, json=data)
        if not 'access_token' in refresh_response:
            _LOGGER.error('OAuth token refresh did not yield access token! Got back %s', refresh_response)
        else:
            self._access_token = 'Bearer ' + refresh_response['access_token']
        # Wake up any coroutines waiting on this refresh.
        self._fetching_new_token.set()
        self._fetching_new_token = None
        return self._access_token
    async def get(self, url, **kwargs):
        return await self._http_request(url, auth_token=self, **kwargs)
    async def post(self, url, json, **kwargs):
        return await self._http_request(url, method='post', auth_token=self, json=json, **kwargs)
    async def _http_request(self, url, method='get', auth_token=None, headers={}, **kwargs):
        """Issue a request, retrying with exponential backoff until it
        succeeds; refreshes the token once per 401 response."""
        _LOGGER.debug('Making http %s request to %s, headers %s', method, url, headers)
        headers = headers.copy()
        tries = 0
        while True:
            if auth_token != None:
                # Cache token so we know which token was used for this request,
                # so we know if we need to invalidate.
                token = await auth_token.token()
                headers['Authorization'] = token
            try:
                async with self._session.request(method, url, headers=headers, **kwargs) as response:
                    _LOGGER.debug('Http %s request to %s got response %d', method, url, response.status)
                    if response.status in (200, 201):
                        return await response.json()
                    elif response.status == 401:
                        if auth_token != None:
                            _LOGGER.debug('Request to %s returned status %d, refreshing auth token', url, response.status)
                            token = await auth_token.token(token)
                        else:
                            _LOGGER.error('Grohe sense refresh token is invalid (or expired), please update your configuration with a new refresh token')
                            raise OauthException(response.status, await response.text())
                    else:
                        _LOGGER.debug('Request to %s returned status %d, %s', url, response.status, await response.text())
            except OauthException as oe:
                raise
            except Exception as e:
                _LOGGER.debug('Exception for http %s request to %s: %s', method, url, e)
            # Exponential backoff, capped at 10 minutes between retries.
            tries += 1
            await asyncio.sleep(min(600, 2**tries))
| StarcoderdataPython |
249291 | import numpy as np
class Poetry:
    """Loads poems from poetry.txt and yields padded (input, label)
    batches of character indices for training a character-level RNN."""
    def __init__(self):
        self.poetry_file = 'poetry.txt'
        self.poetry_list = self._get_poetry()
        self.poetry_vectors, self.word_to_int, self.int_to_word = self._gen_poetry_vectors()
        self.batch_size = 64
        # Number of full batches available (remainder poems are dropped).
        self.chunk_size = len(self.poetry_vectors) // self.batch_size
    def _get_poetry(self):
        """Read the corpus, one poem per line."""
        with open(self.poetry_file, "r", encoding='utf-8') as f:
            poetry_list = [line for line in f]
        return poetry_list
    def _gen_poetry_vectors(self):
        """Build character/index vocabularies and vectorise every poem."""
        # ' ' is appended to the vocabulary and later used as padding.
        words = sorted(set(''.join(self.poetry_list)+' '))
        # Assign every character an index, in preparation for vectorising the poems.
        int_to_word = {i: word for i, word in enumerate(words)}
        word_to_int = {v: k for k, v in int_to_word.items()}
        to_int = lambda word: word_to_int.get(word)
        poetry_vectors = [list(map(to_int, poetry)) for poetry in self.poetry_list]
        return poetry_vectors, word_to_int, int_to_word
    def batch(self):
        # Generator over (x_batch, y_batch) training pairs.
        start = 0
        end = self.batch_size
        for _ in range(self.chunk_size):
            batches = self.poetry_vectors[start:end]
            # Inputs: size the array to the longest poem in this batch and pad the rest with ' '.
            x_batch = np.full((self.batch_size, max(map(len, batches))), self.word_to_int[' '], np.int32)
            for row in range(self.batch_size): x_batch[row, :len(batches[row])] = batches[row]
            # Labels: predict the next character from the current one, so y_batch is x_batch shifted left by one position.
            y_batch = np.copy(x_batch)
            y_batch[:, :-1], y_batch[:, -1] = x_batch[:, 1:], x_batch[:, 0]
            yield x_batch, y_batch
            start += self.batch_size
            end += self.batch_size
# Smoke test: print the input batches when run as a script.
if __name__ == '__main__':
    data = Poetry().batch()
    for x, y in data:
        print(x)
| StarcoderdataPython |
3207213 | from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional, nullable 'essay' page reference to the Image model."""
    dependencies = [
        ('wagtailcore', '0040_page_draft_title'),
        ('cms', '0010_transcriptions'),
    ]
    operations = [
        migrations.AddField(
            model_name='image',
            name='essay',
            # SET_NULL keeps images alive when their linked page is deleted.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
    ]
| StarcoderdataPython |
4979436 | <filename>Bio/EUtils/ReseekFile.py
"""Wrap a file handle to allow seeks back to the beginning
Sometimes data coming from a socket or other input file handle isn't
what it was supposed to be. For example, suppose you are reading from
a buggy server which is supposed to return an XML stream but can also
return an unformatted error message. (This often happens because the
server doesn't handle incorrect input very well.)
A ReseekFile helps solve this problem. It is a wrapper to the
original input stream but provides a buffer. Read requests to the
ReseekFile get forwarded to the input stream, appended to a buffer,
then returned to the caller. The buffer contains all the data read so
far.
The ReseekFile can be told to reseek to the start position. The next
read request will come from the buffer, until the buffer has been
read, in which case it gets the data from the input stream. This
newly read data is also appended to the buffer.
When buffering is no longer needed, use the 'nobuffer()' method. This
tells the ReseekFile that once it has read from the buffer it should
throw the buffer away. After nobuffer is called, the behaviour of
'seek' is no longer defined.
For example, suppose you have the server as above which either
gives an error message is of the form:
ERROR: cannot do that
or an XML data stream, starting with "<?xml".
infile = urllib2.urlopen("http://somewhere/")
infile = ReseekFile.ReseekFile(infile)
s = infile.readline()
if s.startswith("ERROR:"):
raise Exception(s[:-1])
infile.seek(0)
infile.nobuffer() # Don't buffer the data
... process the XML from infile ...
This module also implements 'prepare_input_source(source)' modeled on
xml.sax.saxutils.prepare_input_source. This opens a URL and if the
input stream is not already seekable, wraps it in a ReseekFile.
NOTE:
Don't use bound methods for the ReseekFile. When the buffer is
empty, the ReseekFile reassigns the input file's read/readlines/etc.
method as instance variable. This gives slightly better performance
at the cost of not allowing an infrequently used idiom.
Use tell() to get the beginning byte location. ReseekFile will
attempt to get the real position from the wrapped file and use that as
the beginning location. If the wrapped file does not support tell(),
ReseekFile.tell() will return 0.
readlines does not yet support a sizehint. Want to
an implementation?
The latest version of this code can be found at
http://www.dalkescientific.com/Python/
"""
# Written in 2003 by <NAME>, Dalke Scientific Software, LLC.
# This software has been released to the public domain. No
# copyright is asserted.
from cStringIO import StringIO
class ReseekFile:
    """wrap a file handle to allow seeks back to the beginning
    Takes a file handle in the constructor.
    See the module docstring for more documentation.
    """
    def __init__(self, file):
        self.file = file
        # Everything read so far is mirrored here so we can replay it.
        self.buffer_file = StringIO()
        # True while the logical position is at the start; tell() only
        # works in that state.
        self.at_beginning = 1
        try:
            self.beginning = file.tell()
        except (IOError, AttributeError):
            # Wrapped stream cannot tell(); treat position 0 as the start.
            self.beginning = 0
        # Set to 0 by nobuffer(); once the buffer is drained it is dropped.
        self._use_buffer = 1
    def seek(self, offset, whence = 0):
        """offset, whence = 0
        Seek to a given byte position. Only supports whence == 0
        and offset == the initial value of ReseekFile.tell() (which
        is usually 0, but not always.)
        """
        if whence != 0:
            raise TypeError("Unexpected whence value of %s; expecting 0" % \
                            (whence,))
        if offset != self.beginning:
            raise TypeError("Unexpected offset value of %r; expecting '%s'" % \
                            (offset, self.beginning))
        self.buffer_file.seek(0)
        self.at_beginning = 1
    def tell(self):
        """the current position of the file
        The initial position may not be 0 if the underlying input
        file supports tell and it not at position 0.
        """
        if not self.at_beginning:
            raise TypeError("ReseekFile cannot tell except at the beginning of file")
        return self.beginning
    def _read(self, size):
        # Serve as much as possible from the replay buffer, then fall
        # back to the wrapped file, mirroring new data into the buffer.
        if size < 0:
            y = self.file.read()
            z = self.buffer_file.read() + y
            if self._use_buffer:
                self.buffer_file.write(y)
            return z
        if size == 0:
            return ""
        x = self.buffer_file.read(size)
        if len(x) < size:
            y = self.file.read(size - len(x))
            if self._use_buffer:
                self.buffer_file.write(y)
            return x + y
        return x
    def read(self, size = -1):
        """read up to 'size' bytes from the file
        Default is -1, which means to read to end of file.
        """
        x = self._read(size)
        if self.at_beginning and x:
            # Any successful read moves us away from the start.
            self.at_beginning = 0
        self._check_no_buffer()
        return x
    def readline(self):
        """read a line from the file"""
        # Can we get it out of the buffer_file?
        s = self.buffer_file.readline()
        if s[-1:] == "\n":
            return s
        # No, so now we read a line from the input file
        t = self.file.readline()
        # Append the new data to the buffer, if still buffering
        if self._use_buffer:
            self.buffer_file.write(t)
        self._check_no_buffer()
        return s + t
    def readlines(self):
        """read all remaining lines from the file"""
        s = self.read()
        lines = []
        i, j = 0, s.find("\n")
        while j > -1:
            lines.append(s[i:j+1])
            i = j+1
            j = s.find("\n", i)
        if i < len(s):
            # Only get here if the last line doesn't have a newline
            lines.append(s[i:])
        return lines
    def _check_no_buffer(self):
        # If 'nobuffer' called and finished with the buffer file
        # then get rid of the buffer and redirect everything to
        # the original input file.
        if self._use_buffer == 0 and self.buffer_file.tell() == \
               len(self.buffer_file.getvalue()):
            # I'm doing this for the slightly better performance
            self.seek = getattr(self.file, "seek", None)
            self.tell = getattr(self.file, "tell", None)
            self.read = self.file.read
            self.readline = self.file.readline
            self.readlines = self.file.readlines
            del self.buffer_file
    def nobuffer(self):
        """tell the ReseekFile to stop using the buffer once it's exhausted"""
        self._use_buffer = 0
def prepare_input_source(source):
    """given a URL, returns a xml.sax.xmlreader.InputSource
    Works like xml.sax.saxutils.prepare_input_source.  Wraps the
    InputSource in a ReseekFile if the URL returns a non-seekable
    file.
    To turn the buffer off if that happens, you'll need to do
    something like
      f = source.getCharacterStream()
      ...
      try:
       f.nobuffer()
      except AttributeError:
          pass
    or
      if isinstance(f, ReseekFile):
          f.nobuffer()
    """
    from xml.sax import saxutils
    source = saxutils.prepare_input_source(source)
    # Is this correct?  Don't know - don't have Unicode exprerience
    f = source.getCharacterStream() or source.getByteStream()
    try:
        f.tell()
    except (AttributeError, IOError):
        # Stream is not seekable, so wrap it in a ReseekFile.
        # Bug fix: in this module the name 'ReseekFile' is the class, not
        # the module, so the original 'ReseekFile.ReseekFile(f)' raised
        # AttributeError.
        f = ReseekFile(f)
        source.setByteStream(f)
        source.setCharacterStream(None)
    return source
def test_reads(test_s, file, seek0):
assert file.read(2) == "Th"
assert file.read(3) == "is "
assert file.read(4) == "is a"
assert file.read(0) == ""
assert file.read(0) == ""
assert file.read(6) == " test."
file.seek(seek0)
assert file.read(2) == "Th"
assert file.read(3) == "is "
assert file.read(4) == "is a"
assert file.read(0) == ""
assert file.read(0) == ""
assert file.read(6) == " test."
assert file.read(1) == "\n"
assert file.read(5) == "12345"
assert file.read() == "67890\n"
file.seek(seek0)
assert file.read() == test_s
file.seek(seek0)
def test():
    """Self-test for ReseekFile.
    NOTE(review): relies on Python 2 behaviour (cStringIO.StringIO,
    byte/str equivalence) and on os.popen, so it only runs under the
    interpreter this module was written for.
    """
    s = "This is a test.\n1234567890\n"
    file = StringIO(s)
    # Test with a normal file
    x = file.tell()
    test_reads(s, file, x)
    test_reads(s, file, x)
    # Test with a ReseekFile wrapper
    rf = ReseekFile(file)
    y = rf.tell()
    rf.seek(y)
    test_reads(s, rf, y)
    assert rf.read() == s
    assert rf.read() == ""
    # Make sure the tell offset is correct (may not be 0)
    file = StringIO("X" + s)
    file.read(1)
    rf = ReseekFile(file)
    y = rf.tell()
    test_reads(s, rf, y)
    rf.seek(y)
    test_reads(s, rf, y)
    assert rf.read() == s
    assert rf.read() == ""
    # Test the ability to turn off buffering and have changes
    # propogate correctly
    file = StringIO("X" + s)
    file.read(1)
    rf = ReseekFile(file)
    y = rf.tell()
    assert y == 1
    rf.read(1000)
    rf.seek(y)
    rf.nobuffer()
    assert rf.tell() == y
    test_reads(s, rf, y)
    rf.seek(y)
    test_reads(s, rf, y)
    assert rf.read() == s
    assert rf.read() == ""
    # turn off buffering after partial reads
    file = StringIO("X" + s)
    file.read(1)
    rf = ReseekFile(file)
    y = rf.tell()
    rf.read(5)
    rf.seek(y)
    rf.nobuffer()
    assert rf.read() == s
    file = StringIO("X" + s)
    file.read(1)
    rf = ReseekFile(file)
    y = rf.tell()
    t = rf.read(5)
    rf.seek(y)
    rf.nobuffer()
    assert rf.read(5) == t
    file = StringIO("X" + s)
    file.read(1)
    rf = ReseekFile(file)
    y = rf.tell()
    t = rf.read(5)
    assert t == s[:5]
    rf.seek(y)
    rf.nobuffer()
    assert rf.read(8) == s[:8]
    file = StringIO("X" + s)
    file.read(1)
    rf = ReseekFile(file)
    y = rf.tell()
    t = rf.read(5)
    assert t == s[:5]
    rf.nobuffer()
    assert rf.read(8) == s[5:5+8]
    # Should only do this test on Unix systems
    import os
    infile = os.popen("echo HELLO_THERE")
    infile.read(1)
    rf = ReseekFile(infile)
    y = rf.tell()
    assert rf.read(1) == "E"
    assert rf.read(2) == "LL"
    rf.seek(y)
    assert rf.read(4) == "ELLO"
    rf.seek(y)
    assert rf.read(1) == "E"
    rf.nobuffer()
    assert rf.read(1) == "L"
    assert rf.read(4) == "LO_T"
    assert rf.read(4) == "HERE"
    # After the buffer is dropped, seek/tell come from the pipe, which
    # cannot seek, so both must fail.
    try:
        rf.seek(y)
        raise AssertionError("Cannot seek here!")
    except IOError:
        pass
    try:
        rf.tell()
        raise AssertionError("Cannot tell here!")
    except IOError:
        pass
    # Check if readline/readlines works
    s = "This is line 1.\nAnd line 2.\nAnd now, page 3!"
    file = StringIO(s)
    rf = ReseekFile(file)
    rf.read(1)
    assert rf.readline() == "his is line 1.\n"
    rf.seek(0)
    assert rf.readline() == "This is line 1.\n"
    rf.read(2)
    assert rf.readline() == "d line 2.\n"
    rf.seek(0)
    assert rf.readlines() == ["This is line 1.\n",
                              "And line 2.\n",
                              "And now, page 3!"]
    rf.seek(0)
    rf.read(len(s))
    assert rf.readlines() == []
    rf.seek(0)
    # Now there is a final newline
    s = "This is line 1.\nAnd line 2.\nAnd now, page 3!\n"
    rf = ReseekFile(StringIO(s))
    rf.read(1)
    rf.seek(0)
    rf.nobuffer()
    assert rf.readlines() == ["This is line 1.\n",
                              "And line 2.\n",
                              "And now, page 3!\n"]
# Run the self-test when executed as a script.
if __name__ == "__main__":
    test()
| StarcoderdataPython |
9659631 | <gh_stars>0
"""
*******************************************************************************
* BTChip Bitcoin Hardware Wallet Python API
* (c) 2014 BTChip - 1BTChip7VfTnrPra5jqci7ejnMguuHogTn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from navhip.navhip import *
from navhip.navhipUtils import *
import json
"""
Signs a TX generated by Armory. That TX:
{
'inputs': [{
'p2shscript': '52210269694830114e4b1f6ef565ce4efb933681032d30333c80df713df6b60a4c62832102f43b905e9e35ccd22757faedf9eceb652dc9ba198a3904d43f4298def0213eb521037b9e3578dd3b5559d613bc2641931e6ce7d55a9d081b07347888d7d17a2b910253ae',
'supporttxhash_be': '0c1676b8fc1adaca53221290e242b8eb80fd6b89aa83f2fa0106f87e13388300',
'sequence': 4294967295,
'keys': [{
'dersighex': '',
'pubkeyhex': '<KEY>',
'wltlochex': ''
}, {
'dersighex': '',
'pubkeyhex': '02f43b905e9e35ccd22757faedf9eceb652dc9ba198a3904d43f4298def0213eb5',
'wltlochex': ''
}, {
'dersighex': '',
'pubkeyhex': '037b9e3578dd3b5559d613bc2641931e6ce7d55a9d081b07347888d7d17a2b9102',
'wltlochex': ''
}],
'contriblabel': u '',
'supporttxhash_le': '008338137ef80601faf283aa896bfd80ebb842e290122253cada1afcb876160c',
'contribid': 'JLBercZk',
'version': 1,
'inputvalue': 46000000,
'outpoint': '008338137ef80601faf283aa896bfd80ebb842e290122253cada1afcb876160c00000000',
'magicbytes': '0b110907',
'supporttx': '01000000013e9fe12917d854a0e093b982eaa46990289e2262f2db9fc1bd3f13718f3c806e010000006b483045022100af668e482e3ed363f51b36ddabad7cdf20d177104c92b8676a5b14f51107179602206c4ecd67544c74c6689ca453e2157d0c0b8a4608d85956429d2615275a51c66f01210374db359a004626daf2fcf10b8601f5f39438848a6733c768e88ce0ad398ae79dffffffff0280e7bd020000000017a914e2a227eb40dfce902f2c1d80ddafa798b16d22c3876c8fc846000000001976a914af58f09cf65b213bb9bd181a94e133b4ad4d6b2788ac00000000',
'numkeys': 3,
'supporttxhash': '0c1676b8fc1adaca53221290e242b8eb80fd6b89aa83f2fa0106f87e13388300',
'supporttxoutindex': 0
}],
'fee': 10000,
'locktimeint': 0,
'outputs': [{
'txoutvalue': 10000000,
'authdata': '',
'contriblabel': '',
'p2shscript': '',
'scripttypeint': 4,
'isp2sh': True,
'txoutscript': 'a914c0c3b6ada732c797881d00de6c350eec96e3d22287',
'authmethod': 'NONE',
'hasaddrstr': True,
'contribid': '',
'version': 1,
'ismultisig': False,
'magicbytes': '0b110907',
'addrstr': '2NApUBXv4NB8pm834pHUajiUL6rvFaaj6N8',
'scripttypestr': 'Standard (P2SH)',
'wltlocator': ''
}, {
'txoutvalue': 35990000,
'authdata': '',
'contriblabel': '',
'p2shscript': '',
'scripttypeint': 4,
'isp2sh': True,
'txoutscript': 'a914e2a227eb40dfce902f2c1d80ddafa798b16d22c387',
'authmethod': 'NONE',
'hasaddrstr': True,
'contribid': '',
'version': 1,
'ismultisig': False,
'magicbytes': '0b110907',
'addrstr': '2NDuYxRrmAs2fRcMj4ew2F41aFp2PN9yiV1',
'scripttypestr': 'Standard (P2SH)',
'wltlocator': ''
}],
'sumoutputs': 45990000,
'suminputs': 46000000,
'version': 1,
'numoutputs': 2,
'magicbytes': '0b110907',
'locktimedate': '',
'locktimeblock': 0,
'id': '8jkccikU',
'numinputs': 1
}
Input comes from vout[0] of 0c1676b8fc1adaca53221290e242b8eb80fd6b89aa83f2fa0106f87e13388300.
TX I want to generate is 0.10 to 2NApUBXv4NB8pm834pHUajiUL6rvFaaj6N8
The multisig address 2NDuYxRrmAs2fRcMj4ew2F41aFp2PN9yiV1 contains 0.46 BTC, and is generated
using the public keys 0'/0/0, 0'/0/1, and 0'/0/2 from the seed below.
"""
# NOTE(review): Python 2 script (print statements, `<>`, raw_input,
# str.decode('hex')) that drives a physical hardware wallet dongle.
# It signs one input of a 2-of-3 P2SH multisig transaction twice and
# checks every intermediate artifact against the hard-coded vectors below.
# Run on non configured dongle or dongle configured with test seed below
SEED = bytearray("1762F9A3007DBC825D0DD9958B04880284C88A10C57CF569BB3DADF7B1027F2D".decode('hex'))
# Armory supporttx
UTX = bytearray("01000000013e9fe12917d854a0e093b982eaa46990289e2262f2db9fc1bd3f13718f3c806e010000006b483045022100af668e482e3ed363f51b36ddabad7cdf20d177104c92b8676a5b14f51107179602206c4ecd67544c74c6689ca453e2157d0c0b8a4608d85956429d2615275a51c66f01210374db359a004626daf2fcf10b8601f5f39438848a6733c768e88ce0ad398ae79dffffffff0280e7bd020000000017a914e2a227eb40dfce902f2c1d80ddafa798b16d22c3876c8fc846000000001976a914af58f09cf65b213bb9bd181a94e133b4ad4d6b2788ac00000000".decode('hex'))
UTXO_INDEX = 0
OUTPUT = bytearray("02809698000000000017a914c0c3b6ada732c797881d00de6c350eec96e3d22287f02925020000000017a914e2a227eb40dfce902f2c1d80ddafa798b16d22c387".decode('hex'))
# Armory p2shscript
REDEEMSCRIPT = bytearray("52210269694830114e4b1f6ef565ce4efb933681032d30333c80df713df6b60a4c62832102f43b905e9e35ccd22757faedf9eceb652dc9ba198a3904d43f4298def0213eb521037b9e3578dd3b5559d613bc2641931e6ce7d55a9d081b07347888d7d17a2b910253ae".decode('hex'))
# Expected DER signatures for keys 0'/0/0, 0'/0/1, 0'/0/2 (SIGNATURE_0 is unused below).
SIGNATURE_0 = bytearray("3044022056cb1b781fd04cfe6c04756ad56d02e5512f3fe7f411bc22d1594da5c815a393022074ad7f4d47af7c3f8a7ddf0ba2903f986a88649b0018ce1538c379b304a6a23801".decode('hex'))
SIGNATURE_1 = bytearray("304402205545419c4aded39c7f194b3f8c828f90e8d9352c756f7c131ed50e189c02f29a02201b160503d7310df49055b04a327e185fc22dfe68f433594ed7ce526d99a5026001".decode('hex'))
SIGNATURE_2 = bytearray("30440220634fbbfaaea74d42280a8c9e56c97418af04539f93458e85285d15462aec7712022041ba27a5644642a2f5b3c02610235ec2c6115bf4137bb51181cbc0a3a54dc0db01".decode('hex'))
# Expected fully-assembled signed transaction bytes.
TRANSACTION = bytearray("0100000001008338137ef80601faf283aa896bfd80ebb842e290122253cada1afcb876160c00000000fc004730440220634fbbfaaea74d42280a8c9e56c97418af04539f93458e85285d15462aec7712022041ba27a5644642a2f5b3c02610235ec2c6115bf4137bb51181cbc0a3a54dc0db0147304402205545419c4aded39c7f194b3f8c828f90e8d9352c756f7c131ed50e189c02f29a02201b160503d7310df49055b04a327e185fc22dfe68f433594ed7ce526d99a50260014c6952210269694830114e4b1f6ef565ce4efb933681032d30333c80df713df6b60a4c62832102f43b905e9e35ccd22757faedf9eceb652dc9ba198a3904d43f4298def0213eb521037b9e3578dd3b5559d613bc2641931e6ce7d55a9d081b07347888d7d17a2b910253aeffffffff02809698000000000017a914c0c3b6ada732c797881d00de6c350eec96e3d22287f02925020000000017a914e2a227eb40dfce902f2c1d80ddafa798b16d22c38700000000".decode('hex'))
# Message the dongle displays for the second-factor confirmation.
SECONDFACTOR_1 = "RELAXED MODE Powercycle then confirm use of 0.46 BTC with PIN"
# Armory txoutscript
output = get_output_script([["0.1", bytearray("a914c0c3b6ada732c797881d00de6c350eec96e3d22287".decode('hex'))], ["0.3599", bytearray("a914e2a227eb40dfce902f2c1d80ddafa798b16d22c387".decode('hex'))]]);
if output<>OUTPUT:
    raise BTChipException("Invalid output script encoding");
# Optional setup
dongle = getDongle(True)
app = navhip(dongle)
try:
    # Fails harmlessly if the dongle is already set up.
    app.setup(navhip.OPERATION_MODE_RELAXED_WALLET, navhip.FEATURE_RFC6979, 111, 196, "1234", None, navhip.QWERTY_KEYMAP, SEED)
except:
    pass
# Authenticate
app.verifyPin("1234")
# Get the trusted input associated to the UTXO
transaction = bitcoinTransaction(UTX)
print transaction
trustedInput = app.getTrustedInput(transaction, UTXO_INDEX)
# Start composing the transaction
app.startUntrustedTransaction(True, 0, [trustedInput], REDEEMSCRIPT)
app.finalizeInputFull(OUTPUT)
dongle.close()
# Wait for the second factor confirmation
# Done on the same application for test purposes, this is typically done in another window
# or another computer for bigger transactions
response = raw_input("Powercycle the dongle to get the second factor and powercycle again : ")
if not response.startswith(SECONDFACTOR_1):
    raise BTChipException("Invalid second factor")
# Get a reference to the dongle again, as it was disconnected
dongle = getDongle(True)
app = navhip(dongle)
# Replay the transaction, this time continue it since the second factor is ready
app.startUntrustedTransaction(False, 0, [trustedInput], REDEEMSCRIPT)
app.finalizeInputFull(OUTPUT)
# Provide the second factor to finalize the signature
signature1 = app.untrustedHashSign("0'/0/1", response[len(response) - 4:])
if signature1 <> SIGNATURE_1:
    raise BTChipException("Invalid signature1")
# Same thing for the second signature
app.verifyPin("1234")
app.startUntrustedTransaction(True, 0, [trustedInput], REDEEMSCRIPT)
app.finalizeInputFull(OUTPUT)
dongle.close()
response = raw_input("Powercycle the dongle to get the second factor and powercycle again : ")
if not response.startswith(SECONDFACTOR_1):
    raise BTChipException("Invalid second factor")
dongle = getDongle(True)
app = navhip(dongle)
app.startUntrustedTransaction(False, 0, [trustedInput], REDEEMSCRIPT)
app.finalizeInputFull(OUTPUT)
signature2 = app.untrustedHashSign("0'/0/2", response[len(response) - 4:])
if signature2 <> SIGNATURE_2:
    raise BTChipException("Invalid signature2")
# Finalize the transaction - build the redeem script and put everything together
inputScript = get_p2sh_input_script(REDEEMSCRIPT, [signature2, signature1])
transaction = format_transaction(OUTPUT, [ [ trustedInput['value'], inputScript] ])
print "Generated transaction : " + str(transaction).encode('hex')
if transaction <> TRANSACTION:
    raise BTChipException("Invalid transaction")
# The transaction is ready to be broadcast, enjoy
| StarcoderdataPython |
6500823 | """
Escreva um programa que leia um número N inteiro qualquer e mostre na tela os N primeiros elementos de uma Sequência
de Fibonacci.
"""
def fibonacci_terms(count):
    """Return a list with the first ``count`` Fibonacci terms (0, 1, 1, 2, ...).

    Args:
        count: number of terms requested; 0 yields an empty list.
    """
    terms = []
    current, following = 0, 1
    for _ in range(count):
        terms.append(current)
        current, following = following, current + following
    return terms


if __name__ == '__main__':
    print('-' * 30)
    print('Sequência de Fibonnacci')
    print('-' * 30)
    n = int(input('Quantos termos você quer mostrar? '))
    print('~' * 30)
    # Bug fix: the original printed the first two terms unconditionally,
    # so asking for 0 or 1 terms still displayed two.  Build exactly n terms.
    print(' → '.join(str(t) for t in fibonacci_terms(n)), end='')
    print(' FIM')
| StarcoderdataPython |
1849284 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
"""
the input x in both networks should be [o, g], where o is the observation and g is the goal.
"""
# define the actor network
class actor(nn.Module):
    """Policy network: maps a concatenated [observation, goal] vector to an
    action scaled into [-action_max, action_max] via tanh."""
    def __init__(self, env_params):
        super(actor, self).__init__()
        # Maximum magnitude of each action component (used to scale tanh output).
        self.max_action = env_params['action_max']
        self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'], 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 256)
        self.action_out = nn.Linear(256, env_params['action'])
    def forward(self, x):
        # x is expected to be the concatenation [obs, goal] (see module docstring).
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        actions = self.max_action * torch.tanh(self.action_out(x))
        return actions
    def add_dice(self, env_params, extra_dice):
        # Widens the input layer by 3 features per extra die, copying the old
        # weights into their shifted positions and zero-initialising the new
        # columns so prior behaviour is preserved on the original inputs.
        # NOTE(review): assumes env_params['obs']/['goal'] already reflect the
        # enlarged sizes when this is called — confirm with the caller.
        with torch.no_grad():
            extra_inputs = extra_dice * 3
            end_shift = env_params['goal']
            temp = self.fc1
            # Map old input indices to their new column positions: everything
            # in the goal suffix shifts right by extra_inputs.
            og_ins = np.arange(temp.weight.shape[-1])
            og_ins[-end_shift:] += extra_inputs
            self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'], 256)
            self.fc1.bias = temp.bias
            self.fc1.weight[:, og_ins] = temp.weight
            self.fc1.weight[:, -(end_shift+extra_inputs):-end_shift] = 0
class critic(nn.Module):
    """Q-network: maps ([observation, goal], action) to a scalar Q-value.
    Actions are normalised by action_max before being concatenated."""
    def __init__(self, env_params):
        super(critic, self).__init__()
        self.max_action = env_params['action_max']
        self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'] + env_params['action'], 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 256)
        self.q_out = nn.Linear(256, 1)
    def forward(self, x, actions):
        # Scale actions into [-1, 1] so all inputs share a comparable range.
        x = torch.cat([x, actions / self.max_action], dim=-1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        q_value = self.q_out(x)
        return q_value
    def add_dice(self, env_params, extra_dice):
        # Same input-widening surgery as actor.add_dice: 3 new zero-initialised
        # input columns per extra die, inserted before the goal+action suffix.
        # NOTE(review): assumes env_params already reflects the enlarged sizes.
        with torch.no_grad():
            extra_inputs = extra_dice * 3
            end_shift = env_params['goal'] + env_params['action']
            temp = self.fc1
            ins = np.arange(temp.weight.shape[-1])
            ins[-end_shift:] += extra_inputs
            self.fc1 = nn.Linear(env_params['obs'] + env_params['goal'] + env_params['action'], 256)
            self.fc1.bias = temp.bias
            self.fc1.weight[:, ins] = temp.weight
            self.fc1.weight[:, -(end_shift+extra_inputs):-end_shift] = 0
# define simple feed-forward dynamics model
class DynamicsModel(nn.Module):
    """Simple feed-forward dynamics model.

    Given an observation and an action it predicts a state delta of the
    same dimensionality as the observation.
    """
    def __init__(self, obs_size, act_size, hiddens=2, hidden_size=512):
        super(DynamicsModel, self).__init__()
        assert hiddens > 0, "Must have at least 1 hidden layer"
        # First hidden layer consumes [obs, action]; the rest are square.
        layers = [nn.Linear(obs_size + act_size, hidden_size)]
        for _ in range(hiddens - 1):
            layers.append(nn.Linear(hidden_size, hidden_size))
        self.hidden_layers = nn.ModuleList(layers)
        self.fc_final = nn.Linear(hidden_size, obs_size)
    def forward(self, obs, a):
        out = torch.cat([obs, a], dim=-1).float()
        for layer in self.hidden_layers:
            out = F.relu(layer(out))
        return self.fc_final(out)
| StarcoderdataPython |
6678495 | import os
import os.path
from Compiler_teaching_platform.settings import BASE_DIR
def dfs_showdir(path, depth):
    """Recursively print a tree view of *path*.

    Only entries that look like Python sources ('.py' in the name but not
    '.pyc') or extension-less names (e.g. package directories) are shown.

    Args:
        path: directory to walk.
        depth: current recursion depth; pass 0 for the root call (prints the
            "root:[...]" banner).
    """
    if depth == 0:
        print("root:[" + path + "]")
    for item in os.listdir(path):
        # Keep python files and dot-less names; drop compiled .pyc artifacts.
        # (Flattened from the original redundant nested ifs.)
        if ('.py' in item or '.' not in item) and '.pyc' not in item:
            print("| " * depth + "+--" + item)
            # Fix: use os.path.join instead of manual '/' concatenation,
            # which is not portable across platforms.
            child = os.path.join(path, item)
            if os.path.isdir(child):
                dfs_showdir(child, depth + 1)
# Entry point: print the project tree rooted at Django's BASE_DIR.
if __name__ == '__main__':
    dfs_showdir(BASE_DIR, 0)
9616800 | <filename>evaluate.py
"""
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
evaluation, run this script, then cd into './evaluation' and run eval_tools.py
Author:<NAME>
"""
import tensorflow as tf
import numpy as np
import cv2
import os
from time import time
from model.factory import model_factory
from dataset.hazy_person import provider
import utils.test_tools as test_tools
import config
FLAGS = tf.app.flags.FLAGS
slim = tf.contrib.slim

# Command-line flags controlling model selection, checkpoint location and
# the post-processing (score filtering / NMS) of the detector output.
tf.app.flags.DEFINE_string(
    'model_name', None,
    'The name of the architecture to train.')
tf.app.flags.DEFINE_string(
    'attention_module', None,
    'The name of attention module to apply.')
tf.app.flags.DEFINE_string(
    'checkpoint_dir', None,
    'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_float(
    'select_threshold', 0.3, 'obj score less than it would be filter')
tf.app.flags.DEFINE_float(
    'nms_threshold', 0.6, 'nms threshold')
tf.app.flags.DEFINE_integer(
    'keep_top_k', 30, 'maximun num of obj after nms')
tf.app.flags.DEFINE_integer(
    'compare_img_height', 224, 'the img height when compare with ground truth')
tf.app.flags.DEFINE_integer(
    'compare_img_width', 224, 'the img width when compare with ground truth')
tf.app.flags.DEFINE_integer(
    'vis_img_height', 800, 'the img height when visulize')
tf.app.flags.DEFINE_integer(
    'vis_img_width', 800, 'the img width when visulize')
#### config only for prioriboxes_mbn ####
tf.app.flags.DEFINE_string(
    'backbone_name', None,
    'support mobilenet_v1 and mobilenet_v2')
tf.app.flags.DEFINE_boolean(
    'multiscale_feats', None,
    'whether merge different scale features')

## define placeholder ##
# Input batch placeholder sized from the project-level config module.
inputs = tf.placeholder(tf.float32,
                        shape=(None, config.img_size[0], config.img_size[1], 3))
def build_graph(model_name, attention_module, config_dict, is_training):
    """Build the inference graph: network forward pass plus score filtering,
    sorting and batched NMS on the predicted boxes.

    Args:
        model_name: choose a model to build.
        attention_module: must be "se_block" or "cbam_block".
        config_dict: some config for building net.
        is_training: whether to train or test, here must be False.
    Returns:
        rscores, rbboxes: per-class dicts of top-k scores and corner boxes
        after NMS (the original docstring incorrectly described these as
        det_loss/clf_loss).
    """
    assert is_training == False
    net = model_factory(inputs=inputs, model_name=model_name,
                        attention_module=attention_module, is_training=is_training,
                        config_dict=config_dict)
    corner_bboxes, clf_pred = net.get_output_for_test()
    # Drop detections whose objectness score is below select_threshold.
    score, bboxes = test_tools.bboxes_select(clf_pred, corner_bboxes,
                                             select_threshold= FLAGS.select_threshold)
    score, bboxes = test_tools.bboxes_sort(score, bboxes)
    rscores, rbboxes = test_tools.bboxes_nms_batch(score, bboxes,
                                                   nms_threshold=FLAGS.nms_threshold,
                                                   keep_top_k=FLAGS.keep_top_k)
    return rscores, rbboxes
def main(_):
    """Run the detector over the evaluation set and write per-image
    detection-results files in the format expected by eval_tools.py."""
    config_dict = {'multiscale_feats': FLAGS.multiscale_feats,
                   'backbone': FLAGS.backbone_name}
    scores, bboxes = build_graph(FLAGS.model_name, FLAGS.attention_module, is_training=False,
                                 config_dict=config_dict)
    # Local name shadows the imported `config` module; kept as-is.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        # NOTE(review): prefer `is None` over `== None`.
        if FLAGS.checkpoint_dir ==None:
            raise ValueError("checkpoint_dir must not be None")
        else:
            model_name = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name+".model")
            tf.train.Saver().restore(sess, model_name)
            print("Load checkpoint success...")
        pd = provider(for_what="evaluate", whether_aug=False)
        # Iterate until the provider signals exhaustion with file_name == None.
        while (True):
            norm_img, corner_bboxes_gt, file_name = pd.load_data_eval()
            if file_name != None:
                scores_pred, bboxes_pred = sess.run([scores, bboxes], feed_dict={inputs: np.array([norm_img])})
                #img = np.uint8((norm_img + 1.) * 255 / 2)
                #img = cv2.resize(img, dsize=(FLAGS.vis_img_height, FLAGS.vis_img_width))
                # NMS output is a dict keyed by class; take the first (only)
                # class and the first (only) batch element.
                scores_pred = list(scores_pred.values())
                bboxes_pred = list(bboxes_pred.values())
                scores_pred = scores_pred[0][0]
                bboxes_pred = bboxes_pred[0][0]
                # Scale normalised [ymin, xmin, ymax, xmax] to pixel coords.
                bboxes_pred[:, 0] = bboxes_pred[:, 0] * FLAGS.compare_img_height
                bboxes_pred[:, 1] = bboxes_pred[:, 1] * FLAGS.compare_img_width
                bboxes_pred[:, 2] = bboxes_pred[:, 2] * FLAGS.compare_img_height
                bboxes_pred[:, 3] = bboxes_pred[:, 3] * FLAGS.compare_img_width
                bboxes_pred = np.int32(bboxes_pred)
                file_name = file_name.split('.')[0]
                file_name = './evaluation/detection-results/'+file_name+'.txt'
                file = open(file_name, "w")
                # One "person <score> <xmin> <ymin> <xmax> <ymax>" line per box;
                # all-zero rows are padding and are skipped.
                for score, bbox in zip(scores_pred, bboxes_pred):
                    if bbox.any() != 0:
                        string = ("person " + str(score) + " " + str(bbox[1]) + " " + str(
                            bbox[0]) + " " + str(bbox[3]) + " " + str(bbox[2]) + "\n")
                        file.write(string)
                file.close()
            else:
                tf.logging.info("Finish detection results, please cd into './evaluation' and run eval_tools.py")
                break

if __name__ == '__main__':
    tf.app.run()
| StarcoderdataPython |
8031342 | from fifo_animal_shelter import __version__
from fifo_animal_shelter.fifo_animal_shelter import Queue, Node, AnimalShelter
def test_version():
    # Package version must match the pyproject metadata.
    assert __version__ == '0.1.0'
def test_enqueue():
    """Enqueued animals land at the front of their species queue."""
    shelter = AnimalShelter()
    observed = []
    shelter.enqueue('bobi', 'dog')
    observed.append(shelter.dog.peek())
    shelter.enqueue('lara', 'cat')
    observed.append(shelter.cat.peek())
    assert observed == ['bobi', 'lara']
def test_enqueue_fail():
    """An unsupported species is rejected with an explanatory message."""
    shelter = AnimalShelter()
    result = shelter.enqueue('fast', 'horse')
    assert result == 'you can just choose dog or cat'
def test_dequeue():
    """Dequeue returns the oldest animal of the requested species."""
    shelter = AnimalShelter()
    shelter.enqueue('bobi', 'dog')
    shelter.enqueue('lara', 'cat')
    observed = [shelter.dequeue('dog'), shelter.dequeue('cat')]
    assert observed == ['bobi', 'lara']
def test_dequeue_fail():
    """Dequeueing an unsupported species is rejected with a message."""
    shelter = AnimalShelter()
    result = shelter.dequeue('horse')
    assert result == 'you can just choose dog or cat'
| StarcoderdataPython |
95695 |
import torch
from sklearn.linear_model import Ridge
import signatory
class InvestConsumption():
    """Mean-field investment/consumption problem with common noise.

    Agents are parametrised by a type vector (delta, mu, nu, theta, sigma,
    eps); `benchmark` computes the closed-form equilibrium strategy and
    paths, and `distFlow`/`getDistFlow` regress the conditional
    distribution flow on path signatures of the common noise.
    NOTE(review): coefficient formulas are taken as given from the source
    paper and are not re-derived here.
    """
    def __init__(self, params):
        # T: horizon, N: number of time steps, device: torch device.
        self.T = params["T"]
        self.N = params["N"]
        self.device = params["device"]
    def initialize(self, typeVec):
        """Split the (batch, 6) type vector into per-agent parameter columns
        delta, mu, nu, theta, sigma, eps (each of shape (batch, 1))."""
        self.delta = typeVec[:, 0:1]
        self.mu = typeVec[:, 1:2]
        self.nu = typeVec[:, 2:3]
        self.theta = typeVec[:, 3:4]
        self.sigma = typeVec[:, 4:5]
        self.eps = typeVec[:, 5:6]
    def U(self, x):
        """CRRA utility with relative risk aversion 1/delta.

        Args:
            x: torch.tensor, positive.
        """
        return 1/(1-1/self.delta)*torch.pow(x, 1-1/self.delta)
    def running(self, x, mx, alpha, mc):
        """Running cost (negative utility of relative consumption).

        Args:
            alpha: torch.tensor([pi, c]); only the consumption column is used.
        """
        c = alpha[:, 1:]
        return -self.U(c*x*torch.pow(mc*mx, -self.theta))
    def terminal(self, x, mx):
        """Terminal cost: -eps * U of wealth relative to the mean state."""
        return -self.eps*self.U(x*mx**(-self.theta))
    def one_step_simulation(self, x, pi, c, dw, dcn):
        """Advance wealth one Euler step in log space and exponentiate back.

        Args:
            x: current wealth, x.size() = x.size(m).
            pi, c: investment fraction and consumption rate.
            dw, dcn: idiosyncratic and common noise increments.
        """
        dt = self.T/self.N
        x_next = torch.log(x) + pi*(dt*self.mu + dw*self.nu + dcn*self.sigma) - c*dt\
            - 0.5* pi**2 *(self.sigma**2 + self.nu**2)*dt
        return torch.exp(x_next)
    def c(self, t):
        '''
        if self.beta == 0:
            return 1/(self.T - t + 1/self.lam)
        else:
            return 1/(1/self.beta + (1/self.lam - 1/self.beta)*torch.exp(-self.beta*(self.T-t)))
        '''
        # Equilibrium consumption rate at time t; requires self.beta and
        # self.lam, which are set by benchmark().
        return 1/(1/self.beta + (1/self.lam - 1/self.beta)*torch.exp(-self.beta*(self.T-t)))
    def benchmark(self, w, cn, initial):
        """Simulate the closed-form benchmark equilibrium.

        Args:
            w: tensor(batch, N, dim), brownian increments.
            cn: tensor(batch, N+1, dim), common noise.
            initial: tensor(batch, 1, dim), starting point, has initial distribution mu_0.
        Returns:
            X: tensor(batch, N+1, dim), benchmark paths, no extra time dimension.
            c: tensor(batch, N, dim), benchmark cost.
        """
        # Closed-form equilibrium coefficients (population means over types).
        phi = torch.mean(self.delta*self.mu*self.sigma/(self.sigma**2+self.nu**2))
        psi = torch.mean(self.theta*(self.delta-1)*self.sigma**2/(self.sigma**2+self.nu**2))
        q = phi/(1+psi)
        e1 = torch.mean((self.delta*self.mu**2-self.theta*(self.delta-1)*self.sigma*self.mu*q)/(self.sigma**2+self.nu**2))
        e2 = torch.mean((self.delta*self.mu-self.theta*(self.delta-1)*self.sigma*q)**2/(self.sigma**2+self.nu**2))
        rho = (1-1/self.delta)*(\
            self.delta/(2*(self.sigma**2+self.nu**2))*(self.mu-self.sigma*q*self.theta*(1-1/self.delta))**2 \
            + 0.5*q**2*self.theta**2*(1-1/self.delta) - self.theta*e1 + 0.5*self.theta*e2)
        self.beta = (self.theta*(self.delta-1)*torch.mean(self.delta*rho)/(1+torch.mean(self.theta*(self.delta-1))) - self.delta*rho)
        q1 = self.eps**(-self.delta)
        self.lam = q1 * torch.exp(torch.mean(torch.log(q1)))**(-self.theta*(self.delta-1)/(1+torch.mean(self.theta*(self.delta-1))))
        # Constant equilibrium investment fraction per agent type.
        pi = self.delta*self.mu/(self.sigma**2+self.nu**2) - self.theta*(self.delta-1)*self.sigma/(self.sigma**2+self.nu**2)*q
        dt = self.T/self.N
        batch, _, dim = w.size()
        X = torch.zeros(batch, self.N+1, dim)
        c = torch.zeros(batch, self.N, dim)
        X[:, 0, :] = initial
        # Forward Euler simulation of each agent's wealth path.
        for i in range(1, self.N+1):
            c[:, i-1, :] = self.c(i*dt-dt)
            X[:, i, :] = self.one_step_simulation(X[:, i-1, :],
                pi.view(-1, 1), self.c(i*dt-dt), w[:, i-1, :],
                cn[:, i]-cn[:, i-1])
        self.pi = pi
        # Geometric-mean wealth path driven only by the common noise.
        self.Xbar = torch.zeros(batch, self.N+1, dim)
        self.Xbar[:, 0, :] = torch.mean(torch.log(initial))
        d1 = torch.mean(pi*self.mu - 0.5*pi**2 *(self.sigma**2 + self.nu**2))*dt
        d2 = torch.mean(self.sigma*pi)
        for i in range(1, self.N+1):
            self.Xbar[:, i, :] = self.Xbar[:, i-1, :] + d1 - torch.mean(c[:, i-1, :])*dt + d2*(cn[:, i]-cn[:, i-1])
        self.Xbar = torch.exp(self.Xbar)
        #############
        # Closed-form expected loss of the benchmark strategy.
        # NOTE(review): this attribute assignment shadows the benchmark_loss
        # method defined below, making the method unreachable afterwards.
        f = 0
        for i in range(self.N):
            f += (rho+1/self.delta*c[:, i, :]+torch.mean(c[:, i, :])*(1-1/self.delta)*self.theta)*dt
        f = torch.exp(f)
        self.benchmark_loss = torch.mean(self.eps/(1-1/self.delta)*initial**(1-1/self.delta)*self.Xbar[:, 0, :]**(-self.theta*(1-1/self.delta))*f)
        return X, c
    def benchmark_loss(self):
        # NOTE(review): shadowed by the attribute set in benchmark() — once
        # benchmark() has run, `self.benchmark_loss` is the tensor, not this
        # method.
        return self.benchmark_loss
    def distFlow(self, X, c, rough, in_dim=2, depth=4):
        """Fit ridge regressions of log-wealth and log-consumption on the
        signature of the common-noise path, then evaluate them along time.

        Args:
            rough: signatory.Path, rough path object of common noise.
        Returns:
            mx, mc: next round conditional dist.
        """
        batch, _, _ = X.size()
        mx = torch.zeros(batch, self.N+1, 1)
        mc = torch.zeros(batch, self.N, 1)
        #mx[:, 0] = torch.exp(torch.mean(torch.log(X[:, 0])))
        self.linear = Ridge(alpha=0.1, tol=1e-6)
        # Training design: signatures at the final time, the midpoint, and a
        # zero signature for the initial time (matching the stacked labels).
        data = torch.cat([rough.signature(None, None).cpu().detach(),
                          rough.signature(None, self.N//2+1).cpu().detach(),
                          torch.zeros(batch, signatory.signature_channels(in_dim, depth))],
                         dim=0)
        label = []
        label.append(X[:, -1].cpu().detach())
        label.append(X[:, self.N//2].detach())
        label.append(X[:, 0].cpu().detach())
        label = torch.log(torch.cat(label, dim=0))
        self.linear.fit(data.numpy(), label.numpy())
        l = torch.tensor(self.linear.coef_, dtype=torch.float32).view(-1, 1)
        # i=1
        mx[:, 0] = torch.exp(torch.tensor([self.linear.intercept_]))
        for i in range(2, self.N+2):
            mx[:, i-1] = torch.exp(torch.matmul(rough.signature(end=i), l) + self.linear.intercept_)
        # Same regression for the consumption flow.
        self.linearc = Ridge(alpha=0.1, tol=1e-5)
        labelc = torch.log(torch.cat([c[:, -1].cpu().detach(),
                                      c[:, self.N//2].detach(),
                                      c[:, 0].cpu().detach()], dim=0))
        self.linearc.fit(data.numpy(), labelc.numpy())
        lc = torch.tensor(self.linearc.coef_).view(-1, 1)
        mc[:, 0] = torch.exp(torch.tensor([self.linearc.intercept_]))
        for i in range(2, self.N+1):
            mc[:, i-1] = torch.exp(torch.matmul(rough.signature(end=i), lc) + self.linearc.intercept_)
        return mx, mc
    def getDistFlow(self, batch, w0, depth):
        """Evaluate the previously fitted regressions on a fresh common-noise
        path w0 (time-augmented before taking signatures).  Requires that
        distFlow() has been called to fit self.linear / self.linearc."""
        augment = signatory.Augment(1,
                                    layer_sizes = (),
                                    kernel_size = 1,
                                    include_time = True)
        rough = signatory.Path(augment(w0), depth)
        mx = torch.zeros(batch, self.N+1, 1)
        mx[:, 0] = torch.exp(torch.tensor(self.linear.intercept_))
        mc = torch.zeros(batch, self.N, 1)
        mc[:, 0] = torch.exp(torch.tensor(self.linearc.intercept_))
        l = torch.tensor(self.linear.coef_, dtype=torch.float32).view(-1, 1)
        lc = torch.tensor(self.linearc.coef_, dtype=torch.float32).view(-1, 1)
        # i=1
        for i in range(2, self.N+2):
            mx[:, i-1] = torch.exp(torch.matmul(rough.signature(end=i), l) + self.linear.intercept_)
        for i in range(2, self.N+1):
            mc[:, i-1] = torch.exp(torch.matmul(rough.signature(end=i), lc) + self.linearc.intercept_)
        return mx, mc
| StarcoderdataPython |
9798683 | <filename>codewars/7kyu/doha22/max_lenght/max_lenght.py
def mxdiflg(a1, a2):
    """Greatest length difference between a string of a1 and a string of a2.

    Returns -1 if either list is empty.
    """
    if not a1 or not a2:
        return -1
    lengths1 = [len(s) for s in a1]
    lengths2 = [len(s) for s in a2]
    # The maximum |len(x) - len(y)| is attained at one list's longest
    # string against the other list's shortest.
    return max(max(lengths1) - min(lengths2), max(lengths2) - min(lengths1))
#print(mxdiflg(["hoqq", "bbllkw", "oox"],["cccooommaaqqoxii", "gggqaffhhh"]))
def mxdiflg2(a1, a2):
    """Greatest length difference between strings of a1 and a2, or -1 if
    either list is empty (explicit-loop variant)."""
    if a1 and a2:
        best = 0
        for s in a1:
            for t in a2:
                diff = abs(len(s) - len(t))
                if diff > best:
                    best = diff
        return best
    return -1
68709 | <reponame>troncosoae/jetson-exp
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
from tensorflow import keras
import pathlib
import time
import sys
from Net.Net import Net
if __name__ == "__main__":
DEVICE = 'gpu'
try:
DEVICE = sys.argv[1]
except IndexError:
pass
if DEVICE == 'cpu':
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
img_height = 180
img_width = 180
channels = 3
num_classes = 5
net = Net(num_classes, img_height, img_width, channels)
net.load_weights('./checkpoints/my_checkpoint')
batch_size = 32
# dataset_url = "https://storage.googleapis.com/\
# download.tensorflow.org/example_images/flower_photos.tgz"
dataset_url = "data/flower_photos.tgz"
data_dir = keras.utils.get_file(
'flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
train_ds = keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
for element in val_ds:
print(element, type(element))
start_time = time.time()
estimate = net.predict(element)
t = time.time() - start_time
print(f"--- {t} seconds ---")
print(estimate)
print(element[1])
| StarcoderdataPython |
9755093 | <filename>safe_intervals.py
import sys
from itertools import groupby, dropwhile, tee
import random
import argparse
def main(args):
    """Print genome intervals of roughly equal length, choosing split points
    that avoid the regions listed in args.interval_list.

    args.dict is a SAM sequence dictionary whose @SQ lines give chromosome
    lengths; args.numsplit is the desired number of splits.  Intervals are
    printed as "chrom:start-end" (apparently 1-based inclusive — confirm
    against the consumer).
    """
    # @SQ SN:GL000222.1 LN:18686...
    chr_header = [l.strip().split() for l in args.dict if l.startswith('@SQ')]
    assert all(
        l[1].startswith('SN') and l[2].startswith('LN') for l in chr_header
    )
    # chromosome name -> length, total length, and target size per interval.
    chr_len = {l[1][3:]: int(l[2][3:]) for l in chr_header}
    tot_len = sum(chr_len.values())
    target_split_length = tot_len // args.numsplit
    # Group interval-list records by chromosome (header lines skipped).
    for k, chrom in groupby(
            dropwhile(
                lambda l: l.startswith('@'),
                args.interval_list),
            key=lambda x: x.split('\t')[0]):
        # target_splits = int(chr_len[k] / tot_len * args.numsplit) + 1
        # target_split_length = chr_len[k] / target_splits
        # A --- B ___ C --- D
        # 3 regions
        # A -> B unknown (last)
        # B -> C unsafe to split (inter)
        # C -> D safe to split (cur)
        # invariant, |A -> B| < target_len
        A, B = 1, 0
        C, D = None, chr_len[k]
        for l in chrom:
            C, D = map(int, l.split('\t')[1:3])
            # first remove B + C
            # either by emitting A->C
            # or A->C fits in one interval
            # and is emitted in the while loop
            if C - A >= target_split_length:
                # don't bother with a tiny split
                # B -> C might be huge anyway
                if B - A < target_split_length * 0.5:
                    print(f"{k}:{A}-{C}")
                else:
                    print(f"{k}:{A}-{B}")
                    print(f"{k}:{B+1}-{C}")
                A = C + 1
            # while loop only splits in the safe C->D region
            assert C - A < target_split_length
            while A + target_split_length < D:
                print(f"{k}:{A}-{A+target_split_length}")
                A = A + target_split_length + 1
            B = D
        # Last record of a chromosome must end exactly at its length.
        assert D == chr_len[k], k
        print(f"{k}:{A}-{D}")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    # Positional args: interval list file, SAM dict file, number of splits.
    # type=open makes argparse hand main() already-opened file objects.
    parser.add_argument(
        'interval_list',
        type=open,
    )
    parser.add_argument(
        'dict',
        type=open,
    )
    parser.add_argument(
        'numsplit',
        type=int,
    )
    args = parser.parse_args()
    sys.exit(main(args))
| StarcoderdataPython |
4941350 | #!/usr/bin/env python
import roslib; roslib.load_manifest("neato_node");
import rospy
from what_is_my_name import what_is_my_name
from std_msgs.msg import Int8, String
from geometry_msgs.msg import Twist
from std_msgs.msg import Int32
from neato_driver.neato_driver import Botvac
class PacketReplicatorNode:
    '''
    ROS node that translates symbolic motion commands (FORWARD/BACKWARD/
    LEFT/RIGHT/STOP) received on the per-chair requestMotion/requestStop
    topics into Twist packets republished on chairMovement<N>.
    NOTE(review): file mixes Python-2 print statements with Python-3 style
    print() calls — this only runs under Python 2.
    '''
    def __init__(self):
        #each chair will have their own topics
        self.chairbot_number = what_is_my_name();
        #topic where we put the replicated packet
        self.chairMovement_topic_name = 'chairMovement' + self.chairbot_number
        #topic where we get the request motion packet
        self.requestMotion_topic_name = 'requestMotion' + self.chairbot_number
        #topic where we get the request stop packet
        self.requestStop_topic_name = 'requestStop' + self.chairbot_number
        self.chairMovementTopic = rospy.Publisher(self.chairMovement_topic_name, Twist, queue_size=30);
        #empty twist packet to replicate which we will fill with the right motion
        self.motion = None
        self.packet = Twist()
        #these are the motions which will be performed when a given CONSTANT is in the packet
        #this is what we actually replicate!
        self.BACKWARD = {
            'linear': {'x': 150.0, 'y':0.0, 'z':0.0},
            'angular': {'x': 0.0, 'y':0.0, 'z':0.0}
        }
        self.FORWARD = {
            'linear': {'x': -150.0, 'y':0.0, 'z':0.0},
            'angular': {'x': 0.0, 'y':0.0, 'z':0.0}
        }
        self.LEFT = {
            'linear': {'x': 0.0, 'y':0.0, 'z':0.0},
            'angular': {'x': 0.0, 'y':0.0, 'z':50.0}
        }
        self.RIGHT = {
            'linear': {'x': 0.0, 'y':0.0, 'z':0.0},
            'angular': {'x': 0.0, 'y':0.0, 'z':-50.0}
        }
        self.STOP_MOTION = {
            'linear': {'x': 0.0, 'y':0.0, 'z':0.0},
            'angular': {'x': 0.0, 'y':0.0, 'z':0.0}
        }
        #dict mapping the constants to the actual motion dictionaries
        self.MOTIONS = { 'BACKWARD' : self.BACKWARD,
            "FORWARD": self.FORWARD,
            'LEFT': self.LEFT,
            'RIGHT': self.RIGHT,
            'STOP' : self.STOP_MOTION
        }
        #this tracks whether we are told to stop or not
        self.STOP_FLAG = False
        #requestMotion topic which receives MOTION commands from the frontends
        rospy.Subscriber(self.requestMotion_topic_name, String, self.motion_callback, queue_size=10)
        #stopMotion topic which receives STOP commands from the frontend
        rospy.Subscriber(self.requestStop_topic_name, String, self.motion_callback, queue_size=10)
        #initialize a ros node
        rospy.init_node('packet_replicator_' + self.chairbot_number)
    def motion_callback(self, msg):
        '''
        Takes a motion message request (std_msgs/String) and sets the stop
        flag or the pending motion dictionary accordingly.
        '''
        print "We got a msg", msg.data
        msg = msg.data #just unwrap the command
        if msg == 'STOP': # we were given the STOP command
            rospy.loginfo("We got a STOP")
            self.STOP_FLAG = True
        else: # we were given a MOTION command
            self.STOP_FLAG = False
            self.motion = self.MOTIONS[msg]
            print("The motion is gonna be ", self.motion)
    def spin(self):
        '''Publish the most recently requested motion at a fixed rate until
        ROS shutdown.  NOTE(review): the `continue` while self.motion is None
        skips r.sleep(), so this busy-waits before the first command.'''
        self.r = rospy.Rate(20) # WHY 20 ???
        while not rospy.is_shutdown():
            if self.motion is None:
                #print "Waiting for motion"
                continue; #try again!
            if self.STOP_FLAG is True:
                rospy.loginfo("Stopping")
            else:
                rospy.loginfo("Moving")
                pass;
            rospy.loginfo("Replicating the packet")
            rospy.loginfo(self.motion)
            #populate the packet with the movement commands
            #for that motion which were set by the motion_callback
            self.packet.linear.x = self.motion['linear']['x']
            self.packet.linear.y = self.motion['linear']['y']
            self.packet.linear.z = self.motion['linear']['z']
            self.packet.angular.x = self.motion['angular']['x']
            self.packet.angular.y = self.motion['angular']['y']
            self.packet.angular.z = self.motion['angular']['z']
            self.chairMovementTopic.publish(self.packet)
            self.r.sleep()
if __name__ == "__main__":
robot = PacketReplicatorNode()
robot.spin()
| StarcoderdataPython |
1619480 | <reponame>human-science/adpy
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Insert preamble to the specific asciidoc files in adoc folder.
# Toll's default preamble can be overridden by preamble written in a specific asciidoc file from adoc folder.
import re
import sys
import os
def main():
    """Prepend the shared asciidoc preamble to every '.adoc' file in the
    current directory.

    The document language is taken from the first CLI argument.  Files are
    read and rewritten with the utf-8-sig codec, preserving the BOM the
    original code emitted.
    """
    args = sys.argv
    langName = args[1]
    preamble = ''':doctype: book
:lang: ''' + langName + '''
:hardbreaks:
:sectnums!:
:sectlinks:
:sectids:
:experimental:
:toc: macro
:docinfo: shared
:docinfodir: ../../lib
ifdef::backend-html5[:nofooter:]
ifdef::backend-html5[:linkcss:]
:idprefix:
:imagesdir: images
:toclevels: 2
:stylesdir: css
:icons: font
:chapter-label:
:leveloffset: 1
'''
    for file in os.listdir('.'):
        fname, ext = os.path.splitext(file)
        if ext != '.adoc':
            continue
        # Bug fix: the original opened the file twice without closing either
        # handle ('f.close' lacked parentheses, so close() never ran).  Use
        # context managers and an explicit read-then-rewrite instead of 'r+'.
        with open(file, 'r', encoding='utf_8_sig') as f:
            lines = f.readlines()
        lines.insert(0, preamble + '\n')
        with open(file, 'w', encoding='utf_8_sig') as f:
            f.writelines(lines)
# Script entry point: insert the preamble into all .adoc files in cwd.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3448301 | <gh_stars>1-10
import unittest
from main import *
from unittest.mock import MagicMock, Mock
class TestSolver(unittest.TestCase):
    """Unit tests for the polynomial ``Solver`` class from ``main``.

    Note: ``assertEquals`` (used in the original) is a deprecated alias
    that was removed in Python 3.12; ``assertEqual`` is used throughout.
    """

    def test_zero_order_root(self):
        # A zero-order polynomial: 0 has every x as a root ("all"),
        # a non-zero constant has none ("undefined").
        params = [[0], [4]]
        expected = ["all", "undefined"]
        for i in range(len(expected)):
            solver = Solver(params[i])
            solver.zero_order_root()
            self.assertEqual(solver.roots[0], expected[i], "Get roots error")

    def test_get_order(self):
        params = [[0], [0, 1], [-1, 1, 1, 1]]
        expected = [0, 1, 3]
        for i in range(len(expected)):
            solver = Solver(params[i])
            result = solver.get_order()
            self.assertEqual(result, expected[i], "Order failed")

    def test_get_roots_zero_path(self):
        # get_roots() must dispatch to zero_order_root() for order 0.
        solver = Solver([0])
        solver.get_order = MagicMock(return_value=0)
        solver.zero_order_root = Mock()
        solver.get_roots()
        solver.zero_order_root.assert_called_once()

    def test_get_roots_higher_order_path(self):
        # get_roots() must dispatch to get_newton_root() for order >= 1.
        solver = Solver([1, 2, 3])
        solver.get_order = MagicMock(return_value=2)
        solver.get_newton_root = Mock()
        solver.get_roots()
        solver.get_newton_root.assert_called_once()

    def test_get_function(self):
        x_val = 1
        params = [[1, 2], [0, 2, 2]]
        expected = [3, 4]
        for i in range(len(expected)):
            solver = Solver(params[i])
            result = solver.get_function(x_val)
            self.assertEqual(result, expected[i], "Error function")

    def test_get_derivative(self):
        x_val = 1
        params = [[1, 2], [0, 2, 2]]
        expected = [2, 6]
        for i in range(len(expected)):
            solver = Solver(params[i])
            result = solver.get_derivative(x_val)
            self.assertEqual(result, expected[i], "Error deriv")
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| StarcoderdataPython |
9621073 | <filename>tools/gt_map_visualization/gps_from_bag.py<gh_stars>10-100
#!/usr/bin/env python
#
# example usage:
#
import rosbag, rospy
import glob
import argparse
import numpy as np
import tf
import tf.transformations
import utm
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='extract trajectories from bag file')
    parser.add_argument('--bag', '-b', action='store', default=None, required=True,
                        help='name of bag file')
    topics = ['/gps/fix']
    args = parser.parse_args()
    fileNames = ['gps_traj.txt']
    # `with` guarantees the trajectory file is flushed and closed even if
    # reading the bag raises.
    with open(fileNames[0], 'w') as gpsFile:
        for bagfile in [args.bag]:
            bag = rosbag.Bag(bagfile, 'r')
            it = bag.read_messages(topics=topics)
            for (topic, msg, tros) in it:
                if (topic == '/gps/fix'):  # gps long/lat
                    utm_pos = utm.from_latlon(msg.latitude, msg.longitude)
                    a = msg.altitude  # ignored here
                    # Pose as a 4x4 homogeneous matrix with UTM x/y, z = 0.
                    T = tf.transformations.identity_matrix()
                    T[0:3, 3] = np.array([utm_pos[0], utm_pos[1], 0])
                    gpsFile.write("%.6f %s\n" % (msg.header.stamp.to_nsec()/1e9, ' '.join(map(str, T.ravel()[0:12].tolist()))))
            bag.close()  # release the bag file handle
    # Bug fix: the original used a Python 2 print *statement*, which is a
    # SyntaxError under Python 3.
    print("wrote to files: %s" % ", ".join(fileNames))
| StarcoderdataPython |
3227397 | <filename>project.py<gh_stars>0
import collections
import os
import re
import shutil
import subprocess
import sys
import urllib.request
import zipfile
import string
import logging
import logging.handlers
import struct
import yaml
# from configparser import ConfigParser
from pathlib import Path
import exceptions
# Resolve the location of this script: in a frozen (e.g. PyInstaller) build
# __file__ is not reliable, so fall back to the executable's path.
if getattr(sys, 'frozen', False):
    # THIS_FILE_PATH = Path(os.path.dirname(sys.executable))
    THIS_FILE_PATH = Path(sys.executable)
elif __file__:
    THIS_FILE_PATH = Path(__file__)
class Project(object):
    def __init__(self, logger=None):
        """Helper that installs SHARKtools and its plugins into a project dir.

        If no *logger* is supplied, a rotating file logger is configured.
        ``self.steps`` maps (Swedish) step labels to handler methods and is
        dispatched on by :meth:`run_step`.
        """
        self.logger = logger
        if not logger:
            self.logging_level = 'DEBUG'
            self.logging_format = '%(asctime)s [%(levelname)10s] %(pathname)s [%(lineno)d] => %(funcName)s(): %(message)s'
            self._setup_logger()
        self._python_path = None      # path to python.exe, set later
        self._python_version = None   # e.g. '36', parsed from install dir name
        self.venv_name = 'venv'
        self._bit_version = struct.calcsize("P") * 8  # 32 or 64 bit interpreter
        self.available_plugins = []
        self.selected_plugins = []
        self.copied_packages = []
        self.steps = collections.OrderedDict({'Ladda ner programmet': self.download_program,
                                              'Ladda ner plugins': self.download_plugins,
                                              'Ladda ner smhi-paket': self.download_packages,
                                              'Skapa virtuell python-miljö': self.create_environment,
                                              'Installera python-paket (requirements)': self.install_packages,
                                              'Skapa körbar bat-fil': self.create_run_program_file})
        # Triggers the `directory` setter, which derives all sub-paths.
        self.directory = 'C:/'
        self._find_plugins()
        self._find_python_exe()
    def _setup_logger(self, **kwargs):
        """Configure a daily-rotating file logger under <script dir>/log.

        NOTE(review): *kwargs* is unused, and calling this twice would
        attach duplicate handlers to the same named logger.
        """
        name = Path(__file__).stem
        self.logger = logging.getLogger(name)
        self.logger.setLevel(self.logging_level)
        directory = Path(THIS_FILE_PATH.parent, 'log')
        if not directory.exists():
            os.makedirs(directory)
        file_path = Path(directory, 'install.log')
        # Rotate daily, keep one week of history.
        handler = logging.handlers.TimedRotatingFileHandler(str(file_path), when='D', interval=1, backupCount=7)
        formatter = logging.Formatter(self.logging_format)
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
    @property
    def directory(self):
        """
        Project will be created under this directory.

        Returned as a pathlib.Path; assigning to it (see setter) also
        derives every sub-path used by the installer.
        """
        return self.__directory
    @directory.setter
    def directory(self, directory):
        # The user's chosen root; the project itself lives in a
        # 'SHARKtools' folder directly beneath it.
        self.root_directory = directory
        self.__directory = Path(directory, 'SHARKtools')
        # Main program tree and its plugins.
        self.program_directory = Path(self.directory, 'SHARKtools')
        self.plugins_directory = Path(self.program_directory, 'plugins')
        self.package_directory = self.program_directory
        # Wheel cache and package config shipped next to this installer.
        self.wheels_source_directory = Path(THIS_FILE_PATH.parent, 'wheels')
        self.smhi_packages_config_file = Path(THIS_FILE_PATH.parent, 'sharksmhi_packages.yaml')
        self.install_history_directory = Path(self.directory, 'install_history')
        self.wheels_directory = Path(self.install_history_directory, 'wheels')
        self.venv_directory = Path(self.program_directory, self.venv_name)
        # Scratch directories used while downloading/unpacking.
        self.temp_directory = Path(self.directory, '_temp_sharktools')
        self.temp_program_dir = Path(self.temp_directory, 'temp_program')
        self.temp_plugins_dir = Path(self.temp_directory, 'temp_plugins')
        self.temp_packages_dir = Path(self.temp_directory, 'temp_packages')
        self.temp_move_plugins_dir = Path(self.temp_directory, 'temp_subdirs')
        # Generated batch files and bookkeeping paths.
        self.batch_file_create_venv = Path(self.install_history_directory, 'create_venv.bat')
        self.batch_file_install_requirements = Path(self.install_history_directory, 'install_requirements.bat')
        self.batch_file_run_program = Path(self.directory, 'run_program.bat')
        self.log_file_path = Path(self.install_history_directory, 'install.log')
        self.requirements_file_path = Path(self.install_history_directory, 'requirements.txt')
        self.git_root_url = 'https://github.com/sharksmhi/'
def run_step(self, step, **kwargs):
"""
Step matches keys in self.steps
:param step: str
:return:
"""
if kwargs.get('use_git'):
self.package_directory = self.directory
else:
self.package_directory = self.program_directory
func = self.steps.get(step)
if func:
all_ok = func(**kwargs)
return all_ok
def setup_project(self):
"""
Sets up the project. Copies files from self.temp_directory. Main program and plugins.
:return:
"""
if self.directory is None:
self.logger.error('Project directory not set!')
raise NotADirectoryError('No directory found')
if not os.path.exists(self.directory):
os.makedirs(self.directory)
if not os.path.exists(self.wheels_directory):
os.makedirs(self.wheels_directory)
if not os.path.exists(self.install_history_directory):
os.makedirs(self.install_history_directory)
def download_program(self, use_git=False, **kwargs):
# self._reset_directory(self.temp_program_dir)
if use_git:
self._clone_or_pull_main_program()
else:
self._download_main_program_from_github()
self._unzip_main_program()
self._copy_main_program()
def _clone_or_pull_main_program(self):
if 'SHARKtools' in [path.name for path in self.directory.iterdir()]:
self._pull_main_program()
else:
self._clone_main_program()
def _clone_main_program(self):
file_path = Path(self.install_history_directory, 'git_clone_main_program.bat')
lines = [f'cd {self.directory}',
f'git clone {self.git_root_url}SHARKtools.git"']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
def _pull_main_program(self):
file_path = Path(self.install_history_directory, 'git_pull_main_program.bat')
lines = [f'cd {self.program_directory}',
f'git pull']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
def download_plugins(self, use_git=False, **kwargs):
# self._reset_directory(self.temp_plugins_dir)
self._create_directory(self.plugins_directory)
if use_git:
self._clone_or_pull_plugins()
else:
self._download_plugins_from_github()
self._unzip_plugins()
self._copy_plugins()
    def _clone_or_pull_plugins(self):
        # Pull plugins that are already checked out, clone the rest.
        installed_plugins = [path.name for path in self.plugins_directory.iterdir()]
        for plugin in self.selected_plugins:
            if plugin in installed_plugins:
                self._pull_plugin(plugin)
            else:
                self._clone_plugin(plugin)
def _clone_plugin(self, plugin):
file_path = Path(self.install_history_directory, f'git_clone_plugin_{plugin}.bat')
lines = [f'cd {self.plugins_directory}',
f'git clone {self.git_root_url}{plugin}.git"']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
    def _pull_plugin(self, plugin):
        # Write and run a batch file doing `git pull` inside the plugin dir.
        file_path = Path(self.install_history_directory, f'git_pull_plugin_{plugin}.bat')
        lines = [f'cd {Path(self.plugins_directory, plugin)}',
                 f'git pull']
        with open(file_path, 'w') as fid:
            fid.write('\n'.join(lines))
        self._run_batch_file(file_path)
def download_packages(self, use_git=False, **kwargs):
# self._reset_directory(self.temp_packages_dir)
if use_git:
self._clone_or_pull_packages()
else:
self._download_packages_from_github()
self._unzip_packages()
self._copy_packages()
    def _clone_or_pull_packages(self):
        # Pull packages that are already checked out, clone the rest.
        installed_packages = [path.name for path in self.directory.iterdir()]
        for pack in self._get_packages_to_download_from_github():
            if pack in installed_packages:
                self._pull_package(pack)
            else:
                self._clone_package(pack)
def _clone_package(self, pack):
file_path = Path(self.install_history_directory, f'git_clone_package_{pack}.bat')
lines = [f'cd {self.package_directory}',
f'git clone {self.git_root_url}{pack}.git"']
with open(file_path, 'w') as fid:
fid.write('\n'.join(lines))
self._run_batch_file(file_path)
    def _pull_package(self, pack):
        # Write and run a batch file doing `git pull` inside the package dir.
        file_path = Path(self.install_history_directory, f'git_pull_package_{pack}.bat')
        lines = [f'cd {Path(self.package_directory, pack)}',
                 f'git pull']
        with open(file_path, 'w') as fid:
            fid.write('\n'.join(lines))
        self._run_batch_file(file_path)
    def create_environment(self, **kwargs):
        """
        Create a batch file and run it to create a virtual environment.

        Any existing venv is deleted first; a .pth file pointing at the
        locally checked-out packages is written afterwards.
        :return:
        """
        # Delete old environment
        self._delete(self.venv_directory)
        # Create file
        self._create_batch_environment_file()
        # Run file
        self._run_batch_file(self.batch_file_create_venv)
        self._create_pth_file()
        # Install python packages
        # self.install_packages()
    def install_packages(self, **kwargs):
        """
        Installs packages in self.requirements_file_path into the virtual environment.

        Builds the requirements file, writes the install batch file and runs
        it, then refreshes the venv's .pth file.
        :raises exceptions.MissingVenvException: if the venv does not exist
        :return: True (always; kept for caller compatibility)
        """
        if not os.path.exists(self.venv_directory):
            self.logger.error('No venv found')
            raise exceptions.MissingVenvException('Virtuell pythonmiljö saknas. Skapa en miljö innan du installerar paket!')
        all_ok = True
        if not self.wheels_source_directory.exists():
            os.makedirs(self.wheels_source_directory)
        # self._copy_wheels()
        self._create_requirements_file()
        self._create_batch_install_requirements_file()
        self._run_batch_file(self.batch_file_install_requirements)
        self._create_pth_file()
        return all_ok
    def _create_pth_file(self):
        # Write a .pth file into the venv's site-packages so the locally
        # checked-out sharksmhi packages are importable without installing.
        # NOTE(review): the file is literally named '.pth' (no stem) -
        # confirm `site` picks it up on all supported platforms.
        packages = self._get_packages_to_download_from_github()
        paths = {path.name: path for path in self.package_directory.iterdir()}
        lines = []
        for pack in packages:
            path = paths.get(pack)
            if not path:
                continue
            lines.append(str(path))
        file_path = Path(self.venv_directory, 'Lib', 'site-packages', '.pth')
        with open(file_path, 'w') as fid:
            fid.write('\n'.join(lines))
def _get_wheel_rel_path_for_package(self, package):
path = self._get_wheel_path_for_package(package)
if not path:
return
return f'./{path.relative_to(self.install_history_directory)}'.replace('\\', '/')
def _get_wheel_path_for_package(self, package):
if not self.install_history_directory:
return
if not self.wheels_directory.exists():
return
pack = package.lower()
for path in self.wheels_directory.iterdir():
if path.suffix != '.whl':
continue
name = path.name.lower()
if pack in name:
if (f'cp{self._python_version}' in name and f'{self._bit_version}.whl' in name) or 'none-any' in name:
return path
    def _old_copy_wheels(self):
        # Legacy bulk wheel copy.  The '_old_' prefix suggests it is unused
        # and superseded by _copy_wheel_to_local - confirm before removal.
        # The print() calls below look like leftover debug output.
        if not self.wheels_directory.exists():
            os.makedirs(self.wheels_directory)
        existing_wheels = [path.name for path in self.wheels_directory.iterdir()]
        for path in self.wheels_source_directory.iterdir():
            if path.name in existing_wheels:
                continue
            name = path.name.lower()
            print('self._python_version', self._python_version)
            print('self._bit_version', self._bit_version)
            if (f'cp{self._python_version}' in name and f'{self._bit_version}.whl' in name) or 'none-any' in name:
                shutil.copy2(str(path), str(Path(self.wheels_directory, path.name)))
    def _get_source_wheel_for_package(self, package, and_not=None):
        # Find a wheel in the shipped wheel cache matching *package* and the
        # current python/bitness.  *and_not* excludes near-miss names, e.g.
        # 'geopandas' when the caller is looking for 'pandas'.
        pack = package.lower()
        for path in self.wheels_source_directory.iterdir():
            name = path.name.lower()
            if and_not and and_not in name:
                continue
            if pack not in name:
                continue
            if (f'cp{self._python_version}' in name and f'{self._bit_version}.whl' in name) or 'none-any' in name:
                return path
def _copy_wheel_to_local(self, source_path):
if not source_path.exists():
return
target_path = Path(self.wheels_directory, source_path.name)
if target_path.exists():
return
if not self.wheels_directory.exists():
os.makedirs(self.wheels_directory)
shutil.copy2(str(source_path), str(target_path))
def _create_requirements_file(self, use_git=False):
"""
Look for requirement files and stores valid lines in self.requirements_file_path
:return:
"""
local_packages = [path.name for path in self.package_directory.iterdir()]
lines = []
if 'ctdpy' in local_packages:
lines.extend(['shapely', 'gdal', 'fiona', 'six', 'rtree', 'geopandas'])
for root, dirs, files in os.walk(self.package_directory, topdown=False):
for name in files:
if name == 'requirements.txt':
file_path = Path(root, name)
with open(file_path) as fid:
for line in fid:
module = line.strip()
if module.startswith('#'):
continue
if module and module not in lines:
lines.append(module)
# Remove duplicates
keep_dict = {}
for item in set(lines):
item = item.strip()
# if item.startswith('#'):
# continue
split_item = item.strip().split('==')
pack = split_item[0]
keep_dict.setdefault(pack, set())
keep_dict[pack].add(item)
keep_pip_list = []
keep_wheel_list = []
for pack, value in keep_dict.items():
if pack in local_packages:
continue
and_not = None
if pack == 'pandas':
and_not = 'geopandas'
source_wheel_path = self._get_source_wheel_for_package(pack, and_not=and_not)
if source_wheel_path:
self._copy_wheel_to_local(source_wheel_path)
wheel_path = self._get_wheel_rel_path_for_package(pack)
if wheel_path:
keep_wheel_list.append(wheel_path)
else:
if len(value) == 1:
keep_pip_list.append(list(value)[0])
else:
keep_pip_list.append(pack)
# Write to file
keep_list = keep_wheel_list + keep_pip_list
with open(self.requirements_file_path, 'w') as fid:
fid.write('\n'.join(keep_list))
def old_create_requirements_file_pipwin(self):
"""
Look for requirement files and stores valid lines in self.requirements_file_path
:return:
"""
lines = {} # Is sorted by default
for root, dirs, files in os.walk(self.program_directory, topdown=False):
for name in files:
if name == 'requirements.txt':
file_path = Path(root, name)
print(file_path)
with open(file_path) as fid:
for line in fid:
line = line.strip()
if not line:
continue
if line.startswith('# '):
continue
module = line
module_name = module
wheel = False
module_nr = 0
if line.startswith('#wheel'):
wheel = True
module = module.split(' ')[1]
module_name = module
if '==' in module:
module_name, module_nr = module.split('==')
module_nr = int(module_nr.replace('.', ''))
if module_name not in lines:
print('0', module_name)
lines[module_name] = dict(text=f'{line} \t# {file_path}',
nr=module_nr,
wheel=wheel)
else:
if not wheel and lines[module_name]['wheel']:
continue
if wheel and not lines[module_name]['wheel']:
lines[module_name] = dict(text=f'{line} \t# {file_path}',
nr=module_nr,
wheel=wheel)
continue
if module_nr > lines[module_name]['nr']:
lines[module_name] = dict(text=f'{line} \t# {file_path}',
nr=module_nr,
wheel=wheel)
continue
# Write to file
with open(self.requirements_file_path, 'w') as fid:
fid.write('\n'.join([lines[key]['text'] for key in lines]))
    def _get_requirements_list_from_url(self, url):
        """Download a requirements.txt from *url* and return its stripped lines."""
        try:
            with urllib.request.urlopen(url) as f:
                content_str = f.read().decode('utf-8')
                return [item.strip() for item in content_str.split('\n')]
        except Exception as e:
            # Log the failing URL, then let the caller decide how to handle it.
            self.logger.error(f'Could not download info from URL: {url}')
            raise
    def _get_packages_to_download_from_github(self):
        """Read sharksmhi_packages.yaml and return a {package: url} dict.

        Each yaml list entry is expected to look like '<package> = <url>'.
        """
        to_download = {}
        if not self.smhi_packages_config_file.exists():
            raise FileNotFoundError(self.smhi_packages_config_file)
        with open(self.smhi_packages_config_file) as fid:
            data = yaml.load(fid, Loader=yaml.FullLoader)
        for plugin, item_list in data.items():
            for item in item_list:
                pack, url = [value.strip() for value in item.split('=')]
                to_download[pack] = url
        return to_download
    def _download_packages_from_github(self):
        # Download a zip snapshot of every configured sharksmhi package.
        packages_to_download = self._get_packages_to_download_from_github()
        for pack, url in packages_to_download.items():
            self._download_package_from_github(pack, url)
    def _download_package_from_github(self, package, url):
        # Fetch the package zip into the temp packages directory.
        urllib.request.urlretrieve(url, r'{}/{}.zip'.format(self.temp_packages_dir, package))
    def _copy_packages(self):
        """Copy unzipped sharksmhi packages into the program directory.

        GitHub zipballs unpack to '<org>-<repo>-<sha>' folders; the repo
        name is extracted from that pattern.  Names of successfully copied
        packages are collected in self.copied_packages.
        """
        self.copied_packages = []
        self._check_path(self.temp_packages_dir)
        all_dirs = os.listdir(self.temp_packages_dir)
        for _dir in all_dirs:
            match = re.findall('-.*-', _dir)
            if not match:
                continue
            package = match[0].strip('-')
            source_dir = Path(self.temp_packages_dir, _dir, package)
            target_dir = Path(self.program_directory, package)
            self._delete(target_dir)
            shutil.copytree(source_dir, target_dir)
            # Copy requirements.txt
            source_req_file_path = Path(self.temp_packages_dir, _dir, 'requirements.txt')
            if source_req_file_path.exists():
                target_req_file_path = Path(target_dir, 'requirements.txt')
                shutil.copy2(source_req_file_path, target_req_file_path)
            self.logger.info(f'Package {package} copied to {target_dir}')
            self.copied_packages.append(package)
def create_run_program_file(self, **kwargs):
"""
Creates a batch file that can be used to run the program.
:return:
"""
self._check_path(self.batch_file_run_program)
# Check if all info exists
if not os.path.exists(self.program_directory) or not os.listdir(self.program_directory):
raise exceptions.CantRunProgramException('Huvudprogram är inte nedladdat')
elif not os.path.exists(self.venv_directory) or not os.listdir(self.venv_directory):
raise exceptions.CantRunProgramException('Virtuell miljö är inte skapad')
# elif not os.path.exists(self.sharkpylib_directory) or not os.listdir(self.sharkpylib_directory):
# raise exceptions.CantRunProgramException('sharkpylib är inte nedladdat')
lines = []
lines.append(f'call {Path(self.venv_directory, "Scripts", "activate")}')
lines.append(f'cd {self.program_directory}')
lines.append(f'python main.py')
lines.append(f'pause')
with open(self.batch_file_run_program, 'w') as fid:
fid.write('\n'.join(lines))
def _create_batch_install_requirements_file(self):
"""
Creates a batch file that installs packages to the virtual environment.
:return:
"""
lines = []
env_activate_path = Path(self.venv_directory, 'Scripts', 'activate')
lines.append(f'call {env_activate_path}')
lines.append('python -m pip install --upgrade pip')
# wheel_files = os.listdir(self.wheels_directory)
#
# # Look for pyproj
# for file_name in wheel_files[:]:
# if 'pyproj' in file_name:
# lines.append(f'pip install {Path(self.wheels_directory, file_name)}')
# wheel_files.pop(wheel_files.index(file_name))
#
# # Install the rest
# for file_name in wheel_files:
# lines.append(f'pip install {Path(self.wheels_directory, file_name)}')
# Add requirements file
lines.append(f'pip install -r {self.requirements_file_path}')
# with open(self.requirements_file_path) as fid:
# for line in fid:
# line = line.strip()
# if line.startswith('#reinstall'):
# pack = line.split(' ')[1]
# lines.append(f'pip install --upgrade --force-reinstall {pack}')
with open(self.batch_file_install_requirements, 'w') as fid:
fid.write('\n'.join(lines))
def old_create_batch_install_requirements_file_pipwin(self):
"""
Creates a batch file that installs packages to the virtual environment.
:return:
"""
lines = []
env_activate_path = Path(self.venv_directory, 'Scripts', 'activate')
lines.append(f'call {env_activate_path}')
lines.append('python -m pip install --upgrade pip')
lines.append('')
lines.append('pip install wheel')
lines.append('pip install pipwin')
with open(self.requirements_file_path) as fid:
for line in fid:
line = line.strip()
if line.startswith('#wheel'):
pack = line.split(' ')[1]
lines.append(f'pipwin install {pack}')
# Add requirements file
lines.append('')
lines.append(f'pip install -r {self.requirements_file_path}')
lines.append('')
with open(self.requirements_file_path) as fid:
for line in fid:
line = line.strip()
if line.startswith('#reinstall'):
pack = line.split(' ')[1]
lines.append(f'pip install --upgrade --force-reinstall {pack}')
with open(self.batch_file_install_requirements, 'w') as fid:
fid.write('\n'.join(lines))
def _create_batch_environment_file(self):
self._check_path(self.directory)
if not self._python_path:
self.logger.error('Invalid python.exe file')
raise FileNotFoundError
lines = []
disk = str(self.venv_directory.parent)[0]
# Browse to disk
lines.append(f'{disk}:')
# Go to python environment directory
lines.append(f'cd {self.venv_directory.parent}')
# Create environment
lines.append(f'call {self._python_path} -m venv {self.venv_name}')
with open(self.batch_file_create_venv, 'w') as fid:
fid.write('\n'.join(lines))
def select_plugins(self, plugins_list):
for plugin in plugins_list:
if plugin not in self.available_plugins:
self.logger.error('Not a valid plugin: {}'.format(plugin))
raise ValueError
self.selected_plugins.append(plugin)
def set_python_path(self, python_exe):
"""
Sets the python directory (version) used to create the python environment.
:param python_directory: str
:return: None
"""
python_exe = Path(python_exe)
if not python_exe.exists():
self.logger.error('Not a valid python!')
raise FileNotFoundError
if not python_exe.name =='python.exe':
self.logger.error('Not a valid python!')
raise FileNotFoundError
self._python_path = python_exe
self._python_version = ''.join([s for s in list(str(self._python_path.parent.name)) if s in string.digits + '-'])
self._save_python_path()
def get_python_path(self):
return self._python_path
    def _find_plugins(self):
        # Scrape the sharksmhi GitHub organisation page for SHARKtools_*
        # repository names; on any failure (e.g. offline) fall back to the
        # locally cached plugin list.
        try:
            resp = urllib.request.urlopen(r'https://github.com/orgs/sharksmhi/repositories')
            data = resp.read().decode('UTF-8')
            self.available_plugins = sorted(set(re.findall('SHARKtools_[a-zA-Z0-9_]+', data)))
            # Remove SHARKtools_install (this program)
            if 'SHARKtools_install' in self.available_plugins:
                self.available_plugins.pop(self.available_plugins.index('SHARKtools_install'))
            self._save_plugins()
        except:
            self._load_plugins()
    def _find_python_exe(self, root_folder='C:/'):
        """Locate python.exe: first from the cache file, then on sys.path.

        Returns True when a path was found, otherwise False.
        NOTE(review): *root_folder* is unused, and only 'python36' install
        paths are searched - confirm whether newer versions should match.
        """
        self._python_path = None
        if self._load_python_path():
            return True
        for path in sorted(sys.path):
            if 'python36' in path.lower():
                file_list = os.listdir(path)
                for file_name in file_list:
                    if file_name == 'python.exe':
                        self._python_path = Path(path, file_name)
                        self.logger.info(f'Found python path: {self._python_path}')
                        return True
        self.logger.warning('python.exe not found!')
        return False
def _save_python_path(self):
if not self._python_path:
return False
with open('python_path', 'w') as fid:
fid.write(str(self._python_path))
return True
def _load_python_path(self):
self._python_path = None
if not os.path.exists('python_path'):
return False
with open('python_path') as fid:
line = fid.readline().strip()
if line and os.path.exists(line):
self._python_path = line
self.logger.info(f'python.exe path taken from file: {self._python_path}')
return True
return False
    def _save_plugins(self):
        # Cache the scraped plugin list next to this script (offline fallback).
        with open(Path(THIS_FILE_PATH.parent, 'plugins'), 'w') as fid:
            fid.write('\n'.join(self.available_plugins))
    def _load_plugins(self):
        """Load the cached plugin list; return True if any plugin was found."""
        self.available_plugins = []
        with open(Path(THIS_FILE_PATH.parent, 'plugins')) as fid:
            for line in fid:
                line = line.strip()
                if line:
                    self.available_plugins.append(line)
        self.available_plugins = sorted(self.available_plugins)
        if self.available_plugins:
            return True
        else:
            return False
    def _download_main_program_from_github(self):
        # Fetch a zipball of the SHARKtools master branch into the temp dir.
        self._check_path(self.temp_program_dir)
        url = r'https://github.com/sharksmhi/SHARKtools/zipball/master/'
        urllib.request.urlretrieve(url, r'{}/SHARKtools.zip'.format(self.temp_program_dir))
    def _download_plugins_from_github(self):
        # Fetch a zipball of each selected plugin (main branch) into a
        # freshly reset temp directory.  The print() calls look like
        # leftover debug output.
        self._reset_directory(self.temp_plugins_dir)
        # Plugins
        for plugin in self.selected_plugins:
            url = r'https://github.com/sharksmhi/{}/zipball/main/'.format(plugin)
            item = r'{}/{}.zip'.format(self.temp_plugins_dir, plugin)
            print('url', url)
            print('item', item)
            urllib.request.urlretrieve(url, item)
# def _unzip_files(self):
# # Unzip
# file_list = os.listdir(self.temp_directory)
# for file_name in file_list:
# if file_name[:-4] in (['SHARKtools'] + self.selected_plugins):
# file_path = Path(self.temp_directory, file_name)
# with zipfile.ZipFile(file_path, "r") as zip_ref:
# zip_ref.extractall(self.temp_directory)
def _unzip_files(self, directory):
file_list = os.listdir(directory)
for file_name in file_list:
file_path = Path(directory, file_name)
try:
with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(directory)
except Exception as e:
print('Exception!!!')
print(e)
    def _unzip_packages(self):
        # Unpack all downloaded package zipballs in place.
        self._unzip_files(self.temp_packages_dir)
    def _unzip_plugins(self):
        # Unpack all downloaded plugin zipballs in place.
        self._unzip_files(self.temp_plugins_dir)
    def _unzip_main_program(self):
        # Unpack the downloaded main-program zipball in place.
        self._unzip_files(self.temp_program_dir)
    def _copy_main_program(self):
        """Copy the unzipped main program into place, preserving plugins.

        Existing plugins are stashed in a temp dir, the program directory is
        replaced with the fresh zipball content, then the plugins are
        restored.  Zipball folders look like '<org>-SHARKtools-<sha>'.
        """
        self._check_path(self.program_directory)
        all_files = os.listdir(self.temp_program_dir)
        # Copy main program
        for file_name in all_files:
            if '-SHARKtools-' in file_name:
                # First save plugins
                self._save_subdirs_temporary()
                # Now copy main program
                source_dir = Path(self.temp_program_dir, file_name)
                target_dir = Path(self.program_directory)
                self._delete(target_dir)
                shutil.copytree(source_dir, target_dir)
                # Finally import temporary saved plugins
                self._import_temporary_subdirs_plugins()
                break
    def _save_subdirs_temporary(self):
        # Stash the current plugins tree so it survives the replacement of
        # the program directory by _copy_main_program.
        # Copy plugins
        self._create_directory(self.temp_move_plugins_dir)
        source_dir = self.plugins_directory
        self._create_directory(source_dir)
        self._delete(self.temp_move_plugins_dir)
        shutil.copytree(source_dir, self.temp_move_plugins_dir)
        # # Copy sharkpylib
        # source_dir = Path(self.program_directory, 'sharkpylib')
        # self._create_directory(source_dir)
        # self._delete(self.temp_packages_dir)
        # shutil.copytree(source_dir, self.temp_packages_dir)
    def _import_temporary_subdirs_plugins(self):
        # Restore the plugins stashed by _save_subdirs_temporary into the
        # (freshly replaced) program directory, removing the stash as we go.
        # Copy plugins
        if not os.path.exists(self.temp_move_plugins_dir):
            self.logger.warning(f'No temporary plugins: {self.temp_move_plugins_dir}')
            raise Exception
        plugin_dirs = os.listdir(self.temp_move_plugins_dir)
        for plugin_name in plugin_dirs:
            source_dir = Path(self.temp_move_plugins_dir, plugin_name)
            target_dir = Path(self.plugins_directory, plugin_name)
            self._delete(target_dir)
            if not source_dir.is_dir():
                continue
            shutil.copytree(source_dir, target_dir)
            self._delete(source_dir)
        # # Copy sharkpylib
        # if not os.path.exists(self.temp_packages_dir):
        #     self.logger.warning(f'No temporary sharkpylib: {self.temp_packages_dir}')
        #     raise Exception
        # source_dir = Path(self.temp_packages_dir)
        # target_dir = self.sharkpylib_directory
        # self._delete(target_dir)
        # shutil.copytree(source_dir, target_dir)
        # self._delete(source_dir)
    def _copy_plugins(self):
        # Copy each selected plugin from the temp unzip area into the plugin
        # directory (zipball folders look like '<org>-<plugin>-<sha>').
        self._check_path(self.program_directory)
        all_files = os.listdir(self.temp_plugins_dir)
        for plugin in self.selected_plugins:
            for file_name in all_files:
                if f'-{plugin}-' in file_name:
                    source_dir = Path(self.temp_plugins_dir, file_name)
                    target_dir = Path(self.plugins_directory, plugin)
                    self._delete(target_dir)
                    shutil.copytree(source_dir, target_dir)
                    break
def _run_batch_file(self, file_path):
"""
This will run and delete the batch file.
:return:
"""
self._check_path(file_path)
if file_path.suffix != '.bat':
self.logger.info(f'Not a valid bat file {file_path}')
raise Exception
self.logger.info(f'Running file {file_path}')
subprocess.run(str(file_path))
return True
def _check_path(self, path):
if 'SHARKtools' in str(path):
return True
self.logger.error(f'Not a valid path: {path}')
raise Exception
def _delete(self, path):
"""
Checks valid path (containing "sharktools") before deleting.
:param path:
:return:
"""
if os.path.exists(path) and self._check_path(path):
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
else:
return False
return True
return False
def _reset_directory(self, directory):
"""
Resets the given directory. First delete via self._delete to make sure root path is correct.
Then makes directory tree. Also if non existing from the beginning.
:param directory:
:return:
"""
self._delete(directory)
self._create_directory(directory)
def _create_directory(self, directory):
self._check_path(directory)
if not os.path.exists(directory):
os.makedirs(directory)
if __name__ == '__main__':
    pass
    # Ad-hoc manual test drive; most installation steps are commented out.
    p = Project()
    p.setup_project()
    p.select_plugins(['SHARKtools_ctd_processing', 'SHARKtools_pre_system_Svea'])
    # p.download_program()
    # p.download_plugins()
    # p.download_packages()
    p.set_python_path(r'C:\Python36/python.exe')
    # p.create_environment()
    # p.install_packages()
    # p.install_packages()
    # p.create_run_program_file()
| StarcoderdataPython |
360726 | import os
import torch
import torch.nn as nn
from torch.cuda import empty_cache
from torch.nn.utils import clip_grad_norm_
from tqdm import trange
from atari_utils.logger import WandBLogger
from simple.adafactor import Adafactor
class Trainer:
    def __init__(self, model, config):
        """Hold the world model, its Adafactor optimizer and optional logger.

        :param model: the world model to train
        :param config: configuration namespace; ``use_wandb`` is read here,
            many more attributes are read later in ``train``
        """
        self.model = model
        self.config = config
        self.logger = None
        if self.config.use_wandb:
            self.logger = WandBLogger()
        self.optimizer = Adafactor(self.model.parameters())
        # Step counters; presumably advanced as training progresses - confirm.
        self.model_step = 1
        self.reward_step = 1
def train(self, epoch, env, steps=15000):
if epoch == 0:
steps *= 3
c, h, w = self.config.frame_shape
rollout_len = self.config.rollout_length
states, actions, rewards, new_states, dones, values = env.buffer[0]
if env.buffer[0][5] is None:
raise BufferError('Can\'t train the world model, the buffer does not contain one full episode.')
assert states.dtype == torch.uint8
assert actions.dtype == torch.uint8
assert rewards.dtype == torch.uint8
assert new_states.dtype == torch.uint8
assert values.dtype == torch.float32
def get_index():
index = -1
while index == -1:
index = int(torch.randint(len(env.buffer) - rollout_len, size=(1,)))
for i in range(rollout_len):
done, value = env.buffer[index + i][4:6]
if done or value is None:
index = -1
break
return index
def get_indices():
return [get_index() for _ in range(self.config.batch_size)]
def preprocess_state(state):
state = state.float() / 255
noise_prob = torch.tensor([[self.config.input_noise, 1 - self.config.input_noise]])
noise_prob = torch.softmax(torch.log(noise_prob), dim=-1)
noise_mask = torch.multinomial(noise_prob, state.numel(), replacement=True).view(state.shape)
noise_mask = noise_mask.to(state)
state = state * noise_mask + torch.median(state) * (1 - noise_mask)
return state
reward_criterion = nn.CrossEntropyLoss()
iterator = trange(
0,
steps,
rollout_len,
desc='Training world model',
unit_scale=rollout_len
)
for i in iterator:
# Scheduled sampling
if epoch == 0:
decay_steps = self.config.scheduled_sampling_decay_steps
inv_base = torch.exp(torch.log(torch.tensor(0.01)) / (decay_steps // 4))
epsilon = inv_base ** max(decay_steps // 4 - i, 0)
progress = min(i / decay_steps, 1)
progress = progress * (1 - 0.01) + 0.01
epsilon *= progress
epsilon = 1 - epsilon
else:
epsilon = 0
indices = get_indices()
frames = torch.empty((self.config.batch_size, c * self.config.stacking, h, w))
frames = frames.to(self.config.device)
for j in range(self.config.batch_size):
frames[j] = env.buffer[indices[j]][0].clone()
frames = preprocess_state(frames)
n_losses = 5 if self.config.use_stochastic_model else 4
losses = torch.empty((rollout_len, n_losses))
if self.config.stack_internal_states:
self.model.init_internal_states(self.config.batch_size)
for j in range(rollout_len):
_, actions, rewards, new_states, _, values = env.buffer[0]
actions = torch.empty((self.config.batch_size, *actions.shape))
actions = actions.to(self.config.device)
rewards = torch.empty((self.config.batch_size, *rewards.shape), dtype=torch.long)
rewards = rewards.to(self.config.device)
new_states = torch.empty((self.config.batch_size, *new_states.shape), dtype=torch.long)
new_states = new_states.to(self.config.device)
values = torch.empty((self.config.batch_size, *values.shape))
values = values.to(self.config.device)
for k in range(self.config.batch_size):
actions[k] = env.buffer[indices[k] + j][1]
rewards[k] = env.buffer[indices[k] + j][2]
new_states[k] = env.buffer[indices[k] + j][3]
values[k] = env.buffer[indices[k] + j][5]
new_states_input = new_states.float() / 255
self.model.train()
frames_pred, reward_pred, values_pred = self.model(frames, actions, new_states_input, epsilon)
if j < rollout_len - 1:
for k in range(self.config.batch_size):
if float(torch.rand((1,))) < epsilon:
frame = new_states[k]
else:
frame = torch.argmax(frames_pred[k], dim=0)
frame = preprocess_state(frame)
frames[k] = torch.cat((frames[k, c:], frame), dim=0)
loss_reconstruct = nn.CrossEntropyLoss(reduction='none')(frames_pred, new_states)
clip = torch.tensor(self.config.target_loss_clipping).to(self.config.device)
loss_reconstruct = torch.max(loss_reconstruct, clip)
loss_reconstruct = loss_reconstruct.mean() - self.config.target_loss_clipping
loss_value = nn.MSELoss()(values_pred, values)
loss_reward = reward_criterion(reward_pred, rewards)
loss = loss_reconstruct + loss_value + loss_reward
if self.config.use_stochastic_model:
loss_lstm = self.model.stochastic_model.get_lstm_loss()
loss = loss + loss_lstm
self.optimizer.zero_grad()
loss.backward()
clip_grad_norm_(self.model.parameters(), self.config.clip_grad_norm)
self.optimizer.step()
tab = [float(loss), float(loss_reconstruct), float(loss_value), float(loss_reward)]
if self.config.use_stochastic_model:
tab.append(float(loss_lstm))
losses[j] = torch.tensor(tab)
losses = torch.mean(losses, dim=0)
metrics = {
'loss': float(losses[0]),
'loss_reconstruct': float(losses[1]),
'loss_value': float(losses[2]),
'loss_reward': float(losses[3])
}
if self.config.use_stochastic_model:
metrics.update({'loss_lstm': float(losses[4])})
if self.logger is not None:
d = {'model_step': self.model_step, 'epsilon': epsilon}
d.update(metrics)
self.logger.log(d)
self.model_step += rollout_len
iterator.set_postfix(metrics)
empty_cache()
if self.config.save_models:
torch.save(self.model.state_dict(), os.path.join('models', 'model.pt'))
| StarcoderdataPython |
3209629 | <filename>stocks/stocks.py
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import robin_stocks_modified as robin_stocks
from Utils.datetime_funcs import *
from Utils import login
"""Wrappers for bare robin_stocks functions"""
def get_stock_historicals(symbol, interval="5minute", span="week"):
    """Return processed historical data for SYMBOL sampled every INTERVAL over SPAN."""
    valid_spans = ['day', 'week', 'month', '3month', 'year', '5year']
    valid_intervals = ['5minute', '10minute', 'hour', 'day', 'week']
    assert span in valid_spans
    assert interval in valid_intervals
    records = robin_stocks.stocks.get_stock_historicals(symbol, interval, span)
    # Normalize the raw payload in place (date/time split, unused fields dropped).
    process_historicals(records)
    return records
def get_instrument_data(symbol):
    """Return the instrument record whose symbol equals SYMBOL, or None.

    Fixes an IndexError: the original indexed ``all_matches[0]`` without first
    checking that the result list is non-empty.
    """
    all_matches = robin_stocks.stocks.find_instrument_data(symbol)
    # Guard against an empty list as well as a falsy first entry.
    if not all_matches or not all_matches[0]:
        return None
    for match in all_matches:
        if match["symbol"] == symbol:
            return match
    return None
def get_latest_price(symbol, includeExtendedHours=True):
    """Return the latest price for SYMBOL as a float."""
    string_array = robin_stocks.stocks.get_latest_price(symbol, includeExtendedHours)
    # float() instead of eval(): never evaluate API-supplied text as code.
    return float(string_array[0])
def get_splits(symbol):
    """Return the stock-split history for SYMBOL (raw robin_stocks payload)."""
    return robin_stocks.stocks.get_splits(symbol)
"""These functions help to manipulate data from the API calls."""
def process_historicals(historicals):
    """Mutate raw Robinhood historicals in place.

    Splits 'begins_at' into separate 'date' and 'time' (military) fields and
    removes fields this project does not use.
    """
    unused_fields = ("begins_at", "session", "interpolated", "symbol")
    for record in historicals:
        # Derive the new fields before 'begins_at' is deleted below.
        record["date"] = get_historical_date(record)
        record["time"] = utc_to_military(get_historical_time(record))
        for field in unused_fields:
            del record[field]
def bound_historicals(all_historicals, start_date=None, end_date=None):
    """Filter ALL_HISTORICALS down to data points between START_DATE and END_DATE."""
    assert start_date or end_date, "stock_historicals_between_dates must have some boundary date provided"

    def in_bounds(date):
        # Apply whichever boundary checks were actually requested.
        if end_date and start_date:
            return is_not_past(date, start_date) and is_not_past(end_date, date)
        if not end_date:
            return is_not_past(date, start_date)
        return is_not_past(end_date, date)

    return [point for point in all_historicals if in_bounds(point["date"])]
def get_historical_time(data_point):
    """Return the 'HH:MM' portion of DATA_POINT's 'begins_at' timestamp, or None.

    The bare ``except`` is narrowed to the failures this lookup can actually
    produce (missing key, non-mapping input).
    """
    try:
        return data_point['begins_at'][11:16]
    except (KeyError, TypeError):
        return None
def get_historical_date(data_point):
    """Return the 'YYYY-MM-DD' portion of DATA_POINT's 'begins_at' timestamp, or None.

    The bare ``except`` is narrowed to the failures this lookup can actually
    produce (missing key, non-mapping input).
    """
    try:
        return data_point['begins_at'][0:10]
    except (KeyError, TypeError):
        return None
| StarcoderdataPython |
110100 | <reponame>tirgei/stack-overflow-lite-api
import os
from flask import Flask
from instance.config import app_config
from app.api.v1.views.user_views import v1 as users_v1
def create_app(config_name):
    """Flask application factory.

    :param config_name: key into ``app_config`` selecting the configuration
        object to load (e.g. 'development', 'testing').
    :return: configured :class:`Flask` app with the v1 users API mounted.
    """
    app = Flask(__name__)
    app.config.from_object(app_config[config_name])
    # Register version 1 of the users endpoints.
    app.register_blueprint(users_v1)
    return app
6616813 | from unittest import TestCase
class TestPlotVariantTypeData(TestCase):
    """Placeholder test case for plot_variant_type_data."""

    def test_plot_variant_type_data(self):
        # Intentionally failing stub: marks this test as not yet implemented.
        self.fail()
| StarcoderdataPython |
1706537 | ##################################
# Laboratory Server configuration #
##################################
# Maps each experiment hosted by this laboratory server to the coordination
# address of the experiment server that runs it, plus its (currently empty)
# tuple of checkers.
laboratory_assigned_experiments = {
    'exp1:dummy@Dummy experiments' : {
        'coord_address' : 'experiment1:laboratory1@core_machine',
        'checkers' : ()
    },
}
| StarcoderdataPython |
4841638 | # -*- coding: utf-8 -*-
"""
Extracts number of replies from a tweet.
Created on Fri Oct 22 00:58:01 2021
@author: Yannik
"""
from code.feature_extraction.feature_extractor import FeatureExtractor
import numpy as np
class RepliesExtractor(FeatureExtractor):
    """Collects the number of replies for a Tweet and stores them as a separate feature."""

    def __init__(self, input_column):
        """Constructor; output feature column is named "<input_column>_feature"."""
        super().__init__([input_column], "{0}_feature".format(input_column))

    # don't need to fit, so don't overwrite _set_variables()

    def _get_values(self, inputs):
        """Returns the given input column as a feature.

        Reshapes to an (n_samples, 1) column vector, as expected downstream.
        """
        result = np.array(inputs[0])
        result = result.reshape(-1,1)
        return result
1602116 | <gh_stars>0
#https://colab.research.google.com/drive/1AOkk-TSDFbvCb3Mxb7sTqRMAFbMi6Ra-?usp=sharing#scrollTo=vgZemXj6hejq
#!apt install tesseract-ocr-rus
#!apt install libtesseract-dev
#!pip install pytesseract
import pytesseract
import cv2
import pandas as pd
#Для таблиц
#!!pip install table_ocr
import numpy as np
#from table_ocr.ocr_image import crop_to_text
def tesseract_enabled():
    """Return True if the Russian language pack is available to Tesseract."""
    installed_languages = pytesseract.get_languages()
    return 'rus' in installed_languages
# Glues a set of images into a single image.
def concat_images(image_set, how=0):
    """Concatenate IMAGE_SET into one image, vertically (how=0/'vertical') or
    horizontally (how=1/'horizontal'; also the fallback for unknown values)."""
    def resize_image(image_matrix, nh, nw):
        # Pad each image up to the common (nh, nw) canvas.
        # NOTE(review): pads with the value 1, which assumes a binarized or
        # normalized image — confirm for uint8 grayscale input.
        #image_matrix = crop_to_text(image_matrix)
        oh, ow = image_matrix.shape[:2]
        resized_image = np.full((nh, nw), 1, dtype=image_matrix.dtype)
        resized_image[:oh, :ow] = image_matrix
        return resized_image
    shapes = [imat.shape for imat in image_set]
    max_h = max([s[0] for s in shapes])
    max_w = max([s[1] for s in shapes])
    images_resized = [
        resize_image(img, max_h, max_w)
        for img in image_set
    ]
    if (how == 0) or (how == 'vertical'):
        concats = cv2.vconcat(images_resized)
    elif (how == 1) or (how == 'horizontal'):
        concats = cv2.hconcat(images_resized)
    else:
        # NOTE(review): fallback duplicates the horizontal branch — confirm intended.
        concats = cv2.hconcat(images_resized)
    return concats
def get_text_corpus(jpg):
    """Run Russian OCR on JPG.

    Returns (text, word-level DataFrame, metrics) where metrics is 1 when the
    median word confidence is >= 95, else 0; ('', None, None) if no words found.
    """
    if not tesseract_enabled():
        raise Exception('Russian is not installed..')
    data = pytesseract.image_to_data(jpg, output_type='data.frame', lang='rus', config='hocr')
    # Median confidence over recognized words (conf <= 0 rows are layout, not words).
    median = data[data['conf'] > 0]['conf'].median()
    data = data[~pd.isna(data.text)]
    if median >= 95:
        metrics = 1
    else:
        metrics = 0
    if len(data) < 1:
        #raise Exception('No words..')
        print('No words..')
        return '', None, None
    try:
        return data.text.str.cat(sep=' '), data, metrics
    except:
        # Fallback when non-string values slip into the text column.
        return ' '.join(data['text'].astype('str')), data, metrics
def get_jpg_anon(jpg, coordinates, filled=True):
    '''
    Takes an image and a DataFrame of word bounding boxes; returns the image
    with those regions blacked out (filled=True) or outlined (filled=False).
    # plt.imshow(get_jpg_anon(img, coord))
    '''
    # cv2.rectangle thickness: -1 fills the rectangle, 2 draws a 2px border.
    if filled:
        filled = -1
    else:
        filled = 2
    if isinstance(coordinates, pd.DataFrame): # guard: coordinates may be empty/None
        for item in coordinates.iterrows():
            c = item[1]
            jpg = cv2.rectangle(jpg, (c.left, c.top), (c.left + c.width, c.top + c.height), (0, 0, 0), filled) #black
    return jpg
| StarcoderdataPython |
12842204 | <gh_stars>10-100
import gym
import numpy as np
from viewer import OpenCVImageViewer
class GymWrapper(object):
    """
    Gym interface wrapper for dm_control env wrapped by pixels.Wrapper
    """

    metadata = {'render.modes': ['human', 'rgb_array']}
    reward_range = (-np.inf, np.inf)

    def __init__(self, env):
        self._env = env
        self._viewer = None

    def __getattr__(self, name):
        # BUGFIX: was misspelled '__getattr' and therefore never invoked;
        # delegates unknown attribute lookups to the wrapped dm_control env.
        return getattr(self._env, name)

    @property
    def observation_space(self):
        """Image observation space derived from the env's pixel spec."""
        obs_spec = self._env.observation_spec()
        return gym.spaces.Box(0, 255, obs_spec['pixels'].shape, dtype=np.uint8)

    @property
    def action_space(self):
        """Continuous action space taken from the dm_control action spec bounds."""
        action_spec = self._env.action_spec()
        return gym.spaces.Box(action_spec.minimum, action_spec.maximum, dtype=np.float32)

    def step(self, action):
        """Advance one step; return (obs, reward, done, info) in Gym format."""
        time_step = self._env.step(action)
        obs = time_step.observation['pixels']
        reward = time_step.reward or 0
        done = time_step.last()
        info = {'discount': time_step.discount}
        return obs, reward, done, info

    def reset(self):
        time_step = self._env.reset()
        obs = time_step.observation['pixels']
        return obs

    def render(self, mode='human', **kwargs):
        """Render a frame: 'rgb_array' returns it, 'human' shows an OpenCV window."""
        if not kwargs:
            kwargs = self._env._render_kwargs
        img = self._env.physics.render(**kwargs)
        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            if self._viewer is None:
                self._viewer = OpenCVImageViewer()
            self._viewer.imshow(img)
            return self._viewer.isopen
        else:
            raise NotImplementedError
class RepeatAction(gym.Wrapper):
    """
    Action repeat wrapper to act same action repeatedly
    """

    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        self._skip = skip

    def reset(self):
        return self.env.reset()

    def step(self, action):
        """Repeat ACTION for up to self._skip frames, summing the rewards."""
        reward_sum = 0.0
        for _ in range(self._skip):
            obs, step_reward, done, info = self.env.step(action)
            reward_sum += step_reward
            if done:
                # Stop repeating as soon as the episode ends.
                break
        return obs, reward_sum, done, info
| StarcoderdataPython |
12231 | import importlib
import os
import pytest
from helpers import running_on_ci
import janitor.biology # noqa: F403, F401
# Skip all tests if Biopython not installed
pytestmark = pytest.mark.skipif(
(importlib.util.find_spec("Bio") is None) & ~running_on_ci(),
reason="Biology tests relying on Biopython only required for CI",
)
@pytest.mark.biology
def test_join_fasta(biodf):
    """Test adding sequence from FASTA file in ``sequence`` column."""
    df = biodf.join_fasta(
        filename=os.path.join(pytest.TEST_DATA_DIR, "sequences.fasta"),
        id_col="sequence_accession",
        column_name="sequence",
    )
    # The joined dataframe gains one new column holding the FASTA sequences.
    assert "sequence" in df.columns
| StarcoderdataPython |
1806945 | <gh_stars>10-100
'''
python setup.py sdist bdist_wheel
python -m twine upload dist/*
'''
import sys
import os
import glob
import setuptools
from setuptools import setup, find_packages
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension, BuildExtension
requirements = ["torch"]
def get_extensions():
    """Build the list of DCLS CUDA extension modules.

    Returns None (no compiled extensions) when CUDA_HOME is not set.
    """
    if CUDA_HOME is None:
        print('CUDA_HOME is None. Install Without CUDA Extension')
        return None
    else:
        print('Install With CUDA Extension')

    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir_construct = os.path.join(this_dir, 'DCLS/construct/src')
    extensions_dir = os.path.join(this_dir, 'DCLS/src')

    # Extensions built from DCLS/construct/src; commented entries are
    # disabled variants kept for reference.
    ext_list_construct = ['dcls_construct_1d',
                          #'dcls_construct_2_1d',
                          'dcls_construct_2d',
                          #'dcls_construct_3_1d',
                          #'dcls_construct_3_2d',
                          'dcls_construct_3d'
                          ]
    # Extensions built from DCLS/src (currently all disabled).
    ext_list = [#'dcls_2d',
                #'im2col_dcls_2d',
                #'rsconv_2d',
                #'im2col_rsconv_2d'
                ]
    #'Distance']
    if not sys.platform == 'win32':
        # win32 does not support cuSparse
        #ext_list_construct.extend(['spmm',
        #                           'sparse_weight_conv'])
        pass

    extra_compile_args = {'cxx': ['-g'], 'nvcc': ['-use_fast_math']}
    extension = CUDAExtension
    define_macros = [("WITH_CUDA", None)]

    # Each extension compiles its .cpp plus the matching CUDA kernel file.
    ext_modules = list([
        extension(
            ext_name,
            glob.glob(os.path.join(extensions_dir_construct, ext_name + '.cpp')) + glob.glob(os.path.join(extensions_dir_construct, 'cuda', ext_name + '_cuda_kernel.cu')),
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            #libraries=[ 'cusparse', 'cusparseLt']
        ) for ext_name in ext_list_construct])

    ext_modules.extend( list([
        extension(
            ext_name,
            glob.glob(os.path.join(extensions_dir, ext_name + '.cpp')) + glob.glob(os.path.join(extensions_dir, 'cuda', ext_name + '_cuda_kernel.cu')),
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            #libraries=[ 'cusparse', 'cusparseLt']
        ) for ext_name in ext_list]))

    return ext_modules
# Read packaging metadata from disk.
with open("./requirements.txt", "r", encoding="utf-8") as fh:
    install_requires = fh.read()
with open("./README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# NOTE(review): 'install_requires' read above is unused; setup() receives the
# hard-coded 'requirements' list instead — confirm which is intended.
setup(
    install_requires=requirements,
    name="DCLS",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Dilated convolution with learnable spacings, built on PyTorch.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/K-H-Ismail/Dilated-Convolution-with-Learnable-Spacings-PyTorch.git",
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: Other/Proprietary License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
    ext_modules=get_extensions(),
    cmdclass={
        "build_ext": BuildExtension
    }
)
| StarcoderdataPython |
364345 | <filename>ds18b20S/__init__.py
#Code by <NAME>
#Library for ds18b20 temperature sensor
#Contact`
#Email: <EMAIL>
import sys
import glob
import os
class DsbS():
    """Reader for DS18B20 1-wire temperature sensors exposed through sysfs.

    Fixes applied: 'slef' parameter typo in getSensorIds, bare excepts narrowed
    to Exception (so Ctrl-C still interrupts), sysfs glob computed once per
    call, file handles closed via 'with', and the shared unit conversion moved
    into a private helper. Public interface and printed messages are unchanged.
    """

    # sysfs glob matching the w1_slave file of every attached DS18B20 probe.
    _SENSOR_GLOB = "/sys/bus/w1/devices/28-0*/w1_slave"

    def __init__(self, initial=False):
        # Optionally load the 1-wire kernel modules (requires root).
        if initial:
            os.system('modprobe w1-gpio')
            os.system('modprobe w1-therm')

    def getSensorIds(self):
        """Return the 1-wire device ids (e.g. '28-0xxxx') of all attached probes."""
        return [path.split("/")[5] for path in glob.glob(self._SENSOR_GLOB)]

    @staticmethod
    def _convert(reading, Ttype):
        """Convert a raw millidegree-Celsius READING string to the requested unit.

        Returns (value, known_unit); unknown units fall back to Celsius.
        """
        celsius = float(reading) / 1000.0
        unit = Ttype.lower()
        if unit == "c":
            return celsius, True
        if unit == "f":
            return celsius * 9 / 5 + 32, True
        if unit == "k":
            return celsius + 273.15, True
        return celsius, False

    def getTemps(self, Ttype="C"):
        """Return a list of temperatures (one per sensor) in unit Ttype (C/F/K).

        Returns the string "Null" when no sensors are attached (historical
        sentinel, kept for backward compatibility).
        """
        sensors = glob.glob(self._SENSOR_GLOB)
        if len(sensors) < 1:
            print("NO DEVICES FOUND")
            return "Null"
        temps = list()
        for sensor in sensors:
            try:
                with open(sensor, "r") as f:
                    data = f.read()
                if "YES" in data:
                    # The reading follows ' t=' in the w1_slave output.
                    (discard, sep, reading) = data.partition(' t=')
                    t, known = self._convert(reading, Ttype)
                    temps.append(t)
                    if not known:
                        print("WARNING: UNKOWN TEMPERATURE TYPE")
                else:
                    # CRC check failed ("YES" missing from the sysfs output).
                    print("EROR WHILE READING TEMPERATURE")
            except Exception:
                print("EROR WHILE READING TEMPERATURE1")
        return temps

    def getIdTemp(self, Ttype="c"):
        """Return a dict mapping each sensor's sysfs path to its temperature.

        Same semantics as getTemps, including the "Null" no-device sentinel.
        """
        sensors = glob.glob(self._SENSOR_GLOB)
        if len(sensors) < 1:
            print("NO DEVICES FOUND")
            return "Null"
        temps = dict()
        for sensor in sensors:
            try:
                with open(sensor, "r") as f:
                    data = f.read()
                if "YES" in data:
                    (discard, sep, reading) = data.partition(' t=')
                    t, known = self._convert(reading, Ttype)
                    temps[sensor] = t
                    if not known:
                        print("WARNING: UNKOWN TEMPERATURE TYPE")
                else:
                    print("EROR WHILE READING TEMPERATURE")
            except Exception:
                print("EROR WHILE READING TEMPERATURE")
        return temps
3485549 | # ===========================================================================
# distmap.py --------------------------------------------------------------
# ===========================================================================
# import ------------------------------------------------------------------
# ---------------------------------------------------------------------------
from shdw.__init__ import _logger
import shdw.utils.imgio
import shdw.utils.imgtools
import numpy as np
# function ----------------------------------------------------------------
# ---------------------------------------------------------------------------
def new_distance_transform_map(
    files,
    param_specs,
    param_io,
    param_label=dict(),
    param_show=dict(),
    param=dict()
):
    """For each labelled input image, compute the Euclidean distance transform
    of the label mask and write it via the configured image writer.

    NOTE(review): the dict() defaults are mutable and shared across calls;
    safe only while never mutated here. The log message below mentions
    "maximum likelihood classification map" — looks copy-pasted from a
    sibling function.
    """
    _logger.info("Start creation of maximum likelihood classification map with settings:\nparam_specs:\t{},\nparam_io:\t{},\nparam_label:\t{},\nparam_show:\t{},\nparam:\t{}".format(param_specs, param_io, param_label, param_show, param))

    # settings ------------------------------------------------------------
    # -----------------------------------------------------------------------
    img_in, img_out, _, _ = shdw.utils.imgio.get_data(files, param_specs, param_io, param_label=param_label, param_show=param_show)

    for item in img_in:
        _logger.info("Processing image '{}'".format(item[0].path))

        # Distance transform of the label mask; squeeze drops the trailing
        # channel axis so a 2-D map is written out.
        edt = np.squeeze(
            shdw.utils.imgtools.get_distance_transform(
                item.spec("label").data,
                param["label_value"],
                param["threshold"]
            ),
            axis=2
        )

        _logger.info(shdw.utils.imgtools.get_array_info(edt))
        img_out(item[0].path, edt)
| StarcoderdataPython |
1872929 | <reponame>DQinYuan/pyecharts<gh_stars>0
# coding=utf-8
from pyecharts.chart import Chart
class HeatMap(Chart):
    """
    <<< HeatMap >>>

    A heat map conveys magnitude through colour and must be used together
    with a visualMap component. On a cartesian grid, both axes must be
    category axes.
    """
    def __init__(self, title="", subtitle="", **kwargs):
        super(HeatMap, self).__init__(title, subtitle, **kwargs)

    def add(self, *args, **kwargs):
        self.__add(*args, **kwargs)

    def __add(self, *args, **kwargs):
        """
        :param args:
            If ``is_calendar_heatmap`` is True, the positional arguments are:

        :param name:
            Series name, used for tooltip display and legend filtering.
        :param data:
            Data items; each row is one item, each column one dimension.

            Otherwise (the default), they are:

        :param name:
            Series name, used for tooltip display and legend filtering.
        :param x_axis:
            x axis data; must be a category axis (not numeric).
        :param y_axis:
            y axis data; must be a category axis (not numeric).
        :param data:
            Data items; each row is one item, each column one dimension.
        :param kwargs:
        """
        _is_calendar = kwargs.get('is_calendar_heatmap', None) is True
        if _is_calendar:
            name, data = args
        else:
            name, x_axis, y_axis, data = args

        chart = self._get_all_options(**kwargs)
        self._option.get('legend')[0].get('data').append(name)
        self._option.get('series').append(
            {
                "type": "heatmap",
                "name": name,
                "data": data,
                "label": chart['label'],
                "seriesId": self._option.get('series_id'),
            }
        )
        if _is_calendar:
            # Calendar heat map: attach the series to a calendar coordinate system.
            self._option.get('toolbox').update(left="98%", top="26%")
            self._option.get('series')[0].update(coordinateSystem='calendar')
            self._option.update(calendar=chart['calendar'])
        else:
            # Cartesian heat map: both axes are category axes with split areas.
            xaxis, yaxis = chart['xy_axis']
            self._option.update(xAxis=xaxis, yAxis=yaxis)
            self._option.get('xAxis')[0].update(
                type='category', data=x_axis, splitArea={"show": True}
            )
            self._option.get('yAxis')[0].update(
                type='category', data=y_axis, splitArea={"show": True}
            )
        self._config_components(**kwargs)
| StarcoderdataPython |
12828944 | <gh_stars>10-100
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
class HookBasedFeatureExtractor(nn.Module):
    """Capture the input/output tensors of a named sub-layer via forward hooks.

    Calling the extractor runs the submodule once and returns (inputs, outputs)
    of the layer ``layername``; with ``upscale=True`` the outputs are bilinearly
    resized to the spatial size of the network input.
    """

    def __init__(self, submodule, layername, upscale=False):
        super(HookBasedFeatureExtractor, self).__init__()
        self.submodule = submodule
        self.submodule.eval()
        self.layername = layername
        self.outputs_size = None
        self.outputs = None
        self.inputs = None
        self.inputs_size = None
        self.upscale = upscale

    def get_input_array(self, m, i, o):
        if isinstance(i, tuple):
            self.inputs = [i[index].data.clone() for index in range(len(i))]
            self.inputs_size = [input.size() for input in self.inputs]
        else:
            self.inputs = i.data.clone()
            # BUGFIX: was 'self.input.size()' — a nonexistent attribute.
            self.inputs_size = self.inputs.size()
        print('Input Array Size: ', self.inputs_size)

    def get_output_array(self, m, i, o):
        if isinstance(o, tuple):
            self.outputs = [o[index].data.clone() for index in range(len(o))]
            self.outputs_size = [output.size() for output in self.outputs]
        else:
            self.outputs = o.data.clone()
            self.outputs_size = self.outputs.size()
        print('Output Array Size: ', self.outputs_size)

    def rescale_output_array(self, newsize):
        """Bilinearly upsample the captured outputs to the input's spatial size."""
        us = nn.Upsample(size=newsize[2:], mode='bilinear')
        # BUGFIX: '.data' is an attribute, not a method — '.data()' raised TypeError.
        if isinstance(self.outputs, list):
            for index in range(len(self.outputs)):
                self.outputs[index] = us(self.outputs[index]).data
        else:
            self.outputs = us(self.outputs).data

    def forward(self, x):
        target_layer = self.submodule._modules.get(self.layername)
        # Collect the input and output tensors of the target layer.
        h_inp = target_layer.register_forward_hook(self.get_input_array)
        h_out = target_layer.register_forward_hook(self.get_output_array)
        self.submodule(x)
        h_inp.remove()
        h_out.remove()
        # Rescale the feature map if requested.
        if self.upscale:
            self.rescale_output_array(x.size())
        return self.inputs, self.outputs
import math
def spatial_pyramid_pool(previous_conv, batch_size, previous_conv_size, out_bin_sizes):
    '''
    ref: Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition
    previous_conv: a tensor vector of previous convolution layer
    batch_size: an int number of images in the batch
    previous_conv_size: an int vector [height, width] of the feature-map size of the previous convolution layer
    out_bin_sizes: an int vector of expected output bin counts of each max pooling level
    returns: a tensor of shape (batch_size, n) — the concatenation of all pooling levels

    Cleanup over the original: removed the per-iteration debug print and the
    commented-out dead code; pooling arithmetic is unchanged.
    '''
    spp = None
    for bins in out_bin_sizes:
        # Kernel/stride sized so that `bins` windows cover each dimension;
        # symmetric padding absorbs the remainder.
        h_wid = int(math.ceil(previous_conv_size[0] / bins))
        w_wid = int(math.ceil(previous_conv_size[1] / bins))
        h_pad = (h_wid * bins - previous_conv_size[0] + 1) // 2
        w_pad = (w_wid * bins - previous_conv_size[1] + 1) // 2
        maxpool = nn.MaxPool2d(kernel_size=(h_wid, w_wid), stride=(h_wid, w_wid), padding=(h_pad, w_pad))
        x = maxpool(previous_conv)
        if spp is None:
            spp = x.view(batch_size, -1)
        else:
            spp = torch.cat((spp, x.view(batch_size, -1)), dim=1)
    return spp
'''
https://discuss.pytorch.org/t/solved-reverse-gradients-in-backward-pass/3589/4
'''
class GradientReversalFunction(torch.autograd.Function):
    """Identity in the forward pass; multiplies the gradient by -Lambda backward.

    Modernized from the legacy (instantiated) autograd Function API, which is
    rejected by current PyTorch, to the static forward/backward API.
    """

    @staticmethod
    def forward(ctx, input, Lambda):
        ctx.Lambda = Lambda
        return input.view_as(input)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse (and scale) the gradient; None for the Lambda argument.
        return ctx.Lambda * grad_output.neg(), None


class GradientReversalLayer(nn.Module):
    """nn.Module wrapper around GradientReversalFunction (domain adaptation)."""

    def __init__(self, Lambda, use_cuda=False):
        super(GradientReversalLayer, self).__init__()
        self.Lambda = Lambda
        if use_cuda:
            self.cuda()

    def forward(self, input):
        return GradientReversalFunction.apply(input, self.Lambda)

    def change_lambda(self, Lambda):
        self.Lambda = Lambda
def gram_matrix_2D(y):
    """Compute the per-sample Gram matrix of a 4-D feature tensor.

    :param y: tensor of shape (batch, channels, height, width)
    :return: tensor of shape (batch, channels, channels), normalized by ch*h*w
    """
    batch, channels, height, width = y.size()
    flat = y.view(batch, channels, height * width)
    gram = torch.bmm(flat, flat.transpose(1, 2))
    return gram / (channels * height * width)
def adjust_learning_rate(optimizer, lr):
    """Set every parameter group of OPTIMIZER to the fixed learning rate LR."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def get_scheduler(optimizer, lr_policy, lr_decay_iters=5, epoch_count=None, niter=None, niter_decay=None):
    """Create a learning-rate scheduler for OPTIMIZER according to LR_POLICY.

    NOTE(review): the 'lambda' policy requires epoch_count/niter/niter_decay;
    with their None defaults the closure would raise TypeError — confirm callers
    always pass them for that policy.
    """
    print('lr_policy = [{}]'.format(lr_policy))
    if lr_policy == 'lambda':
        def lambda_rule(epoch):
            # Linear decay from 1 to 0 over the last niter_decay epochs.
            lr_l = 1.0 - max(0, epoch + 1 + epoch_count - niter) / float(niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=lr_decay_iters, gamma=0.5)
    elif lr_policy == 'step2':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=lr_decay_iters, gamma=0.1)
    elif lr_policy == 'plateau':
        print('schedular=plateau')
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, threshold=0.01, patience=5)
    elif lr_policy == 'plateau2':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif lr_policy == 'step_warmstart':
        def lambda_rule(epoch):
            # Warm start at 0.1x for 5 epochs, then staged decay at 100/200.
            #print(epoch)
            if epoch < 5:
                lr_l = 0.1
            elif 5 <= epoch < 100:
                lr_l = 1
            elif 100 <= epoch < 200:
                lr_l = 0.1
            elif 200 <= epoch:
                lr_l = 0.01
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif lr_policy == 'step_warmstart2':
        def lambda_rule(epoch):
            # Same warm start, with the decay stages at 50/100 instead.
            #print(epoch)
            if epoch < 5:
                lr_l = 0.1
            elif 5 <= epoch < 50:
                lr_l = 1
            elif 50 <= epoch < 100:
                lr_l = 0.1
            elif 100 <= epoch:
                lr_l = 0.01
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    else:
        # NOTE(review): this RETURNS the NotImplementedError instead of raising it.
        return NotImplementedError('learning rate policy [%s] is not implemented', lr_policy)
    return scheduler
def cal_cls_acc(pred, gt):
    '''
    Classification accuracy counts from raw network scores.
    :param pred: network output tensor, shape (N, n_classes)
    :param gt: ground-truth label tensor, shape (N,)
    :return: (number of correct predictions as a tensor, total sample count)
    '''
    predicted_classes = pred.data.max(1)[1].cpu()
    correct = gt.cpu().eq(predicted_classes).sum()
    total = gt.size(0)
    return correct, total
| StarcoderdataPython |
6633788 | import re
import mock
import pytest
from dagster import HookContext, build_hook_context, failure_hook, resource, solid, success_hook
from dagster.core.definitions.decorators.hook import event_list_hook
from dagster.core.errors import DagsterInvalidInvocationError, DagsterInvariantViolationError
def test_event_list_hook_invocation():
    """Direct invocation of an @event_list_hook: positional and keyword call
    styles succeed; wrong arity or wrong argument names raise
    DagsterInvalidInvocationError with the expected messages."""
    entered = []

    @event_list_hook
    def basic_event_list_hook(context, event_list):
        assert isinstance(context, HookContext)
        for event in event_list:
            if event.is_step_success:
                entered.append("yes")

    basic_event_list_hook(build_hook_context(), [mock.MagicMock(is_step_success=True)])
    assert entered == ["yes"]

    entered = []

    basic_event_list_hook(build_hook_context(), event_list=[mock.MagicMock(is_step_success=True)])
    assert entered == ["yes"]

    entered = []

    basic_event_list_hook(
        context=build_hook_context(), event_list=[mock.MagicMock(is_step_success=True)]
    )
    assert entered == ["yes"]

    with pytest.raises(
        DagsterInvalidInvocationError,
        match="Decorated function expects two parameters, context and event_list, but 0 were provided.",
    ):
        basic_event_list_hook()  # pylint: disable=no-value-for-parameter

    with pytest.raises(
        DagsterInvalidInvocationError,
        match="Decorated function expects two parameters, context and event_list, but 1 were provided.",
    ):
        basic_event_list_hook(event_list=[])  # pylint: disable=no-value-for-parameter

    with pytest.raises(
        DagsterInvalidInvocationError,
        match="Decorated function expects two parameters, context and event_list, but 1 were provided.",
    ):
        basic_event_list_hook(context=None)  # pylint: disable=no-value-for-parameter

    with pytest.raises(
        DagsterInvalidInvocationError,
        match="Decorated function expects two parameters, context and event_list, but 1 were provided.",
    ):
        basic_event_list_hook(None)  # pylint: disable=no-value-for-parameter

    with pytest.raises(
        DagsterInvalidInvocationError, match="Could not find expected argument 'context'."
    ):
        basic_event_list_hook(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
            foo=None, event_list=[]
        )

    with pytest.raises(
        DagsterInvalidInvocationError, match="Could not find expected argument 'event_list'."
    ):
        basic_event_list_hook(  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
            context=None, bar=[]
        )
@pytest.mark.parametrize("hook_decorator", [success_hook, failure_hook])
def test_context_hook_invocation(hook_decorator):
    """Success/failure hooks take exactly one context argument; zero arguments
    or an unknown keyword raises DagsterInvalidInvocationError."""
    entered = []

    @hook_decorator
    def my_hook(_):
        entered.append("yes")

    my_hook(None)
    assert entered == ["yes"]

    entered = []

    my_hook(build_hook_context())
    assert entered == ["yes"]

    entered = []

    my_hook(_=build_hook_context())
    assert entered == ["yes"]

    with pytest.raises(
        DagsterInvalidInvocationError,
        match="Decorated function expects one parameter, _, but 0 were provided.",
    ):
        my_hook()  # pylint: disable=no-value-for-parameter

    with pytest.raises(
        DagsterInvalidInvocationError, match="Could not find expected argument '_'."
    ):
        my_hook(foo=None)  # pylint: disable=unexpected-keyword-arg,no-value-for-parameter
@pytest.mark.parametrize(
    "hook_decorator,is_event_list_hook",
    [(success_hook, False), (failure_hook, False), (event_list_hook, True)],
)
def test_success_hook_with_resources(hook_decorator, is_event_list_hook):
    """Hooks declaring required_resource_keys see those resources on the
    context; invoking without them raises DagsterInvariantViolationError."""
    decorator = hook_decorator(required_resource_keys={"foo", "bar"})
    if is_event_list_hook:

        def my_hook_reqs_resources(context, _):
            assert context.resources.foo == "foo"
            assert context.resources.bar == "bar"

        hook = decorator(my_hook_reqs_resources)
    else:

        def my_hook_reqs_resources(context):  # type: ignore[misc]
            assert context.resources.foo == "foo"
            assert context.resources.bar == "bar"

        hook = decorator(my_hook_reqs_resources)

    @resource
    def bar_resource(_):
        return "bar"

    if is_event_list_hook:
        hook(build_hook_context(resources={"foo": "foo", "bar": bar_resource}), None)
    else:
        hook(build_hook_context(resources={"foo": "foo", "bar": bar_resource}))

    with pytest.raises(
        DagsterInvariantViolationError,
        match=r"The hook 'my_hook_reqs_resources' requires resource '\w+', "
        r"which was not provided by the context.",
    ):
        if is_event_list_hook:
            hook(None, None)
        else:
            hook(None)
@pytest.mark.parametrize(
    "hook_decorator,is_event_list_hook",
    [(success_hook, False), (failure_hook, False), (event_list_hook, True)],
)
def test_success_hook_cm_resource(hook_decorator, is_event_list_hook):
    """Generator (context-manager) resources are entered/exited with the hook
    context scope; using them outside a 'with' scope raises."""
    entered = []

    @resource
    def cm_resource(_):
        try:
            entered.append("try")
            yield "foo"
        finally:
            entered.append("finally")

    decorator = hook_decorator(required_resource_keys={"cm"})
    if is_event_list_hook:

        def my_hook_cm_resource(context, _):
            assert context.resources.cm == "foo"
            assert entered == ["try"]

        hook = decorator(my_hook_cm_resource)
    else:

        def my_hook_cm_resource(context):  # type: ignore[misc]
            assert context.resources.cm == "foo"
            assert entered == ["try"]

        hook = decorator(my_hook_cm_resource)

    with build_hook_context(resources={"cm": cm_resource}) as context:
        if is_event_list_hook:
            hook(context, None)
        else:
            hook(context)

    # Exiting the context manager finalizes the generator resource.
    assert entered == ["try", "finally"]

    with pytest.raises(
        DagsterInvariantViolationError,
        match=re.escape(
            "At least one provided resource is a generator, but attempting to access resources "
            "outside of context manager scope. You can use the following syntax to open a context "
            "manager: `with build_hook_context(...) as context:`"
        ),
    ):
        if is_event_list_hook:
            hook(build_hook_context(resources={"cm": cm_resource}), None)
        else:
            hook(build_hook_context(resources={"cm": cm_resource}))
def test_hook_invocation_with_solid():
    """The hook context exposes the solid it was built with, aliases included."""

    @solid
    def foo():
        pass

    @solid
    def not_foo():
        pass

    @success_hook
    def assert_solid_named_foo(context):
        assert context.solid.name == "foo"
        assert len(context.solid.graph_definition.solids) == 1

    # A solid literally named "foo" and one aliased to "foo" behave the same.
    for solid_def in (foo, not_foo.alias("foo")):
        assert_solid_named_foo(build_hook_context(solid=solid_def))
def test_properties_on_hook_context():
    """run_id, job_name and op_exception given to the builder surface as typed
    properties on the hook context."""

    @success_hook
    def property_probe(context):
        for value, expected_type in (
            (context.job_name, str),
            (context.run_id, str),
            (context.op_exception, BaseException),
        ):
            assert isinstance(value, expected_type)

    property_probe(
        build_hook_context(
            run_id="blah",
            job_name="blah",
            op_exception=DagsterInvariantViolationError("blah"),
        )
    )
| StarcoderdataPython |
1746960 | import logging
from e2e.utils.cognito_bootstrap import common
from e2e.utils.aws.acm import AcmCertificate
from e2e.utils.aws.cognito import CustomDomainCognitoUserPool
from e2e.utils.aws.route53 import Route53HostedZone
from e2e.utils.utils import print_banner, load_yaml_file
from e2e.utils.load_balancer.lb_resources_cleanup import (
delete_cert,
delete_policy,
clean_root_domain,
)
from e2e.fixtures import cluster
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def delete_userpool(
    domain: str,
    userpool_name: str,
    userpool_arn: str,
    domain_alias: str,
    domain_cert_arn: str,
    region: str,
) -> None:
    """Delete a Cognito userpool and, when configured, its custom domain.

    Deletion is best-effort: a failure is logged and swallowed so the rest
    of the cleanup can proceed.

    Args:
        domain: subdomain the userpool serves; the userpool domain is
            ``auth.<domain>``.
        userpool_name: name of the Cognito userpool.
        userpool_arn: full ARN; the userpool id is its last ``/`` component.
        domain_alias: CloudFront alias target of the custom userpool domain;
            a falsy value means no custom domain was configured.
        domain_cert_arn: ACM certificate ARN backing the custom domain.
        region: AWS region the userpool lives in.
    """
    userpool_domain = "auth." + domain
    userpool_id = userpool_arn.split("/")[-1]
    userpool_cloudfront_alias = domain_alias
    cognito_userpool = CustomDomainCognitoUserPool(
        userpool_name=userpool_name,
        userpool_domain=userpool_domain,
        userpool_id=userpool_id,
        domain_cert_arn=domain_cert_arn,
        region=region,
    )
    try:
        # The custom domain must be removed before the userpool itself.
        if userpool_cloudfront_alias:
            cognito_userpool.delete_userpool_domain()
        cognito_userpool.delete_userpool()
    except Exception:
        # Still best-effort, but surface the failure instead of hiding it.
        logger.exception("Failed to delete Cognito userpool %s", userpool_name)
def delete_cognito_dependency_resources(cfg: dict):
    """Delete the AWS resources recorded in ``cfg`` for a Cognito-backed
    deployment: root-domain records and cert, the userpool (plus custom
    domain), the ALB service account and IAM policy, the subdomain ACM
    certificates, and finally the subdomain hosted zone.

    Bug fixes relative to the original:
      * ``subdomain_name`` is now read unconditionally — it was previously
        bound only when a subdomain hosted-zone id existed, so the later
        userpool deletion could raise NameError.
      * cert and hosted-zone deletions are guarded against ``None`` so a
        partially-created config no longer crashes mid-cleanup.
    """
    deployment_region = cfg["cluster"]["region"]
    cluster_name = cfg["cluster"]["name"]
    subdomain_name = cfg["route53"]["subDomain"]["name"]
    subdomain_hosted_zone_id = cfg["route53"]["subDomain"].get("hostedZoneId", None)
    root_domain_hosted_zone_id = cfg["route53"]["rootDomain"].get("hostedZoneId", None)

    subdomain_hosted_zone = None
    if subdomain_hosted_zone_id:
        subdomain_hosted_zone = Route53HostedZone(
            domain=subdomain_name,
            id=subdomain_hosted_zone_id,
        )

    if root_domain_hosted_zone_id:
        clean_root_domain(
            domain_name=cfg["route53"]["rootDomain"]["name"],
            hosted_zone_id=root_domain_hosted_zone_id,
            subdomain_hosted_zone=subdomain_hosted_zone,
        )

    root_cert_arn = cfg["route53"]["rootDomain"].get("certARN", None)
    if root_cert_arn:
        delete_cert(acm_certificate=AcmCertificate(arn=root_cert_arn, region=deployment_region))

    subdomain_cert_deployment_region = subdomain_cert_n_virginia = None
    subdomain_cert_deployment_region_arn = cfg["route53"]["subDomain"].get(
        deployment_region + "-certARN", None
    )
    subdomain_cert_n_virginia_arn = cfg["route53"]["subDomain"].get(
        "us-east-1-certARN", None
    )
    if subdomain_cert_n_virginia_arn:
        # The us-east-1 cert doubles as the deployment-region cert unless a
        # region-specific one was issued (handled just below).
        subdomain_cert_deployment_region = (
            subdomain_cert_n_virginia
        ) = AcmCertificate(arn=subdomain_cert_n_virginia_arn)
    if deployment_region != "us-east-1" and subdomain_cert_deployment_region_arn:
        subdomain_cert_deployment_region = AcmCertificate(
            arn=subdomain_cert_deployment_region_arn, region=deployment_region
        )

    # delete userpool domain and userpool
    cognito_userpool_arn = cfg["cognitoUserpool"].get("ARN", None)
    if cognito_userpool_arn and subdomain_cert_deployment_region:
        delete_userpool(
            domain=subdomain_name,
            userpool_name=cfg["cognitoUserpool"]["name"],
            userpool_arn=cognito_userpool_arn,
            domain_alias=cfg["cognitoUserpool"]["domainAliasTarget"],
            domain_cert_arn=subdomain_cert_deployment_region.arn,
            region=deployment_region,
        )

    # delete ALB service account and its IAM policy
    alb = cfg.get("kubeflow", {}).get("alb", None)
    if alb:
        alb_sa = alb.get("serviceAccount", None)
        if alb_sa:
            cluster.delete_iam_service_account(
                alb_sa["name"], alb_sa["namespace"], cluster_name, deployment_region
            )
            delete_policy(arn=alb_sa["policyArn"], region=deployment_region)

    # delete subdomain certs (skip any that were never recorded in cfg)
    if deployment_region != "us-east-1" and subdomain_cert_deployment_region:
        delete_cert(acm_certificate=subdomain_cert_deployment_region)
    if subdomain_cert_n_virginia:
        delete_cert(acm_certificate=subdomain_cert_n_virginia)

    # delete hosted zone
    if subdomain_hosted_zone:
        subdomain_hosted_zone.delete_hosted_zone()
if __name__ == "__main__":
    # Entry point: load the shared config produced by the bootstrap scripts
    # and delete every resource it records.
    config_file_path = common.CONFIG_FILE
    print_banner("Reading Config")
    cfg = load_yaml_file(file_path=config_file_path)
    delete_cognito_dependency_resources(cfg)
| StarcoderdataPython |
6660592 | <reponame>FadedCoder/Teeshood
from django.conf.urls import url
from . import views
# URL routes for the contact-form views: list, detail and delete by pk.
# NOTE(review): `django.conf.urls.url` is deprecated and removed in Django 4;
# consider migrating to `django.urls.re_path` — confirm the project's Django
# version before changing.
urlpatterns = [
    url(r'^$',
        views.contact_form_list, name='contact-form-list'),
    url(r'^(?P<pk>[0-9]+)/$',
        views.contact_form_details, name='contact-form-details'),
    url(r'^(?P<pk>[0-9]+)/delete/$',
        views.contact_form_delete, name='contact-form-delete'),
]
| StarcoderdataPython |
# DDL: summary table `commodity` — one row per commodity with its name/unit
# and the latest max/min/avg price snapshot (2-decimal fixed point) and date.
create_commodity_table = '''CREATE TABLE IF NOT EXISTS `commodity` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(50) NOT NULL,
`unit` varchar(5) NOT NULL,
`max` decimal(7,2) NOT NULL DEFAULT '0.00',
`min` decimal(7,2) NOT NULL DEFAULT '0.00',
`avg` decimal(7,2) NOT NULL DEFAULT '0.00',
`date` date DEFAULT NULL,
PRIMARY KEY (`id`)
)'''
# DDL template: create a per-commodity price-history table (table name is
# filled in via str.format) if it does not already exist.
create_stmt = '''CREATE TABLE IF NOT EXISTS `{}` (
`id` INT NOT NULL AUTO_INCREMENT,
`max` DOUBLE(7,2) NULL,
`min` DOUBLE(7,2) NULL,
`avg` DOUBLE(7,2) NULL,
`date` DATE NULL,
PRIMARY KEY (`id`));'''
# Insert a commodity row into the global commodity table.
# NOTE(review): values are spliced in with str.format — SQL-injection-prone
# if any value is untrusted; prefer driver parameter binding. Confirm call
# sites before changing the template.
insert_commodity = '''INSERT INTO `commodity` (`name`, `unit`, `max`, `min`, `avg`, `date`) VALUES ('{}', '{}', '{}', '{}', '{}', '{}');'''
# Insert a dated price snapshot into a commodity's own history table.
insert_item_date = '''INSERT INTO `{}` (`max`, `min`, `avg`, `date`) VALUES ('{}', '{}', '{}', '{}');'''
394371 | """
Test Exceptions
"""
import pytest
from alsek.exceptions import AlsekError
@pytest.mark.parametrize("exception", AlsekError.__subclasses__())
def test_exceptions(exception: BaseException) -> None:
    """Every direct AlsekError subclass must be catchable as AlsekError."""
    try:
        raise exception()
    except BaseException as caught:
        assert isinstance(caught, AlsekError)
| StarcoderdataPython |
8031944 | # New 'clean-pass' of L3HW-SF ~ usin' lessons learned
# <NAME> - 2017-May-23 02:37
#
# useful links:
# DataAugmentation:
# https://github.com/fastai/courses/blob/master/deeplearning1/nbs/lesson3.ipynb
# I forgot but reference anyway:
# https://github.com/fastai/courses/blob/master/deeplearning1/nbs/lesson2.ipynb
# Good followthru of lecture & how to save to submission w/ Pandas:
# https://github.com/philippbayer/cats_dogs_redux/blob/master/Statefarm.ipynb
# Me:
# https://github.com/WNoxchi/Kaukasos/blob/master/FAI/lesson3/L3HW_SF.ipynb
import keras
import bcolz
import os, sys
import numpy as np
import pandas as pd
from glob import glob
from keras.optimizers import Adam
from keras.preprocessing import image
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dense
from keras.layers.core import Flatten, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
sys.path.insert(1, os.path.join(os.getcwd(), '../utils'))
import utils
from vgg16bn import Vgg16BN
# directory setup
HOME_DIR = os.getcwd()
DATA_DIR = HOME_DIR + '/data'
TEST_DIR = DATA_DIR + '/test'
TRAIN_DIR = DATA_DIR + '/train'
VALID_DIR = DATA_DIR + '/valid'
data_path = DATA_DIR + '/'
test_path = TEST_DIR + '/'
train_path = TRAIN_DIR + '/'
valid_path = VALID_DIR + '/'
results_path = DATA_DIR + '/results/'
# utility functions
def save_array(fname, arr): c=bcolz.carray(arr, rootdir=fname, mode='w'); c.flush()
def load_array(fname): return bcolz.open(fname)[:]
def reset_valid(verbose=1):
    """Move every image in the validation set back to its class directory
    in the training set.

    Bug fix: the original used IPython ``%cd`` magics, which are a
    SyntaxError in a plain .py file; replaced with ``os.chdir``.
    """
    counter = 0
    os.chdir(valid_path)
    for i in xrange(10):
        os.chdir('c' + str(i))
        g = glob('*.jpg')
        for n in xrange(len(g)):
            os.rename(g[n], TRAIN_DIR + '/c' + str(i) + '/' + g[n])
            counter += 1
        os.chdir('..')
    if verbose: print("Moved {} files".format(counter))
# modified from: http://forums.fast.ai/t/statefarm-kaggle-comp/183/20
def set_valid(number=1, verbose=1):
    """Moves <number> subjects from training to validation
    directories. Verbosity: 0: Silent; 1: print no. files moved;
    2: print each move operation. Default=1

    Bug fix: ``counter`` was never incremented, so the summary always
    reported 0 files moved. Also hoists the CSV read out of the loop —
    the driver list does not change between iterations.
    """
    counter = 0
    if number < 0: number = 0
    # read CSV file into Pandas DataFrame once, and group by subject
    dil = pd.read_csv(data_path + 'driver_imgs_list.csv')
    grouped_subjects = dil.groupby('subject')
    # repeat for <number> subjects
    for n in xrange(number):
        # pick subject at random (note: may pick the same subject twice,
        # as in the original)
        subject = list(grouped_subjects.groups.keys())[
            np.random.randint(0, high=len(grouped_subjects.groups))]
        # get group assoc w/ subject
        group = grouped_subjects.get_group(subject)
        # loop over group & move imgs to validation dir
        for (subject, clssnm, img) in group.values:
            source = '{}train/{}/{}'.format(data_path, clssnm, img)
            target = source.replace('train', 'valid')
            if verbose > 1: print('mv {} {}'.format(source, target))
            os.rename(source, target)
            counter += 1  # bug fix: previously never incremented
    if verbose: print("Files moved: {}".format(counter))
# function to build FCNet w/ BatchNormalization & Dropout
def create_FCbn_layers(p=0):
    """Return the fully-connected head (with BatchNorm and Dropout) that
    sits on top of the convolutional model's output.

    Bug fixes: the original list literal was missing the commas between
    elements (a SyntaxError), and indexed the Sequential model directly
    (``Conv_model[-1]``) instead of its ``.layers`` list.

    :param p: dropout probability for the Dropout layer.
    """
    return [
        MaxPooling2D(input_shape=Conv_model.layers[-1].output_shape[1:]),
        Flatten(),
        BatchNormalization(),
        Dense(4096, activation='relu'),
        BatchNormalization(),
        Dropout(p),
        Dense(10, activation='softmax'),
    ]
# # creating validation directories
# os.mkdir(VAL_DIR)
# for i in xrange(10):
# os.mkdir(VAL_DIR + '/c' + str(i))
#
# # another way to do this:
# %mkdir $VAL_PATH
# for i in xrange(10):
# %mkdir $VAL_PATH/c"$i"
# setting/resetting validation set
reset_valid()
set_valid(number=3)

# parameters
batch_size = 32
target_size = (224, 224)

# train/valid batch generators with data augmentation
# bug fix: dropped `width_zoom_range`, which is not an ImageDataGenerator
# argument (zooming is already covered by `zoom_range`).
gen = image.ImageDataGenerator(rotation_range=10, width_shift_range=0.05,
                               height_shift_range=0.05, zoom_range=0.1,
                               shear_range=0.1, channel_shift_range=10)

# does it matter that I don't set dim_ordering='tf' ?
trn_batches = gen.flow_from_directory(train_path, target_size=target_size,
                                      batch_size=batch_size, shuffle=True,
                                      class_mode='categorical')
val_batches = gen.flow_from_directory(valid_path, target_size=target_size,
                                      batch_size=batch_size, shuffle=False,
                                      class_mode='categorical')

# load VGG16BN model & its weights
VGGbn = Vgg16BN()
VGGbn.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

# (maybe) train the model at low η to train the Conv layers a bit
VGGbn.fit_generator(trn_batches, trn_batches.n, nb_epoch=1,
                    validation_data=val_batches, nb_val_samples=val_batches.n)
# find out how many epochs at what η to do this until it's ~optimal

# separate Conv layers & create new ConvNet (w/ vgg weights)
# bug fix: the original comprehension had a stray comma
# (`[index, for index, ...]`), which is a SyntaxError.
last_conv_idx = [index for index, layer in enumerate(VGGbn.model.layers)
                 if type(layer) is Convolution2D][-1]
Conv_layers = VGGbn.model.layers[:last_conv_idx + 1]

# create new ConvNet from VGG16BN conv layers
Conv_model = Sequential(Conv_layers)

# now set training batches to not be shuffled. This is critical, because
# classes & labels will be supplied to the FCNet via directory; otherwise
# it won't know what's what. This doesn't need to be done if the whole model
# is left as one whole, but when using output features of one model as inputs
# to another, there has to be some way of keeping track of the labels.
# Remember gen is set to dataaugmentation. Reset this when predicting test set.
trn_batches = gen.flow_from_directory(train_path, target_size=target_size,
                                      batch_size=batch_size, shuffle=False,
                                      class_mode='categorical')

# run Conv Model on trn/val batches to create features as inputs to FCNet
conv_features = Conv_model.predict_generator(trn_batches, trn_batches.nb_sample)
conv_val_feat = Conv_model.predict_generator(val_batches, val_batches.nb_sample)
# (?) does it matter than trn_batches is shuffled? nb_sample vs n?
# batches.n in fit() and batches.nb_sample in predict() ?

# save the convolution model's output features
save_array(results_path + 'conv_features.dat', conv_features)
save_array(results_path + 'conv_val_feat.dat', conv_val_feat)

# create FCNet
FC_model = Sequential(create_FCbn_layers(p=0.3))
FC_model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])

# train FCNet on the ConvNet features
# is there a way to do this as a generator -- or does it not matter?
# maybe save the features, then pull them from disk in batches?
# NOTE(review): DirectoryIterator normally exposes integer labels as
# `.classes`; confirm `.labels` exists in the Keras version in use.
FC_model.fit(conv_features, trn_batches.labels, batch_size=batch_size,
             nb_epoch=1, validation_data=(conv_val_feat, val_batches.labels))

# non-augmented batch generator for test-data
gen = image.ImageDataGenerator()
tst_batches = gen.flow_from_directory(test_path, batch_size=batch_size,
                                      shuffle=False, class_mode=None)

# vgg16gn.test() <---> model.predict_generator(tst_batches, tst_batches.nb_sample)
# run test batches through ConvNet
conv_tst_feat = Conv_model.predict_generator(tst_batches, tst_batches.nb_sample)
# run ConvNet test features through FCNet
preds = FC_model.predict(conv_tst_feat, batch_size=batch_size*2)
# Ensemble the above, save models in array, average predictions
# NOTE: the Conv layers are probably not going to learn much after being trained
# on 1.5M imagenet photos... and there isn't yet a clean way to clear gpu
# memory in JNBs, each time a VGG model is instantiated ~700MB are loaded
# into GPU memory.. so to work around for ensembling: I'll initialize a
# VGG model once & train it's Conv layers, then do a 'hybrid-ensemble' w/
# the FC Nets.
# NOTE: On further thought, it would've been smarter to do the ConvNet features
# outside the function, and just save the features and pass them in.
def hybrid_ensemble(number=1):
    """Train ``number`` FC heads on one shared set of ConvNet features and
    return each head's test-set predictions.

    The Conv layers are trained once (they change little after ImageNet
    pre-training), which also works around GPU memory piling up when
    instantiating several VGG models; only the cheap FC heads are
    re-trained per ensemble member.

    Bug fixes relative to the original: removed the invalid
    ``width_zoom_range`` ImageDataGenerator argument, fixed the stray
    comma in the ``last_conv_idx`` comprehension (a SyntaxError), and the
    function now actually returns the collected predictions.
    """
    batch_size = 32
    target_size = (224, 224)
    reset_valid()
    set_valid(number=3)
    gen = image.ImageDataGenerator(rotation_range=10, width_shift_range=0.05,
                                   height_shift_range=0.05, zoom_range=0.1,
                                   shear_range=0.1, channel_shift_range=10)
    trn_batches = gen.flow_from_directory(train_path, target_size=target_size,
                                          batch_size=batch_size, shuffle=True,
                                          class_mode='categorical')
    val_batches = gen.flow_from_directory(valid_path, target_size=target_size,
                                          batch_size=batch_size, shuffle=False,
                                          class_mode='categorical')
    VGGbn = Vgg16BN()
    VGGbn.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    VGGbn.fit_generator(trn_batches, trn_batches.n, nb_epoch=1,
                        validation_data=val_batches, nb_val_samples=val_batches.n)
    last_conv_idx = [index for index, layer in enumerate(VGGbn.model.layers)
                     if type(layer) is Convolution2D][-1]
    Conv_layers = VGGbn.model.layers[:last_conv_idx + 1]
    Conv_model = Sequential(Conv_layers)
    # Labels must line up with the extracted features: no shuffling from here.
    trn_batches = gen.flow_from_directory(train_path, target_size=target_size,
                                          batch_size=batch_size, shuffle=False,
                                          class_mode='categorical')
    conv_features = Conv_model.predict_generator(trn_batches,
                                                 trn_batches.nb_sample)
    conv_val_feat = Conv_model.predict_generator(val_batches,
                                                 val_batches.nb_sample)
    predarray = []
    for n in xrange(number):
        reset_valid()
        set_valid(number=3)
        FC_model = Sequential(create_FCbn_layers(p=0.3))
        FC_model.compile(Adam(), loss='categorical_crossentropy',
                         metrics=['accuracy'])
        FC_model.fit(conv_features, trn_batches.labels, batch_size=batch_size,
                     nb_epoch=1, validation_data=(conv_val_feat, val_batches.labels))
        gen_t = image.ImageDataGenerator()
        tst_batches = gen_t.flow_from_directory(test_path,
                                                batch_size=batch_size,
                                                shuffle=False, class_mode=None)
        conv_tst_feat = Conv_model.predict_generator(tst_batches,
                                                     tst_batches.nb_sample)
        preds = FC_model.predict(conv_tst_feat, batch_size=batch_size*2)
        predarray.append(preds)
    # NOTE: I could probably save more memory by loading a tabula-rasa FCNet
    # from disk, instead of just defining a new one each iteration.
    return predarray
# record submission
| StarcoderdataPython |
5045997 | from typing import List
class Solution:
    def searchRange(self, nums: List[int], target: int) -> List[int]:
        """Locate the first and last positions of ``target`` in the sorted
        list ``nums`` in O(log n).

        Returns ``[first_index, last_index]``, or ``[-1, -1]`` when target
        is absent.

        Bug fix: the original returned tuples despite the declared
        ``List[int]`` return type; it also hand-rolled two binary searches,
        replaced here by the stdlib ``bisect`` module.
        """
        import bisect  # local import keeps the snippet self-contained

        lo = bisect.bisect_left(nums, target)
        if lo == len(nums) or nums[lo] != target:
            return [-1, -1]
        # bisect_right yields the index one past the final occurrence.
        return [lo, bisect.bisect_right(nums, target) - 1]
if __name__ == '__main__':
    # Smoke test: prints the range for a hit, a miss, and an empty list.
    s = Solution()
    print(s.searchRange([5, 7, 7, 8, 8, 10], 8))
    print(s.searchRange([5, 7, 7, 8, 8, 10], 6))
    print(s.searchRange([], 0))
| StarcoderdataPython |
12856785 | <reponame>xuantan/viewfinder<filename>backend/db/test/id_allocator_test.py
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Tests for IdAllocator data object.
"""
__author__ = '<EMAIL> (<NAME>)'
import unittest
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test
from viewfinder.backend.db.id_allocator import IdAllocator
from base_test import DBBaseTestCase
class IdAllocatorTestCase(DBBaseTestCase):
    """Exercises IdAllocator against the test datastore."""

    @async_test
    def testCreate(self):
        """Allocating many ids from one allocator yields all-unique values."""
        # 13 is presumably the allocation block size — confirm against
        # IdAllocator's constructor.
        alloc = IdAllocator('type', 13)
        num_ids = 3000

        def _OnAllocated(ids):
            # The set collapses duplicates, so equal sizes proves uniqueness.
            id_set = set(ids)
            assert len(id_set) == num_ids
            self.stop()

        # Fire num_ids concurrent allocations; the barrier collects results.
        with util.ArrayBarrier(_OnAllocated) as b:
            [alloc.NextId(self._client, callback=b.Callback()) for i in xrange(num_ids)]

    @async_test
    def testMultiple(self):
        """Tests that multiple allocations from the same sequence do
        not overlap.
        """
        allocs = [IdAllocator('type'), IdAllocator('type')]
        num_ids = 3000

        def _OnAllocated(id_lists):
            assert len(id_lists) == 2
            id_set1 = set(id_lists[0])
            id_set2 = set(id_lists[1])
            assert len(id_set1) == 3000
            assert len(id_set2) == 3000
            # Two allocators over the same sequence must hand out disjoint ids.
            assert id_set1.isdisjoint(id_set2)
            self.stop()

        # Nested barriers: b1/b2 each gather one allocator's ids, and the
        # outer barrier fires once both lists are complete.
        with util.ArrayBarrier(_OnAllocated) as b:
            with util.ArrayBarrier(b.Callback()) as b1:
                [allocs[0].NextId(self._client, b1.Callback()) for i in xrange(num_ids)]
            with util.ArrayBarrier(b.Callback()) as b2:
                [allocs[1].NextId(self._client, b2.Callback()) for i in xrange(num_ids)]
| StarcoderdataPython |
3510605 | # -*- coding: utf-8 -*-
# Test-only Flask/Scout settings: these values must never ship to production.
SECRET_KEY = 'this is not secret...'
REMEMBER_COOKIE_NAME = 'scout_remember_me'
MONGO_DBNAME = 'scoutTest'
BOOTSTRAP_SERVE_LOCAL = True
TEMPLATES_AUTO_RELOAD = True
DEBUG_TB_INTERCEPT_REDIRECTS = False

# Flask-mail: http://pythonhosted.org/flask-mail/
# see: https://bitbucket.org/danjac/flask-mail/issue/3
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587  # STARTTLS submission port; keep TLS/SSL flags in sync below
MAIL_USE_TLS = True
MAIL_USE_SSL = False

# Chanjo-Report
REPORT_LANGUAGE = 'en'
ACCEPT_LANGUAGES = ['en', 'sv']

# FEATURE FLAGS
SHOW_CAUSATIVES = True

# OMIM API KEY: Required for downloading definitions from OMIM (https://www.omim.org/api)
#OMIM_API_KEY = 'valid_omim_api_key'
| StarcoderdataPython |
11241677 | <reponame>gustavocelani/customers_gxp_heatmap
###############################################################################
#
# Filename: analyze.py
#
# Description: Parsed customers analysis.
# Usage: python3 analyze.py --customers <dataset>
#
# Version: 1.0
# Created: 25/06/2020 10:28:31 AM
# Revision: 1
#
# Author: <NAME>
#
################################################################################
# Import
from progress.bar import IncrementalBar
from prettytable import PrettyTable
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import argparse
import time
import json
import os
# Argument Parser: the only input is the path to the parsed-customers JSON.
argumentParser = argparse.ArgumentParser()
argumentParser.add_argument("-c", "--customers", required = True, help = "path to input parsed customers dataset")
args = vars(argumentParser.parse_args())

# Console start header (requires the external `figlet` binary on PATH)
os.system("figlet \"Data Analysis\"")
print("\nCustomers path: ", args["customers"])

# Reading parsed customers json file (a list of flat dicts)
customers = []
with open(args["customers"], "r", encoding = "latin-1") as customersJsonFile:
    customers = json.load(customersJsonFile)

# Attributes usage analysis: count, per attribute, how many customers have
# a non-empty value.
# NOTE(review): assumes every record shares the key set (and key order) of
# the first record — confirm against the parser that produced the dataset.
print()
customerJsonKeys = list(customers[0].keys())
customerJsonKeysUsageCount = [0] * len(customerJsonKeys)
progressBar = IncrementalBar('Attributes Usage Analysis', max = len(customers))
for i in range(len(customers)):
    progressBar.next()
    for j in range(len(customers[0])):
        # An attribute counts as "used" when its value is not the empty string.
        if customers[i][customerJsonKeys[j]] != '':
            customerJsonKeysUsageCount[j] += 1
progressBar.finish()

# Attibutes percent usage: convert raw counts to percentages of all customers.
print()
customerJsonKeysUsagePercent = [0] * len(customerJsonKeys)
progressBar = IncrementalBar('Attributes Percent Usage ', max = len(customerJsonKeys))
for i in range(len(customerJsonKeys)):
    progressBar.next()
    customerJsonKeysUsagePercent[i] = (customerJsonKeysUsageCount[i] / len(customers)) * 100
progressBar.finish()

# Building attribute usage tables: full breakdown plus the always-used (100%)
# and never-used (0%) attribute lists.
fullUsageTable = PrettyTable(['Attribute', 'Count', 'Usage [%]'])
allUsageTable = PrettyTable(['Attribute'])
anyUsageTable = PrettyTable(['Attribute'])
for i in range(len(customerJsonKeys)):
    fullUsageTable.add_row([customerJsonKeys[i], customerJsonKeysUsageCount[i], round(customerJsonKeysUsagePercent[i], 2)])
    if customerJsonKeysUsagePercent[i] == 100:
        allUsageTable.add_row([customerJsonKeys[i]])
    if customerJsonKeysUsagePercent[i] == 0:
        anyUsageTable.add_row([customerJsonKeys[i]])

# Printing attributes usage tables
print("\n%s" % fullUsageTable)
print("\n100%% Customers Usage Attibutes\n%s" % allUsageTable)
print("\n0%% Customers Usage Attibutes\n%s" % anyUsageTable)

# Plotting attributes usage analysis (blocks until the window is closed)
input("\nPress ENTER to plot attributes usage graph...")
index = np.arange(len(customerJsonKeys))
plt.bar(index, customerJsonKeysUsageCount)
plt.title('Customer Enrollment Attibutes Usage')
plt.xlabel('Customer Enrollment Attibute', fontsize = 8)
plt.ylabel('Usage Count', fontsize = 8)
plt.xticks(index, customerJsonKeys, fontsize = 8, rotation = 90)
plt.show()

print()
exit()
| StarcoderdataPython |
3290636 | <filename>babyshop_app/products/migrations/0005_rename_describtion_product_description.py
# Generated by Django 4.0.2 on 2022-02-24 05:15
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: renames Product.describtion (a typo) to description."""

    dependencies = [
        ('products', '0004_category_product_category'),
    ]

    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='describtion',
            new_name='description',
        ),
    ]
| StarcoderdataPython |
class FakeRequest:
    """
    A replacement for Django's Request object. This is only used for unit
    tests at the moment. If you're not using Django and need to create such
    a class have a look at the source for the tests fake request class in
    in tests/request.py
    """

    #: Boolean, if the connection is secured or not
    secure = False

    #: HTTP headers like in the PHP $_SERVER variable, see
    #: http://php.net/manual/en/reserved.variables.server.php
    META = {}

    #: Cookies... work in progress
    COOKIES = False

    def __init__(self, headers):
        """
        Configure request object according to the headers we get

        :param headers: see META
        :type headers: dict
        :rtype: None
        """
        self.META = headers
        # Bug fix: use .get() so a headers dict without an 'HTTPS' key no
        # longer raises KeyError (plain-HTTP requests simply stay insecure).
        if self.META.get('HTTPS'):
            self.secure = True

    def is_secure(self):
        """
        Returns a boolean, if the connection is secured

        :rtype: bool
        """
        return self.secure
| StarcoderdataPython |
11344709 | from autodoc import *
import sys
def check(got, expected, expected_builtin=None, skip=False):
    """Compare `got` against the expected docstring, preferring the
    builtin-flavoured expectation when running as a Python builtin."""
    if skip:
        return
    expect = expected
    if is_python_builtin() and expected_builtin is not None:
        expect = expected_builtin
    if got != expect:
        raise RuntimeError(
            "\n" + "Expected: [" + str(expect) + "]\n" + "Got : [" + str(got) + "]")
def is_new_style_class(cls):
    """Old/new-style class probe: new-style classes expose ``__class__``."""
    try:
        cls.__class__
    except AttributeError:
        return False
    return True
if not is_new_style_class(A):
# Missing static methods make this hard to test... skip if -classic is
# used!
sys.exit(0)
# skip builtin check - the autodoc is missing, but it probably should not be
skip = True
check(A.__doc__, "Proxy of C++ A class.", "::A")
check(A.funk.__doc__, "just a string.")
check(A.func0.__doc__,
"func0(self, arg2, hello) -> int",
"func0(arg2, hello) -> int")
check(A.func1.__doc__,
"func1(A self, short arg2, Tuple hello) -> int",
"func1(short arg2, Tuple hello) -> int")
check(A.func2.__doc__,
"\n"
" func2(self, arg2, hello) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" arg2: short\n"
" hello: int tuple[2]\n"
"\n"
" ",
"\n"
"func2(arg2, hello) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"arg2: short\n"
"hello: int tuple[2]\n"
"\n"
""
)
check(A.func3.__doc__,
"\n"
" func3(A self, short arg2, Tuple hello) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" arg2: short\n"
" hello: int tuple[2]\n"
"\n"
" ",
"\n"
"func3(short arg2, Tuple hello) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"arg2: short\n"
"hello: int tuple[2]\n"
"\n"
""
)
check(A.func0default.__doc__,
"\n"
" func0default(self, e, arg3, hello, f=2) -> int\n"
" func0default(self, e, arg3, hello) -> int\n"
" ",
"\n"
"func0default(e, arg3, hello, f=2) -> int\n"
"func0default(e, arg3, hello) -> int\n"
""
)
check(A.func1default.__doc__,
"\n"
" func1default(A self, A e, short arg3, Tuple hello, double f=2) -> int\n"
" func1default(A self, A e, short arg3, Tuple hello) -> int\n"
" ",
"\n"
"func1default(A e, short arg3, Tuple hello, double f=2) -> int\n"
"func1default(A e, short arg3, Tuple hello) -> int\n"
""
)
check(A.func2default.__doc__,
"\n"
" func2default(self, e, arg3, hello, f=2) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg3: short\n"
" hello: int tuple[2]\n"
" f: double\n"
"\n"
" func2default(self, e, arg3, hello) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg3: short\n"
" hello: int tuple[2]\n"
"\n"
" ",
"\n"
"func2default(e, arg3, hello, f=2) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg3: short\n"
"hello: int tuple[2]\n"
"f: double\n"
"\n"
"func2default(e, arg3, hello) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg3: short\n"
"hello: int tuple[2]\n"
"\n"
""
)
check(A.func3default.__doc__,
"\n"
" func3default(A self, A e, short arg3, Tuple hello, double f=2) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg3: short\n"
" hello: int tuple[2]\n"
" f: double\n"
"\n"
" func3default(A self, A e, short arg3, Tuple hello) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg3: short\n"
" hello: int tuple[2]\n"
"\n"
" ",
"\n"
"func3default(A e, short arg3, Tuple hello, double f=2) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg3: short\n"
"hello: int tuple[2]\n"
"f: double\n"
"\n"
"func3default(A e, short arg3, Tuple hello) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg3: short\n"
"hello: int tuple[2]\n"
"\n"
""
)
check(A.func0static.__doc__,
"\n"
" func0static(e, arg2, hello, f=2) -> int\n"
" func0static(e, arg2, hello) -> int\n"
" ",
"\n"
"func0static(e, arg2, hello, f=2) -> int\n"
"func0static(e, arg2, hello) -> int\n"
""
)
check(A.func1static.__doc__,
"\n"
" func1static(A e, short arg2, Tuple hello, double f=2) -> int\n"
" func1static(A e, short arg2, Tuple hello) -> int\n"
" ",
"\n"
"func1static(A e, short arg2, Tuple hello, double f=2) -> int\n"
"func1static(A e, short arg2, Tuple hello) -> int\n"
""
)
check(A.func2static.__doc__,
"\n"
" func2static(e, arg2, hello, f=2) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg2: short\n"
" hello: int tuple[2]\n"
" f: double\n"
"\n"
" func2static(e, arg2, hello) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg2: short\n"
" hello: int tuple[2]\n"
"\n"
" ",
"\n"
"func2static(e, arg2, hello, f=2) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg2: short\n"
"hello: int tuple[2]\n"
"f: double\n"
"\n"
"func2static(e, arg2, hello) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg2: short\n"
"hello: int tuple[2]\n"
"\n"
""
)
check(A.func3static.__doc__,
"\n"
" func3static(A e, short arg2, Tuple hello, double f=2) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg2: short\n"
" hello: int tuple[2]\n"
" f: double\n"
"\n"
" func3static(A e, short arg2, Tuple hello) -> int\n"
"\n"
" Parameters\n"
" ----------\n"
" e: A *\n"
" arg2: short\n"
" hello: int tuple[2]\n"
"\n"
" ",
"\n"
"func3static(A e, short arg2, Tuple hello, double f=2) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg2: short\n"
"hello: int tuple[2]\n"
"f: double\n"
"\n"
"func3static(A e, short arg2, Tuple hello) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"e: A *\n"
"arg2: short\n"
"hello: int tuple[2]\n"
"\n"
""
)
if sys.version_info[0:2] > (2, 4):
# Python 2.4 does not seem to work
check(A.variable_a.__doc__,
"A_variable_a_get(self) -> int",
"A.variable_a"
)
check(A.variable_b.__doc__,
"A_variable_b_get(A self) -> int",
"A.variable_b"
)
check(A.variable_c.__doc__,
"\n"
"A_variable_c_get(self) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"self: A *\n"
"\n",
"A.variable_c"
)
check(A.variable_d.__doc__,
"\n"
"A_variable_d_get(A self) -> int\n"
"\n"
"Parameters\n"
"----------\n"
"self: A *\n"
"\n",
"A.variable_d"
)
check(B.__doc__,
"Proxy of C++ B class.",
"::B"
)
check(C.__init__.__doc__, "__init__(self, a, b, h) -> C", None, skip)
check(D.__init__.__doc__,
"__init__(D self, int a, int b, Hola h) -> D", None, skip)
check(E.__init__.__doc__,
"\n"
" __init__(self, a, b, h) -> E\n"
"\n"
" Parameters\n"
" ----------\n"
" a: special comment for parameter a\n"
" b: another special comment for parameter b\n"
" h: enum Hola\n"
"\n"
" ", None, skip
)
check(F.__init__.__doc__,
"\n"
" __init__(F self, int a, int b, Hola h) -> F\n"
"\n"
" Parameters\n"
" ----------\n"
" a: special comment for parameter a\n"
" b: another special comment for parameter b\n"
" h: enum Hola\n"
"\n"
" ", None, skip
)
check(B.funk.__doc__,
"funk(B self, int c, int d) -> int",
"funk(int c, int d) -> int")
check(funk.__doc__, "funk(A e, short arg2, int c, int d) -> int")
check(funkdefaults.__doc__,
"\n"
" funkdefaults(A e, short arg2, int c, int d, double f=2) -> int\n"
" funkdefaults(A e, short arg2, int c, int d) -> int\n"
" ",
"\n"
"funkdefaults(A e, short arg2, int c, int d, double f=2) -> int\n"
"funkdefaults(A e, short arg2, int c, int d) -> int\n"
""
)
check(func_input.__doc__, "func_input(int * INPUT) -> int")
check(func_output.__doc__, "func_output() -> int")
check(func_inout.__doc__, "func_inout(int * INOUT) -> int")
check(func_cb.__doc__, "func_cb(int c, int d) -> int")
check(banana.__doc__, "banana(S a, S b, int c, Integer d)")
| StarcoderdataPython |
11296311 | <reponame>kolszewska/MedTagger
"""Module responsible for definition of Auth service."""
from typing import Any
from flask import request
from flask_restplus import Resource
from medtagger.api import api
from medtagger.api.auth.business import create_user, sign_in_user
from medtagger.api.auth import serializers
auth_ns = api.namespace('auth', 'Auth methods')
@auth_ns.route('/register')
class Register(Resource):
    """Register user endpoint."""

    @staticmethod
    @api.expect(serializers.new_user)
    @api.doc(responses={201: 'User created', 400: 'Invalid arguments'})
    def post() -> Any:
        """Register the user."""
        payload = request.json
        new_id, new_token = create_user(
            payload['email'],
            payload['password'],
            payload['firstName'],
            payload['lastName'],
        )
        return {'id': new_id, 'token': new_token}, 201
@auth_ns.route('/sign-in')
class SignIn(Resource):
    """Sign in endpoint."""

    @staticmethod
    @api.expect(serializers.sign_in)
    @api.doc(responses={200: 'Signed in', 400: 'User does not exist or wrong password was provided'})
    def post() -> Any:
        """Sign in the user."""
        credentials = request.json
        token = sign_in_user(credentials['email'], credentials['password'])
        return {"token": token}, 200
| StarcoderdataPython |
9713404 | <filename>src/ebay_rest/api/commerce_taxonomy/models/category_tree_node.py
# coding: utf-8
"""
Taxonomy API
Use the Taxonomy API to discover the most appropriate eBay categories under which sellers can offer inventory items for sale, and the most likely categories under which buyers can browse or search for items to purchase. In addition, the Taxonomy API provides metadata about the required and recommended category aspects to include in listings, and also has two operations to retrieve parts compatibility information. # noqa: E501
OpenAPI spec version: v1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CategoryTreeNode(object):
    """Swagger model for one node of an eBay category tree.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Attribute name -> Swagger-declared type of that attribute.
    swagger_types = {
        'category': 'Category',
        'category_tree_node_level': 'int',
        'child_category_tree_nodes': 'list[CategoryTreeNode]',
        'leaf_category_tree_node': 'bool',
        'parent_category_tree_node_href': 'str'
    }

    # Attribute name -> key used in the JSON wire representation.
    attribute_map = {
        'category': 'category',
        'category_tree_node_level': 'categoryTreeNodeLevel',
        'child_category_tree_nodes': 'childCategoryTreeNodes',
        'leaf_category_tree_node': 'leafCategoryTreeNode',
        'parent_category_tree_node_href': 'parentCategoryTreeNodeHref'
    }

    def __init__(self, category=None, category_tree_node_level=None, child_category_tree_nodes=None, leaf_category_tree_node=None, parent_category_tree_node_href=None):  # noqa: E501
        """CategoryTreeNode - a model defined in Swagger"""  # noqa: E501
        # Private storage starts empty; non-None constructor arguments are
        # routed through the property setters below.
        self._category = None
        self._category_tree_node_level = None
        self._child_category_tree_nodes = None
        self._leaf_category_tree_node = None
        self._parent_category_tree_node_href = None
        self.discriminator = None
        supplied = (
            ('category', category),
            ('category_tree_node_level', category_tree_node_level),
            ('child_category_tree_nodes', child_category_tree_nodes),
            ('leaf_category_tree_node', leaf_category_tree_node),
            ('parent_category_tree_node_href', parent_category_tree_node_href),
        )
        for attr_name, attr_value in supplied:
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def category(self):
        """Category: the category payload attached to this tree node."""
        return self._category

    @category.setter
    def category(self, category):
        self._category = category

    @property
    def category_tree_node_level(self):
        """int: absolute depth of this node in its category tree.

        The root node of any full category tree is always at level 0.
        """
        return self._category_tree_node_level

    @category_tree_node_level.setter
    def category_tree_node_level(self, category_tree_node_level):
        self._category_tree_node_level = category_tree_node_level

    @property
    def child_category_tree_nodes(self):
        """list[CategoryTreeNode]: immediate children of this node (populated
        recursively down to the leaves); returned only when this node is not
        a leaf.
        """
        return self._child_category_tree_nodes

    @child_category_tree_nodes.setter
    def child_category_tree_nodes(self, child_category_tree_nodes):
        self._child_category_tree_nodes = child_category_tree_nodes

    @property
    def leaf_category_tree_node(self):
        """bool: True when this node has no children; the service returns the
        field only when it is true.
        """
        return self._leaf_category_tree_node

    @leaf_category_tree_node.setter
    def leaf_category_tree_node(self, leaf_category_tree_node):
        self._leaf_category_tree_node = leaf_category_tree_node

    @property
    def parent_category_tree_node_href(self):
        """str: href of the getCategorySubtree call for this node's parent;
        not returned for the root node of the tree.
        """
        return self._parent_category_tree_node_href

    @parent_category_tree_node_href.setter
    def parent_category_tree_node_href(self, parent_category_tree_node_href):
        self._parent_category_tree_node_href = parent_category_tree_node_href

    def to_dict(self):
        """Returns the model properties as a dict"""

        def _convert(value):
            # Recursively serialise nested models; plain values pass through.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr in self.swagger_types}
        # Generated escape hatch: merge dict contents if the model ever
        # subclasses dict (it does not here, so this branch is inert).
        if issubclass(CategoryTreeNode, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, CategoryTreeNode) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not (self == other)
| StarcoderdataPython |
9726881 | <filename>stubs.min/System/Windows/Forms/__init___parts/ToolStripItemTextRenderEventArgs.py<gh_stars>1-10
class ToolStripItemTextRenderEventArgs(ToolStripItemRenderEventArgs):
    """
    Provides data for the System.Windows.Forms.ToolStripRenderer.RenderItemText event.

    ToolStripItemTextRenderEventArgs(g: Graphics,item: ToolStripItem,text: str,textRectangle: Rectangle,textColor: Color,textFont: Font,format: TextFormatFlags)
    ToolStripItemTextRenderEventArgs(g: Graphics,item: ToolStripItem,text: str,textRectangle: Rectangle,textColor: Color,textFont: Font,textAlign: ContentAlignment)
    """

    # NOTE(review): auto-generated IronPython stub for a .NET (CLR) type.
    # The bodies below are placeholders — the getters return a fresh object()
    # and the setters discard their value — and exist only so that IDEs can
    # read the documented member signatures. The real behaviour lives in the
    # underlying System.Windows.Forms assembly.
    @staticmethod
    def __new__(self,g,item,text,textRectangle,textColor,textFont,*__args):
        """
        __new__(cls: type,g: Graphics,item: ToolStripItem,text: str,textRectangle: Rectangle,textColor: Color,textFont: Font,format: TextFormatFlags)

        __new__(cls: type,g: Graphics,item: ToolStripItem,text: str,textRectangle: Rectangle,textColor: Color,textFont: Font,textAlign: ContentAlignment)
        """
        pass

    # Stub property: placeholder getter/setter; see class-level note above.
    Text=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets the text to be drawn on the System.Windows.Forms.ToolStripItem.

    Get: Text(self: ToolStripItemTextRenderEventArgs) -> str
    Set: Text(self: ToolStripItemTextRenderEventArgs)=value
    """

    # Stub property: placeholder getter/setter; see class-level note above.
    TextColor=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets the color of the System.Windows.Forms.ToolStripItem text.

    Get: TextColor(self: ToolStripItemTextRenderEventArgs) -> Color
    Set: TextColor(self: ToolStripItemTextRenderEventArgs)=value
    """

    # Stub property: placeholder getter/setter; see class-level note above.
    TextDirection=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets whether the text is drawn vertically or horizontally.

    Get: TextDirection(self: ToolStripItemTextRenderEventArgs) -> ToolStripTextDirection
    Set: TextDirection(self: ToolStripItemTextRenderEventArgs)=value
    """

    # Stub property: placeholder getter/setter; see class-level note above.
    TextFont=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets the font of the text drawn on the System.Windows.Forms.ToolStripItem.

    Get: TextFont(self: ToolStripItemTextRenderEventArgs) -> Font
    Set: TextFont(self: ToolStripItemTextRenderEventArgs)=value
    """

    # Stub property: placeholder getter/setter; see class-level note above.
    TextFormat=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets the display and layout information of the text drawn on the System.Windows.Forms.ToolStripItem.

    Get: TextFormat(self: ToolStripItemTextRenderEventArgs) -> TextFormatFlags
    Set: TextFormat(self: ToolStripItemTextRenderEventArgs)=value
    """

    # Stub property: placeholder getter/setter; see class-level note above.
    TextRectangle=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Gets or sets the rectangle that represents the bounds to draw the text in.

    Get: TextRectangle(self: ToolStripItemTextRenderEventArgs) -> Rectangle
    Set: TextRectangle(self: ToolStripItemTextRenderEventArgs)=value
    """
| StarcoderdataPython |
3230889 | <gh_stars>1-10
import matplotlib.pyplot as plt
# Interactively collect two parallel data series (one value per year for each
# dataset) and plot them on a shared axis.
years = []
series_one = []
series_two = []

start_year = int(input("Input the starting year: "))
end_year = int(input("Input the ending year: "))

for current_year in range(start_year, end_year + 1):
    value_one = float(input("Input the data value for Dataset 1 in " + str(current_year) + ": "))
    value_two = float(input("Input the data value for Dataset 2 in " + str(current_year) + ": "))
    years.append(current_year)
    series_one.append(value_one)
    series_two.append(value_two)

# Dataset 1 is drawn in black ('k'); Dataset 2 uses the default colour cycle.
plt.plot(years, series_one, 'k',
         years, series_two)
plt.xlabel('Year')
plt.ylabel('Value')
plt.show()
9666161 | import unittest
from typing import NoReturn
from tests.common import *
from ddns_manager.ddns_updater import DDNSUpdater
class TestDDNSUpdater(unittest.TestCase):
    """Contract tests for the abstract DDNSUpdater interface."""

    def test_update_ddns_record_signature(self):
        """DDNSUpdater must expose an abstract update_ddns_record(self, ip: str) -> NoReturn.

        Renamed from the misleading ``test_get_current_ip_signature``: the
        body verifies ``update_ddns_record``, not any get-current-ip method.
        """
        # The attribute must exist and be callable (getattr default None
        # makes a missing method fail the callable() assertion cleanly).
        method = getattr(DDNSUpdater, "update_ddns_record", None)
        self.assertTrue(callable(method))
        # It must be declared abstract so concrete updaters are forced to
        # implement it.
        self.assertTrue(is_abstract(method))
        # The signature string is part of the public contract.
        self.assertEqual("(self, ip: str) -> NoReturn", callable_signature(method))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.