code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from rdkit import Chem
from rdkit.Chem import AllChem
import re
import os
from subprocess import Popen
import matplotlib.pyplot as plt
from scipy.stats import linregress
import numpy as np
import argparse
from multiprocessing import Pool
# path_to_orca = "/usr/local/orca_4_1_1_linux_x86-64/orca"
# path_to_orca = "/usr/local/orca_4_1_1/orca"
# Path to the ORCA executable used for all calculations (edit for your install)
path_to_orca = "/usr/local/orca_4_1_2_linux_x86-64/orca"
conversion_factor = 627.5  # (kcal/mol)/Ha^-1
# ORCA keyword lines for each level of theory, as a pair:
#   (optimisation + frequency keyword line,
#    single point refinement keyword line, or None to reuse the opt energy).
# Large block of commented-out benchmark entries removed as dead code.
method_dict = {"Default": ("! PBE0 def2-SVP RIJCOSX def2/J TIGHTSCF TightOpt Freq D3BJ",
                           "! wB97X-D3 def2-TZVPP RIJCOSX def2/J TIGHTSCF Grid6 GridX6"),
               "High-level": ("! RI-MP2 def2-TZVP RIJCOSX def2/J def2-TZVP/C TIGHTSCF TightOpt NumFreq",
                              "! DLPNO-CCSD(T) def2-QZVPP RIJCOSX AutoAux TIGHTSCF Grid6 GridX6"),
               "Cheap": ("! PBE def2-SVP RIJCOSX def2/J Opt Freq D3BJ", None),
               "Proposed": ("! wB97X-D3 def2-TZVP RIJCOSX def2/J TightOpt TIGHTSCF Freq Grid6 GridX6 CPCM",
                            "! RI-B2GP-PLYP def2-TZVPP def2-TZVPP/C D3BJ RIJCOSX AutoAux TIGHTSCF Grid6 GridX6 SP CPCM"),
               "Reference": ("! RI-SCS-MP2 def2-TZVP RIJCOSX def2/J def2-TZVP/C TIGHTSCF TightOpt NumFreq CPCM",
                             "!DLPNO-CCSD(T) def2-TZVPP RIJCOSX AutoAux TIGHTSCF Grid6 GridX6 SP CPCM"),
               "maProposed": ("! wB97X-D3 ma-def2-TZVP RIJCOSX AutoAux TightOpt TIGHTSCF Freq Grid6 GridX6 CPCM",
                              "! RI-B2GP-PLYP ma-def2-TZVPP D3BJ RIJCOSX AutoAux TIGHTSCF Grid6 GridX6 SP CPCM"),
               "maReference": ("! RI-SCS-MP2 ma-def2-TZVP RIJCOSX AutoAux TIGHTSCF TightOpt NumFreq CPCM",
                               "! DLPNO-CCSD(T) ma-def2-TZVPP RIJCOSX AutoAux TIGHTSCF Grid6 GridX6 SP CPCM"),
               }
# SMILES fragments for each probe, as a triple:
#   (radical probe, H-capped probe,
#    fragment appended to the adduct SMILES via ring-closure label %99)
probe_dict = {"SiH3": ("[H][Si]([H])[H]", "[H][Si]([H])([H])[H]", ".[Si]%99([H])([H])[H]"),
              "SeH": ("[Se][H]", "[H][Se][H]", ".[Se]%99[H]"),
              "Br": ("[Br]", "Br", ".[Br]%99"),
              "Cl": ("[Cl]", "Cl", ".[Cl]%99"),
              "F": ("[F]", "F", ".[F]%99"),
              "I": ("[I]", "I", ".[I]%99"),
              "OH": ("[O][H]", "[H]O[H]", ".[O]%99[H]"),
              "SH": ("[S][H]", "[H]S[H]", ".[S]%99[H]"),
              "TeH": ("[Te][H]", "[H][Te][H]", ".[Te]%99[H]"),
              "NH2": ("[N]([H])[H]", "[H]N([H])[H]", ".[N]%99([H])[H]"),
              "PH2": ("[P]([H])[H]", "[H]P([H])[H]", ".[P]%99([H])[H]"),
              "AsH2": ("[As]([H])[H]", "[H][As]([H])[H]", ".[As]%99([H])[H]"),
              "CH3": ("[H][C]([H])[H]", "C", ".C%99"),
              "GeH3": ("[H][Ge]([H])[H]", "[H][Ge]([H])([H])[H]", ".[Ge]%99([H])([H])[H]"),
              "SnH3": ("[H][Sn]([H])[H]", "[H][Sn]([H])([H])[H]", ".[Sn]%99([H])([H])[H]")
              }
def get_args():
    """
    Parse the command line arguments.

    :return: (argparse.Namespace) parsed arguments
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("strained", action='store', type=str,
                        help='SMILES string of strained molecule')
    parser.add_argument("adduct", action='store', type=str,
                        help='SMILES string of adduct')
    parser.add_argument("-np", '--number_processors', action='store', type=int, default=1,
                        help='Number of processors')
    level_choices = ['Default', 'High-level', 'Cheap', 'Proposed',
                     'Reference', 'maProposed', 'maReference']
    parser.add_argument("-l", '--level', action='store', type=str, default="Default",
                        help='Level of theory for calculations', choices=level_choices)
    parser.add_argument("-chg", '--charge_on_probe', action='store', type=int, default=0,
                        help='Charge on probe for addition to strained molecule')
    parser.add_argument("-m", '--max_core', action='store', type=float, default=4000,
                        help='Maximum memory per core')
    return parser.parse_args()
def make_mol_obj(smiles_string):
    """
    Make an RDKit molecule object from a SMILES string (e.g. generated from
    ChemDraw), add explicit hydrogens and embed a single 3D conformer.

    :param smiles_string: (str) SMILES of a molecule
    :return: (object) RDKit Mol object
    """
    mol = Chem.AddHs(Chem.MolFromSmiles(smiles_string))
    AllChem.EmbedMultipleConfs(mol, numConfs=1, params=AllChem.ETKDG())
    return mol
def gen_conformer_xyzs(mol_obj, conf_ids):
    """
    Generate xyz lists for all the conformers in conf_ids.

    :param mol_obj: rdkit object
    :param conf_ids: (list) list of conformer ids to convert to xyz
    :return: (list) of xyz lists, one per conformer
    """
    xyzs = []
    for conf_id in conf_ids:
        mol_block_lines = Chem.MolToMolBlock(mol_obj, confId=conf_id).split('\n')
        conf_xyzs = []
        for line in mol_block_lines:
            items = line.split()
            # Atom lines of a V2000 mol block have 16 whitespace-separated
            # fields: x, y, z, atom label, then connectivity/charge flags
            if len(items) == 16:
                conf_xyzs.append([items[3], float(items[0]), float(items[1]), float(items[2])])
        xyzs.append(conf_xyzs)

    if not xyzs:
        exit('Could not generate xyzs from RDKit object')
    return xyzs
def modify_adduct_smiles(smiles_string):
    """
    Move a leading attachment point '[*]' behind the first atom symbol and
    rewrite every '[*]' as the ring-bond closure label '%99', so the probe
    fragment can be bonded to it.

    e.g. [*]C([H])([H])[C]([H])[H]  ->  C%99([H])([H])[C]([H])[H]

    :param smiles_string: (str) adduct SMILES containing '[*]'
    :return: (str) modified SMILES with '%99' in place of '[*]'
    """
    if smiles_string.startswith('[*]'):
        remainder = smiles_string[3:]
        if remainder[0].isalpha() and remainder[1].isalpha():
            # Two-letter element symbol (e.g. 'Cl') follows the dummy atom
            smiles_string = remainder[:2] + '[*]' + remainder[2:]
        elif remainder[0].isalpha():
            # Single-letter element symbol follows the dummy atom
            smiles_string = remainder[:1] + '[*]' + remainder[1:]
        else:
            exit('Failed to modify the adduct SMILES string')
    return smiles_string.replace("[*]", "%99")
def xyzs2xyzfile(xyzs, filename=None, basename=None, title_line=''):
    """
    For a list of xyzs in the form e.g [[C, 0.0, 0.0, 0.0], ...] create a
    standard .xyz file.

    :param xyzs: List of xyzs
    :param filename: Name of the generated xyz file
    :param basename: Name of the generated xyz file without the file extension
    :param title_line: String to print on the title line of an xyz file
    :return: The filename, or 1 if no filename / no xyzs were given
    """
    if basename:
        filename = basename + '.xyz'
    if filename is None:
        return 1

    if filename.endswith('.xyz'):
        with open(filename, 'w') as xyz_file:
            if not xyzs:
                return 1
            # Header: atom count then title line
            print(len(xyzs), '\n', title_line, sep='', file=xyz_file)
            for xyz_line in xyzs:
                print('{:<3}{:^10.5f}{:^10.5f}{:^10.5f}'.format(*xyz_line), file=xyz_file)
    return filename
def add_h_to_adduct(adduct_smiles):
    """
    Convert the SMILES of an adduct (strained molecule + probe fragment,
    separated by '.') into the H-capped species by stripping the square
    brackets around radical atoms in the strained part, e.g. '[C]' -> 'C',
    so the open valence is filled with hydrogens.

    :param adduct_smiles: (str) SMILES of the adduct
    :return: (str) SMILES with radical atom brackets removed
    """
    # Fix: raw strings for the regexes — "\[" in a plain string is an invalid
    # escape sequence (DeprecationWarning, SyntaxError in future Pythons)
    pattern1 = r"\[.\]"
    pattern2 = r"\[..\]"
    atom_labels_sq_brackets = re.findall(pattern1, adduct_smiles)
    atom_labels_sq_brackets += re.findall(pattern2, adduct_smiles)
    # Only atoms appearing in the strained fragment (before the first '.')
    # are candidates for de-bracketing
    len_strained_smiles = len(adduct_smiles.split('.', 1)[0])
    for atom_label_sq_brackets in atom_labels_sq_brackets:
        if atom_label_sq_brackets != "[H]":
            if atom_label_sq_brackets in adduct_smiles[:len_strained_smiles]:
                adduct_smiles = adduct_smiles.replace(atom_label_sq_brackets, atom_label_sq_brackets[1:-1])
    return adduct_smiles
def did_orca_calculation_terminate_normally(out_filename):
    """
    Check whether an existing ORCA output file represents a finished run.

    :param out_filename: (str) name of the ORCA .out file
    :return: (bool) True if ORCA terminated normally or the optimisation
             failed to converge (i.e. the run completed), False otherwise
    """
    # Fix: close the file handle instead of leaking it
    with open(out_filename, 'r', encoding="utf-8") as out_file:
        out_lines = out_file.readlines()

    for n_line, line in enumerate(reversed(out_lines)):
        # Fix: the original `'A' or 'B' in line` was always truthy, so the
        # function returned True for every non-empty file; each substring
        # must be tested against the line explicitly
        if 'ORCA TERMINATED NORMALLY' in line or 'The optimization did not converge' in line:
            return True
        if n_line > 50:
            # The above lines are pretty close to the end of the file – there's no point parsing it all
            break
    return False
def gen_orca_inp(mol, name, opt=False, sp=False, pal=1):
    """
    Generate an ORCA input (.inp) file for a molecule.

    Relies on the module-level globals `level` (key into method_dict) and
    `maxcore` (memory per core), which are set in the __main__ block.

    :param mol: molecule-like object with .xyzs, .charge and .mult attributes
    :param name: (str) base name of the input file (without extension)
    :param opt: (bool) write the optimisation + frequency keyword line
    :param sp: (bool) write the single point keyword line
    :param pal: (int) number of parallel ORCA processes
    :return: (str) name of the generated .inp file
    """
    inp_filename = name + ".inp"
    with open(inp_filename, "w") as inp_file:
        if opt:
            keyword_line = method_dict[level][0]
            # A single atom cannot be geometry optimised – strip Opt keywords
            if len(mol.xyzs) == 1:
                if "TightOpt" in keyword_line:
                    keyword_line = keyword_line.replace("TightOpt", "")
                if "Opt" in keyword_line:
                    keyword_line = keyword_line.replace("Opt", "")
            print(keyword_line, file=inp_file)
        if sp:
            print(method_dict[level][1], file=inp_file)
        print("%maxcore", maxcore, file=inp_file)
        print("%pal nprocs", pal, "end", file=inp_file)
        # Geometry block: charge, multiplicity, then xyz lines
        print("*xyz", mol.charge, mol.mult, file=inp_file)
        [print('{:<3}{:^12.8f}{:^12.8f}{:^12.8f}'.format(*line), file=inp_file) for line in mol.xyzs]
        print('*', file=inp_file)
    return inp_filename
def run_orca(inp_filename, out_filename):
    """
    Run the ORCA calculation given the .inp file as a subprocess.

    If a library of previous results exists (Library/<level>/<out_filename>),
    or a completed output file is already present, the cached output is
    returned instead of rerunning ORCA.

    :param inp_filename: (str) ORCA input file
    :param out_filename: (str) ORCA output file to write/read
    :return: (list) lines of the ORCA output file
    """
    if os.path.exists(os.path.join("Library", level)):
        if os.path.exists(os.path.join("Library", level, out_filename)):
            # Fix: use a context manager so the file handle is closed
            with open(os.path.join("Library", level, out_filename), 'r', encoding="utf-8") as library_file:
                return library_file.readlines()

    orca_terminated_normally = False
    if os.path.exists(out_filename):
        orca_terminated_normally = did_orca_calculation_terminate_normally(out_filename)

    if not orca_terminated_normally:
        with open(out_filename, 'w') as orca_out:
            orca_run = Popen([path_to_orca, inp_filename], stdout=orca_out)
            orca_run.wait()

    # Fix: close the output file handle after reading
    with open(out_filename, 'r', encoding="utf-8") as out_file:
        return out_file.readlines()
def get_orca_opt_xyzs_energy(out_lines):
    """
    Parse the optimised geometry, electronic energy and Gibbs correction from
    the lines of an ORCA optimisation + frequency output.

    :param out_lines: (list) lines of the ORCA .out file
    :return: (tuple) xyz list, energy (Ha) or None, Gibbs correction (Ha) or None
    """
    opt_converged = False
    geom_section = False
    opt_xyzs = []
    energy = None
    gibbs_corr = None

    for line in out_lines:
        if 'THE OPTIMIZATION HAS CONVERGED' in line:
            opt_converged = True
        if opt_converged and 'CARTESIAN COORDINATES' in line:
            geom_section = True

        items = line.split()
        if geom_section and not items:
            # A blank line terminates the coordinate block
            geom_section = False
        if geom_section and len(items) == 4:
            atom_label, x, y, z = items
            opt_xyzs.append([atom_label, float(x), float(y), float(z)])

        if 'FINAL SINGLE POINT ENERGY' in line:
            # e.g. line = 'FINAL SINGLE POINT ENERGY -4143.815610365798'
            energy = float(items[4])
        if 'G-E(el)' in line:
            gibbs_corr = float(items[2])

    return opt_xyzs, energy, gibbs_corr
def get_orca_gibbs_corr_energy_single_atom(out_lines):
    """
    Parse the energy and Gibbs correction for a single atom, for which no
    'G-E(el)' line is printed; the correction is rebuilt from the total
    enthalpy and the translational entropy.

    :param out_lines: (list) lines of the ORCA .out file
    :return: (tuple) energy (Ha), Gibbs correction (Ha); (None, None) if any
             required quantity is missing
    """
    s_trans = h_total = energy = None
    for line in out_lines:
        if 'Translational entropy' in line:
            s_trans = float(line.split()[3])
        if 'Total enthalpy' in line:
            h_total = float(line.split()[3])
        if 'FINAL SINGLE POINT ENERGY' in line:
            energy = float(line.split()[4])

    # If any of the required quantities were not found return None
    if s_trans is None or h_total is None or energy is None:
        return None, None

    return energy, (h_total - s_trans) - energy
def get_orca_sp_energy(out_lines):
    """
    Return the final single point energy (Ha) from an ORCA output, searching
    from the end of the file so the last value is used.

    :param out_lines: (list) lines of the ORCA .out file
    :return: (float) energy, or None if the line is not found
    """
    for line in reversed(out_lines):
        if 'FINAL SINGLE POINT ENERGY' in line:
            # e.g. line = 'FINAL SINGLE POINT ENERGY -4143.815610365798'
            return float(line.split()[4])
def get_atoms_in_smiles_string(smiles):
    """
    Extract atom symbols from a SMILES string: an uppercase character starts
    an atom and a following lowercase character completes a two-letter
    element symbol (e.g. 'Cl').

    :param smiles: (str) SMILES string
    :return: (list) atom symbols in order of appearance
    """
    atoms = []
    for i, char in enumerate(smiles):
        if not char.isupper():
            continue
        next_char = smiles[i + 1] if i + 1 < len(smiles) else None
        if next_char is not None and next_char.islower():
            atoms.append(char + next_char)
        else:
            atoms.append(char)
    return atoms
def print_output(process, name, state):
    """Print a three-column progress line, e.g. 'Optimisation of  <name>  Running'."""
    return print(f"{process:<30s}{name:<50s}{state:>10s}")
class Molecule(object):
    """
    A molecule built from a SMILES string. On construction the geometry is
    embedded with RDKit, then the Gibbs free energy is computed with ORCA
    (optimisation + frequency, followed by an optional single point).
    """

    def calc_gibbs(self):
        # Full pipeline: optimise the geometry, refine the energy, combine
        self.optimise()
        self.single_point()
        self.set_gibbs()

    def optimise(self):
        """Optimise the geometry with ORCA; store xyzs, energy and Gibbs correction."""
        print_output('Optimisation of', self.name, 'Running')
        inp_filename = gen_orca_inp(mol=self, name=self.name + "_opt", opt=True, pal=self.pal)
        orca_output_lines = run_orca(inp_filename, out_filename=inp_filename.replace(".inp", ".out"))
        if len(self.xyzs) == 1:
            # A single atom is not optimised; its Gibbs correction is rebuilt
            # from the enthalpy and translational entropy
            self.energy, self.gibbs_corr = get_orca_gibbs_corr_energy_single_atom(out_lines=orca_output_lines)
        else:
            self.xyzs, self.energy, self.gibbs_corr = get_orca_opt_xyzs_energy(out_lines=orca_output_lines)
        print_output('', '', 'Done')

    def single_point(self):
        """Refine the energy at the single point level, if one is defined for this level of theory."""
        print_output('Single point of of', self.name, 'Running')
        if method_dict[level][1] is not None:
            inp_filename = gen_orca_inp(mol=self, name=self.name + "_sp", sp=True, pal=self.pal)
            orca_output_lines = run_orca(inp_filename, out_filename=inp_filename.replace(".inp", ".out"))
            self.energy = get_orca_sp_energy(out_lines=orca_output_lines)
        print_output('', '', 'Done')

    def set_gibbs(self):
        # G = E(el) + (G - E(el)); None if either component is missing
        if self.energy is None or self.gibbs_corr is None:
            self.gibbs = None
        else:
            self.gibbs = self.energy + self.gibbs_corr

    def __init__(self, smiles, charge=0, mult=1, name="strained", pal=1):
        """
        :param smiles: (str) SMILES string of the molecule
        :param charge: (int) total charge
        :param mult: (int) spin multiplicity
        :param name: (str) base name used for the ORCA input/output files
        :param pal: (int) number of cores for ORCA
        """
        print_output('Molecule object for', name, 'Generating')
        self.smiles = smiles
        self.charge = charge
        self.mult = mult
        self.name = name
        self.energy = None       # electronic (or single point) energy in Ha
        self.gibbs_corr = None   # G - E(el) correction in Ha
        self.gibbs = None        # Gibbs free energy in Ha
        self.obj = make_mol_obj(self.smiles)
        # Only the first embedded conformer is used
        self.xyzs = gen_conformer_xyzs(mol_obj=self.obj, conf_ids=[0])[0]
        self.pal = pal
        self.calc_gibbs()
        print_output('', '', '')
def calc_dG_addition(strained, probe, adduct):
    """
    Free energy of probe addition to the strained molecule:
    dG_add = G(adduct) - (G(strained) + G(probe)), in kcal/mol.

    :return: (float) dG in kcal/mol, or None if any Gibbs energy is missing
    """
    gibbs_energies = [adduct.gibbs, strained.gibbs, probe.gibbs]
    if None in gibbs_energies:
        return None
    return (adduct.gibbs - (strained.gibbs + probe.gibbs)) * conversion_factor
def calc_dG_stabilisation(probeH, adduct, probe, adductH):
    """
    Free energy of radical stabilisation via the isodesmic H-transfer:
    dG_stab = (G(probe) + G(adductH)) - (G(probeH) + G(adduct)), in kcal/mol.

    :return: (float) dG in kcal/mol, or None if any Gibbs energy is missing
    """
    gibbs_energies = [probeH.gibbs, adduct.gibbs, probe.gibbs, adductH.gibbs]
    if None in gibbs_energies:
        return None
    return ((probe.gibbs + adductH.gibbs) - (probeH.gibbs + adduct.gibbs)) * conversion_factor
def get_xs_ys_not_none(xs, ys):
    """
    Filter two parallel lists, keeping only the positions where both entries
    are not None.

    :param xs: (list) x values (may contain None)
    :param ys: (list) y values (may contain None)
    :return: (tuple) filtered x list, filtered y list
    """
    kept_pairs = [(x, y) for x, y in zip(xs, ys)
                  if x is not None and y is not None]
    xs_not_none = [x for x, _ in kept_pairs]
    ys_not_none = [y for _, y in kept_pairs]
    return xs_not_none, ys_not_none
def get_xs_to_zero(xs):
    """
    Sort the x values and, when all are negative, append 0 so the fitted line
    can be drawn up to the y-axis.

    :param xs: (list) x values
    :return: (list) sorted x values, extended with 0 if all are negative
    """
    # sorted() already returns a list; the redundant list() wrapper is removed
    xs_sorted = sorted(xs)
    if all(x < 0 for x in xs):
        return xs_sorted + [0]
    return xs_sorted
def calc_dGs(general_adduct_smiles, charge_on_probe, probe_name, mult, strained, pal):
    """
    Build the probe, probe-H, adduct and adduct-H molecules for a single
    probe and compute the addition and stabilisation free energies.

    :return: (tuple) dG_addition, dG_stabilisation in kcal/mol (None entries
             if any underlying Gibbs energy could not be computed)
    """
    radical_smiles, capped_smiles, attach_smiles = probe_dict[probe_name]

    probe = Molecule(smiles=radical_smiles, name=probe_name + str(charge_on_probe),
                     charge=charge_on_probe, mult=mult, pal=pal)
    probeH = Molecule(smiles=capped_smiles, name=probe_name + "H", pal=pal)
    adduct = Molecule(smiles=general_adduct_smiles + attach_smiles,
                      name=strained.name + "_" + probe.name, charge=charge_on_probe, mult=mult, pal=pal)
    adductH = Molecule(smiles=add_h_to_adduct(adduct_smiles=adduct.smiles),
                       name=strained.name + "_" + probe.name + "H", pal=pal)

    return (calc_dG_addition(strained, probe, adduct),
            calc_dG_stabilisation(probeH, adduct, probe, adductH))
def calc_strain_graph(strained_smiles, general_adduct_smiles, charge_on_probe):
    """
    Compute dG_stabilisation (x) and dG_addition (y) for every probe in
    probe_dict, distributing the probes over a process pool.

    Uses the module-level global `n_procs` set in the __main__ block.

    :param strained_smiles: (str) SMILES of the strained molecule
    :param general_adduct_smiles: (str) SMILES of the adduct with a '[*]' attachment point
    :param charge_on_probe: (int) charge on the probe
    :return: (tuple) xs (dG_stab values), ys (dG_add values)
    """
    # Neutral probes are radicals, hence doublet multiplicity
    mult = 1
    if charge_on_probe == 0:
        mult = 2
    strained = Molecule(smiles=strained_smiles, pal=n_procs)
    general_adduct_smiles = modify_adduct_smiles(smiles_string=general_adduct_smiles)
    # Split the available cores between the per-probe worker processes
    n_processes = len(probe_dict)
    n_core_per_process = int(n_procs/n_processes) if int(n_procs/n_processes) > 0 else 1
    processes = int(n_processes/n_core_per_process)
    with Pool(processes=processes) as pool:
        results = [pool.apply_async(calc_dGs, (general_adduct_smiles, charge_on_probe, probe_name, mult, strained,
                                               n_core_per_process))
                   for probe_name in probe_dict.keys()]
        dGs = [res.get(timeout=None) for res in results]
    # calc_dGs returns (dG_addition, dG_stabilisation)
    xs = [val[1] for val in dGs]
    ys = [val[0] for val in dGs]
    return xs, ys
def plot_strain_graph(strained_smiles, general_adduct_smiles, charge_on_probe):
    """
    Compute and plot dG_addition against dG_stabilisation for all probes,
    fit a line, annotate the gradient/intercept/r^2 and save the figure.

    :param strained_smiles: (str) SMILES of the strained molecule
    :param general_adduct_smiles: (str) SMILES of the adduct with a '[*]' attachment point
    :param charge_on_probe: (int) charge on the probe
    :return: result of plt.savefig (None); figure written to _strain_graph.png
    """
    xs, ys = calc_strain_graph(strained_smiles, general_adduct_smiles, charge_on_probe)
    plt.scatter(xs, ys)

    # Fit a line through the points where both values are defined
    xs_not_None, ys_not_None = get_xs_ys_not_none(xs, ys)
    m, c, r, p, err = linregress(xs_not_None, ys_not_None)

    # Standard error of the intercept, derived from the slope's standard error
    xs_sq = [x ** 2 for x in xs_not_None]
    intercept_err = err * np.sqrt(sum(xs_sq) / len(xs_sq))

    # Fix: "$\pm$" / "$\Delta ...$" contained invalid escape sequences in
    # plain strings; raw strings keep the literal backslash for mathtext
    plt.annotate("gradient = " + str(np.round(m, 2)) + r"$\pm$" + str(np.round(err, 2)) +
                 "\nstrain relief = " + str(np.round(c, 1)) + r"$\pm$" + str(np.round(intercept_err, 2)) +
                 "\n$r^2$ = " + str(np.round(np.square(r), 3)), (0.6*min(xs_not_None), 0.8*min(ys_not_None)),
                 ha='center', va='center')

    xs_to_zero = get_xs_to_zero(xs=xs_not_None)
    plt.plot(xs_to_zero, np.array(xs_to_zero)*m + c, color='black', linestyle='dashed')
    plt.xlabel(r"$\Delta G_{stab}$ / kcal mol$^{-1}$")
    plt.ylabel(r"$\Delta G_{add}$ / kcal mol$^{-1}$")
    plt.axhline(y=0, color='k', linewidth='0.5')
    plt.axvline(x=0, color='k', linewidth='0.5')
    return plt.savefig("_strain_graph.png", dpi=1200)
if __name__ == "__main__":
    args = get_args()
    # Module-level globals read by gen_orca_inp, run_orca and calc_strain_graph
    level = args.level
    maxcore = args.max_core
    n_procs = args.number_processors
    plot_strain_graph(strained_smiles=args.strained, general_adduct_smiles=args.adduct,
                      charge_on_probe=args.charge_on_probe)
| [
"argparse.ArgumentParser",
"rdkit.Chem.MolToMolBlock",
"os.path.join",
"numpy.round",
"matplotlib.pyplot.axvline",
"os.path.exists",
"re.findall",
"scipy.stats.linregress",
"matplotlib.pyplot.axhline",
"subprocess.Popen",
"numpy.square",
"multiprocessing.Pool",
"matplotlib.pyplot.ylabel",
... | [((6307, 6332), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6330, 6332), False, 'import argparse\n'), ((7534, 7567), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles_string'], {}), '(smiles_string)\n', (7552, 7567), False, 'from rdkit import Chem\n'), ((7578, 7593), 'rdkit.Chem.AddHs', 'Chem.AddHs', (['obj'], {}), '(obj)\n', (7588, 7593), False, 'from rdkit import Chem\n'), ((10251, 10286), 're.findall', 're.findall', (['pattern1', 'adduct_smiles'], {}), '(pattern1, adduct_smiles)\n', (10261, 10286), False, 'import re\n'), ((10318, 10353), 're.findall', 're.findall', (['pattern2', 'adduct_smiles'], {}), '(pattern2, adduct_smiles)\n', (10328, 10353), False, 'import re\n'), ((12586, 12614), 'os.path.exists', 'os.path.exists', (['out_filename'], {}), '(out_filename)\n', (12600, 12614), False, 'import os\n'), ((20190, 20209), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {}), '(xs, ys)\n', (20201, 20209), True, 'import matplotlib.pyplot as plt\n'), ((20291, 20327), 'scipy.stats.linregress', 'linregress', (['xs_not_None', 'ys_not_None'], {}), '(xs_not_None, ys_not_None)\n', (20301, 20327), False, 'from scipy.stats import linregress\n'), ((20941, 20991), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\Delta G_{stab}$ / kcal mol$^{-1}$"""'], {}), "('$\\\\Delta G_{stab}$ / kcal mol$^{-1}$')\n", (20951, 20991), True, 'import matplotlib.pyplot as plt\n'), ((20995, 21044), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\Delta G_{add}$ / kcal mol$^{-1}$"""'], {}), "('$\\\\Delta G_{add}$ / kcal mol$^{-1}$')\n", (21005, 21044), True, 'import matplotlib.pyplot as plt\n'), ((21048, 21092), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""k"""', 'linewidth': '"""0.5"""'}), "(y=0, color='k', linewidth='0.5')\n", (21059, 21092), True, 'import matplotlib.pyplot as plt\n'), ((21099, 21143), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0)', 'color': '"""k"""', 'linewidth': 
'"""0.5"""'}), "(x=0, color='k', linewidth='0.5')\n", (21110, 21143), True, 'import matplotlib.pyplot as plt\n'), ((21158, 21200), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""_strain_graph.png"""'], {'dpi': '(1200)'}), "('_strain_graph.png', dpi=1200)\n", (21169, 21200), True, 'import matplotlib.pyplot as plt\n'), ((12322, 12352), 'os.path.join', 'os.path.join', (['"""Library"""', 'level'], {}), "('Library', level)\n", (12334, 12352), False, 'import os\n'), ((19597, 19622), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'processes'}), '(processes=processes)\n', (19601, 19622), False, 'from multiprocessing import Pool\n'), ((7649, 7664), 'rdkit.Chem.AllChem.ETKDG', 'AllChem.ETKDG', ([], {}), '()\n', (7662, 7664), False, 'from rdkit.Chem import AllChem\n'), ((12381, 12425), 'os.path.join', 'os.path.join', (['"""Library"""', 'level', 'out_filename'], {}), "('Library', level, out_filename)\n", (12393, 12425), False, 'import os\n'), ((12816, 12868), 'subprocess.Popen', 'Popen', (['[path_to_orca, inp_filename]'], {'stdout': 'orca_out'}), '([path_to_orca, inp_filename], stdout=orca_out)\n', (12821, 12868), False, 'from subprocess import Popen\n'), ((8014, 8061), 'rdkit.Chem.MolToMolBlock', 'Chem.MolToMolBlock', (['mol_obj'], {'confId': 'conf_ids[i]'}), '(mol_obj, confId=conf_ids[i])\n', (8032, 8061), False, 'from rdkit import Chem\n'), ((20870, 20890), 'numpy.array', 'np.array', (['xs_to_zero'], {}), '(xs_to_zero)\n', (20878, 20890), True, 'import numpy as np\n'), ((20689, 20701), 'numpy.square', 'np.square', (['r'], {}), '(r)\n', (20698, 20701), True, 'import numpy as np\n'), ((12470, 12514), 'os.path.join', 'os.path.join', (['"""Library"""', 'level', 'out_filename'], {}), "('Library', level, out_filename)\n", (12482, 12514), False, 'import os\n'), ((20614, 20640), 'numpy.round', 'np.round', (['intercept_err', '(2)'], {}), '(intercept_err, 2)\n', (20622, 20640), True, 'import numpy as np\n'), ((20583, 20597), 'numpy.round', 'np.round', (['c', '(1)'], {}), '(c, 
1)\n', (20591, 20597), True, 'import numpy as np\n'), ((20519, 20535), 'numpy.round', 'np.round', (['err', '(2)'], {}), '(err, 2)\n', (20527, 20535), True, 'import numpy as np\n'), ((20488, 20502), 'numpy.round', 'np.round', (['m', '(2)'], {}), '(m, 2)\n', (20496, 20502), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np

# A histogram shows a frequency distribution: the number of observations
# falling within each interval. plt.hist() builds one from an array of numbers.
samples = np.random.normal(170, 10, 250)
plt.hist(samples)
plt.show()
"matplotlib.pyplot.hist",
"matplotlib.pyplot.show",
"numpy.random.normal"
] | [((270, 300), 'numpy.random.normal', 'np.random.normal', (['(170)', '(10)', '(250)'], {}), '(170, 10, 250)\n', (286, 300), True, 'import numpy as np\n'), ((302, 313), 'matplotlib.pyplot.hist', 'plt.hist', (['x'], {}), '(x)\n', (310, 313), True, 'import matplotlib.pyplot as plt\n'), ((315, 325), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (323, 325), True, 'import matplotlib.pyplot as plt\n')] |
"""
===============================
Estimation of differences between directed graphs given two datasets representing two settings.
===============================
This module contains functions for estimating the differences between two causal
directed acyclic graph (DAG) models given samples from each model.
References
----------
[1] <NAME>., <NAME>., <NAME>., & <NAME>. (2018). Direct estimation of differences in causal graphs.
In Advances in Neural Information Processing Systems (pp. 3770-3781).
[2] <NAME>. and <NAME>. (2010). Stability selection.
Journal of the Royal Statistical Society: Series B (Statistical Methodology), 72(4), pp.417-473.
"""
from .difference_ug import dci_undirected_graph
from ._utils import bootstrap_generator, edges2adjacency
from conditional_independence import partial_correlation_suffstat
from graphical_model_learning.utils.core_utils import powerset
from graphical_model_learning.utils.regression import RegressionHelper
from scipy.special import ncfdtr
from numpy.linalg import inv
import numpy as np
import itertools
from joblib import Parallel, delayed
from sklearn.utils import safe_mask
import networkx as nx
from typing import Optional, Set, Union, Dict
from tqdm import tqdm
import operator as op
import matplotlib.pyplot as plt
def dci(
        X1,
        X2,
        alpha_ug: float = 0.01,
        alpha_skeleton: float = 0.1,
        alpha_orient: float = 0.1,
        max_set_size: Optional[int] = 3,
        difference_ug_method = 'constraint',
        difference_ug: list = None,
        nodes_cond_set: set = None,
        max_iter: int = 1000,
        edge_threshold: float = 0,
        verbose: int = 0,
        lam: float = 0,
        progress: bool = False,
        order_independent: bool = True
):
    """
    Uses the Difference Causal Inference (DCI) algorithm to estimate the difference-DAG between two settings.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    alpha_ug: float, default = 0.01
        Parameter for determining the difference undirected graph.
        If difference_ug_method = 'constraint', alpha_ug is the significance level parameter (must be in [0,1] range),
        with higher alpha_ug resulting in more edges in the difference undirected graph.
        If difference_ug_method = 'kliep', alpha_ug is the L1 regularization parameter for estimating
        the difference undirected graph via KLIEP algorithm.
    alpha_skeleton: float, default = 0.1
        Significance level parameter for determining presence of edges in the skeleton of the difference graph.
        Lower alpha_skeleton results in sparser difference graph.
    alpha_orient: float, default = 0.1
        Significance level parameter for determining orientation of an edge.
        Lower alpha_orient results in more directed edges in the difference-DAG.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
        Smaller maximum conditioning set size results in faster computation time. For large datasets recommended max_set_size is 3.
        If None, conditioning sets of all sizes will be used.
    difference_ug_method: str, default = 'constraint'
        Method for computing the undirected difference graph. Must be 'constraint' for constraint-based
        method or 'kliep' for KLIEP.
    difference_ug: list, default = None
        List of tuples that represents edges in the difference undirected graph. If difference_ug is None,
        constraint-based or KLIEP algorithm for estimating the difference undirected graph will be run.
        If the number of nodes is small, difference_ug could be taken to be the complete graph between all the nodes.
    nodes_cond_set: set, default = None
        Nodes to be considered as conditioning sets. If nodes_cond_set is None,
        constraint-based or KLIEP algorithm for estimating the difference undirected graph will be run.
        If the number of nodes is small, nodes_cond_set could be taken to be all the nodes.
    max_iter: int, default = 1000
        Maximum number of iterations for gradient descent in KLIEP algorithm.
    edge_threshold: float, default = 0
        Edge weight cutoff for keeping an edge for KLIEP algorithm (all edges above or equal to this threshold are kept).
    verbose: int, default = 0
        The verbosity level of logging messages.
    lam: float, default = 0
        Amount of regularization for regression (becomes ridge regression if nonzero).
    progress: bool, default = False
        Whether to show DCI progress bar.
    order_independent: bool = True
        For orientation phase of DCI, whether to use DCI where all nodes are considered at each level of
        the conditioning set size, making the output of DCI independent of node order (recommended) or
        whether to use the original DCI algorithm that iterates over nodes and thus may depend
        on the order of the nodes (this may lead to inconsistent or biased results).

    See Also
    --------
    dci_undirected_graph, dci_skeleton, dci_orient

    Returns
    -------
    adjacency_matrix: array, shape = [n_features, n_features]
        Estimated difference-DAG. Edges that were found to be different between two settings but the orientation
        could not be determined, are represented by assigning 1 in both directions, i.e. adjacency_matrix[i,j] = 1
        and adjacency_matrix[j,i] = 1. Otherwise for oriented edges, only adjacency_matrix[i,j] = 1 is assigned.
        Assignment of 0 in the adjacency matrix represents no edge.

    References
    ----------
    [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2018). Direct estimation of differences in causal graphs.
    In Advances in Neural Information Processing Systems (pp. 3770-3781).
    """
    assert 0 <= alpha_skeleton <= 1, "alpha_skeleton must be in [0,1] range."
    assert 0 <= alpha_orient <= 1, "alpha_orient must be in [0,1] range."

    num_nodes = X1.shape[1]
    # obtain sufficient statistics (sample partial correlations) for each dataset
    suffstat1 = partial_correlation_suffstat(X1)
    suffstat2 = partial_correlation_suffstat(X2)
    rh1 = RegressionHelper(suffstat1)
    rh2 = RegressionHelper(suffstat2)

    # compute the difference undirected graph via KLIEP or constraint-based method
    # if the difference_ug or nodes_cond_set is not provided
    if difference_ug is None or nodes_cond_set is None:
        difference_ug, nodes_cond_set = dci_undirected_graph(
            X1,
            X2,
            difference_ug_method = difference_ug_method,
            alpha=alpha_ug,
            max_iter=max_iter,
            edge_threshold=edge_threshold,
            verbose=verbose)

    # estimate the skeleton of the difference-DAG
    skeleton = dci_skeleton(
        X1,
        X2,
        difference_ug,
        nodes_cond_set,
        rh1=rh1,
        rh2=rh2,
        alpha=alpha_skeleton,
        max_set_size=max_set_size,
        verbose=verbose,
        lam=lam,
        progress=progress
    )
    if verbose > 0: print(f"{len(skeleton)} edges in the difference skeleton")

    # orient edges of the skeleton of the difference-DAG
    orient_algorithm = dci_orient_order_dependent if not order_independent else dci_orient
    adjacency_matrix = orient_algorithm(
        X1,
        X2,
        skeleton,
        nodes_cond_set,
        rh1=rh1,
        rh2=rh2,
        alpha=alpha_orient,
        max_set_size=max_set_size,
        verbose=verbose
    )
    return adjacency_matrix
def dci_skeleton_multiple(
        X1,
        X2,
        alpha_skeleton_grid: list = [0.1, 0.5],
        max_set_size: int = 3,
        difference_ug: list = None,
        nodes_cond_set: set = None,
        rh1: RegressionHelper = None,
        rh2: RegressionHelper = None,
        verbose: int = 0,
        lam: float = 0,
        progress: bool = False,
        true_diff: Optional[Set] = None
):
    """
    Estimate the skeleton of the difference-DAG simultaneously for several
    skeleton significance levels.

    For each candidate edge i-j of the difference undirected graph, each
    regression direction and each conditioning set (up to max_set_size), an
    invariance test statistic is computed; an edge is removed from the
    skeleton of every alpha whose threshold the p-value exceeds.

    :param X1: first dataset, shape [n_samples, n_features]
    :param X2: second dataset, shape [n_samples, n_features]
    :param alpha_skeleton_grid: significance levels (each in [0,1]) to estimate skeletons for
    :param max_set_size: maximum conditioning set size
    :param difference_ug: edges of the difference undirected graph
    :param nodes_cond_set: nodes to draw conditioning sets from
    :param rh1: regression helper for X1 (computed here if None)
    :param rh2: regression helper for X2 (computed here if None)
    :param verbose: verbosity level
    :param lam: ridge regularization for the regressions
    :param progress: whether to show a progress bar
    :param true_diff: optional ground-truth difference edges, used only for diagnostics
    :return: dict mapping each alpha in alpha_skeleton_grid to its estimated skeleton (set of edges)
    """
    if verbose > 0:
        print("DCI skeleton estimation...")

    if rh1 is None or rh2 is None:
        # obtain sufficient statistics
        suffstat1 = partial_correlation_suffstat(X1)
        suffstat2 = partial_correlation_suffstat(X2)
        rh1 = RegressionHelper(suffstat1)
        rh2 = RegressionHelper(suffstat2)

    n1 = rh1.suffstat['n']
    n2 = rh2.suffstat['n']
    for alpha in alpha_skeleton_grid:
        assert 0 <= alpha <= 1, "alpha must be in [0,1] range."
    # An edge is only worth testing for removal if its p-value exceeds the
    # smallest alpha in the grid
    min_alpha = min(alpha_skeleton_grid)

    # start each skeleton as a copy of the difference undirected graph
    skeletons = {alpha: {(i, j) for i, j in difference_ug} for alpha in alpha_skeleton_grid}
    difference_ug = tqdm(difference_ug) if (progress and len(difference_ug) != 0) else difference_ug

    for i, j in difference_ug:
        for cond_set in powerset(nodes_cond_set - {i, j}, r_max=max_set_size):
            cond_set_i, cond_set_j = [*cond_set, j], [*cond_set, i]

            # calculate regression coefficients (j regressed on cond_set_j) for both datasets
            beta1_i, var1_i, precision1 = rh1.regression(i, cond_set_i, lam=lam)
            beta2_i, var2_i, precision2 = rh2.regression(i, cond_set_i, lam=lam)

            # compute statistic and p-value
            j_ix = cond_set_i.index(j)
            stat_i = (beta1_i[j_ix] - beta2_i[j_ix]) ** 2 * \
                     inv(var1_i * precision1 / (n1 - 1) + var2_i * precision2 / (n2 - 1))[j_ix, j_ix]
            pval_i = 1 - ncfdtr(1, n1 + n2 - len(cond_set_i) - len(cond_set_j), 0, stat_i)

            # remove i-j from skeleton if i regressed on (j, cond_set) is invariant
            i_invariant = pval_i > min_alpha
            if i_invariant:
                removed_alphas = [alpha for alpha in alpha_skeleton_grid if pval_i > alpha]
                if verbose > 1:
                    print(
                        f"Removing edge {j}->{i} for alpha={removed_alphas} since p-value={pval_i:.5f} with cond set {cond_set_i}")
                for alpha in removed_alphas:
                    skeletons[alpha].discard((i, j))
                if true_diff is not None:
                    if (i, j) in true_diff or (j, i) in true_diff:
                        print(
                            f"Incorrectly removing edge {j}->{i} for alpha={removed_alphas} since p-value={pval_i:.6f} with cond set {cond_set_i}")
                if len(removed_alphas) == len(alpha_skeleton_grid):
                    # edge removed for every alpha – no further cond sets needed
                    break
            elif verbose > 1:
                print(f"Keeping edge {i}-{j} for now, since p-value={pval_i:.5f} with cond set {cond_set_i}")

            # calculate regression coefficients (i regressed on cond_set_i) for both datasets
            beta1_j, var1_j, precision1 = rh1.regression(j, cond_set_j)
            beta2_j, var2_j, precision2 = rh2.regression(j, cond_set_j)

            # compute statistic and p-value
            i_ix = cond_set_j.index(i)
            stat_j = (beta1_j[i_ix] - beta2_j[i_ix]) ** 2 * \
                     inv(var1_j * precision1 / (n1 - 1) + var2_j * precision2 / (n2 - 1))[i_ix, i_ix]
            pval_j = 1 - ncfdtr(1, n1 + n2 - len(cond_set_i) - len(cond_set_j), 0, stat_j)

            # remove i-j from skeleton if j regressed on (i, cond_set) is invariant
            j_invariant = pval_j > min_alpha
            if j_invariant:
                removed_alphas = [alpha for alpha in alpha_skeleton_grid if pval_j > alpha]
                if verbose > 1:
                    print(
                        f"Removing edge {i}->{j} for alpha={removed_alphas} since p-value={pval_j:.5f} with cond set {cond_set_j}")
                for alpha in removed_alphas:
                    skeletons[alpha].discard((i, j))
                if true_diff is not None:
                    if (i, j) in true_diff or (j, i) in true_diff:
                        print(
                            f"Incorrectly removing edge {j}->{i} for alpha={removed_alphas} since p-value={pval_j:.6f} with cond set {cond_set_i}")
                if len(removed_alphas) == len(alpha_skeleton_grid):
                    # edge removed for every alpha – no further cond sets needed
                    break
            elif verbose > 1:
                print(f"Keeping edge {i}-{j} for now, since p-value={pval_j:.5f}with cond set {cond_set_j}")

    return skeletons
def dci_multiple(
        X1: np.ndarray,
        X2: np.ndarray,
        alpha_skeleton_grid: list = [0.1, 0.5],
        alpha_orient_grid: list = [.1],
        max_set_size: int = 3,
        difference_ug: list = None,
        nodes_cond_set: set = None,
        edge_threshold: float = 0.05,
        sample_fraction: float = 0.7,
        n_bootstrap_iterations: int = 50,
        alpha_ug: float = 1.,
        max_iter: int = 1000,
        n_jobs: int = 1,
        random_state: int = None,
        verbose: int = 0,
        lam: float = 0,
        true_diff: Optional[Set] = None,
        difference_ug_method: str = 'constraint'
):
    """
    Runs the DCI skeleton and orientation phases over bootstrap samples for a
    grid of significance levels, aggregating results into bootstrap edge
    frequencies.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    alpha_skeleton_grid: list, default = [0.1, 0.5]
        Significance levels to try for the skeleton phase.
    alpha_orient_grid: list, default = [0.1]
        Significance levels to try for the orientation phase.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
    difference_ug: list, default = None
        Edges of the difference undirected graph; estimated if None.
    nodes_cond_set: set, default = None
        Nodes to be considered as conditioning sets; estimated if None.
    edge_threshold: float, default = 0.05
        Edge weight cutoff for keeping an edge when estimating the difference UG.
    sample_fraction: float, default = 0.7
        Fraction of samples used in each bootstrap sample.
    n_bootstrap_iterations: int, default = 50
        Number of bootstrap samples to create.
    alpha_ug: float, default = 1.
        L1 regularization parameter for difference-UG estimation.
    max_iter: int, default = 1000
        Maximum number of iterations for difference-UG estimation.
    n_jobs: int, default = 1
        Number of jobs to run in parallel.
    random_state: int, default = None
        Seed used by the random number generator.
    verbose: int, default = 0
        The verbosity level of logging messages.
    lam: float, default = 0
        Amount of regularization for regression (ridge regression if nonzero).
    true_diff: set, default = None
        Ground-truth difference edges, used only for diagnostic printing.
    difference_ug_method: str, default = 'constraint'
        Method used to estimate the difference undirected graph.

    Returns
    -------
    alpha2adjacency_skeleton: dict
        Maps each skeleton alpha to a [p, p] matrix of bootstrap edge frequencies.
    alpha2adjacency_oriented: dict
        Maps each (skeleton alpha, orientation alpha) pair to a
        bootstrap-averaged oriented adjacency matrix.
    """
    # estimate the difference undirected graph unless the caller supplied it
    if difference_ug is None or nodes_cond_set is None:
        difference_ug, nodes_cond_set = dci_undirected_graph(
            X1,
            X2,
            difference_ug_method=difference_ug_method,
            alpha=alpha_ug,
            max_iter=max_iter,
            edge_threshold=edge_threshold,
            verbose=verbose)
    if verbose > 0:
        print(f"{len(difference_ug)} edges in the difference UG, over {len(nodes_cond_set)} nodes")
    if true_diff:
        # diagnostic: compare the estimated difference UG against the ground truth
        difference_ug = {frozenset({i, j}) for i, j in difference_ug}
        true_skel = {frozenset({i, j}) for i, j in true_diff}
        print(f"in difference UG: {len(true_skel - difference_ug)} false negatives, {len(difference_ug - true_skel)} false positives")
        print(f"{len(difference_ug)} edges in the difference UG, over {len(nodes_cond_set)} nodes")
    # materialize the bootstrap index sets so they can be reused by both phases
    bootstrap_samples1 = list(bootstrap_generator(n_bootstrap_iterations, sample_fraction, X1, random_state=random_state))
    bootstrap_samples2 = list(bootstrap_generator(n_bootstrap_iterations, sample_fraction, X2, random_state=random_state))
    skeleton_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(dci_skeleton_multiple)(
            X1[safe_mask(X1, subsample1), :],
            X2[safe_mask(X2, subsample2), :],
            alpha_skeleton_grid=alpha_skeleton_grid,
            max_set_size=max_set_size,
            difference_ug=difference_ug,
            nodes_cond_set=nodes_cond_set,
            verbose=verbose,
            lam=lam,
            true_diff=true_diff)
        for subsample1, subsample2 in zip(bootstrap_samples1, bootstrap_samples2)
    )
    p = X1.shape[1]
    # per-alpha bootstrap frequency of each skeleton edge
    alpha2adjacency_skeleton = {alpha: np.zeros([p, p]) for alpha in alpha_skeleton_grid}
    for res in skeleton_results:
        for alpha in alpha_skeleton_grid:
            alpha2adjacency_skeleton[alpha] += 1 / n_bootstrap_iterations * edges2adjacency(X1.shape[1], res[alpha],
                                                                                            undirected=True)
    alpha2adjacency_oriented = dict()
    for alpha_orient in alpha_orient_grid:
        orientation_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
            delayed(dci_orient)(
                X1[safe_mask(X1, subsample1), :],
                # bug fix: subsample X2 with its own mask; the original used
                # safe_mask(X1, subsample2), mixing the two datasets
                X2[safe_mask(X2, subsample2), :],
                skeleton,
                nodes_cond_set=nodes_cond_set,
                alpha=alpha_orient,
                max_set_size=max_set_size,
                verbose=verbose)
            for subsample1, subsample2, skeleton in zip(bootstrap_samples1, bootstrap_samples2, skeleton_results)
        )
        for alpha_skel in alpha_skeleton_grid:
            # average the oriented adjacency matrices across bootstrap runs
            bootstrap_amat = 1 / n_bootstrap_iterations * sum([
                orientation_results[i][alpha_skel] for i in range(n_bootstrap_iterations)
            ])
            alpha2adjacency_oriented[(alpha_skel, alpha_orient)] = bootstrap_amat
    return alpha2adjacency_skeleton, alpha2adjacency_oriented
def dci_skeletons_bootstrap_multiple(
        X1,
        X2,
        alpha_skeleton_grid: list = [0.1, 0.5],
        max_set_size: int = 3,
        difference_ug: list = None,
        nodes_cond_set: set = None,
        edge_threshold: float = 0.05,
        sample_fraction: float = 0.7,
        n_bootstrap_iterations: int = 50,
        alpha_ug: float = 1.,
        max_iter: int = 1000,
        n_jobs: int = 1,
        random_state: int = None,
        verbose: int = 0,
        lam: float = 0,
        true_diff: Optional[Set] = None,
        difference_ug_method: str = 'constraint'
):
    """
    Estimates DCI skeletons over bootstrap samples for a grid of significance
    levels and aggregates them into bootstrap edge frequencies.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    alpha_skeleton_grid: list, default = [0.1, 0.5]
        Significance levels to try for the skeleton phase.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
    difference_ug: list, default = None
        Edges of the difference undirected graph; estimated if None.
    nodes_cond_set: set, default = None
        Nodes to be considered as conditioning sets; estimated if None.
    edge_threshold: float, default = 0.05
        Edge weight cutoff for keeping an edge when estimating the difference UG.
    sample_fraction: float, default = 0.7
        Fraction of samples used in each bootstrap sample.
    n_bootstrap_iterations: int, default = 50
        Number of bootstrap samples to create.
    alpha_ug: float, default = 1.
        L1 regularization parameter for difference-UG estimation.
    max_iter: int, default = 1000
        Maximum number of iterations for difference-UG estimation.
    n_jobs: int, default = 1
        Number of jobs to run in parallel.
    random_state: int, default = None
        Seed used by the random number generator.
    verbose: int, default = 0
        The verbosity level of logging messages.
    lam: float, default = 0
        Amount of regularization for regression (ridge regression if nonzero).
    true_diff: set, default = None
        Ground-truth difference edges, used only for diagnostic printing.
    difference_ug_method: str, default = 'constraint'
        Method used to estimate the difference undirected graph.

    Returns
    -------
    bootstrap_results: list
        Per-bootstrap dictionaries mapping each skeleton alpha to its edge set.
    alpha2adjacency: dict
        Maps each skeleton alpha to a [p, p] matrix of bootstrap edge frequencies.
    """
    if difference_ug is None or nodes_cond_set is None:
        difference_ug, nodes_cond_set = dci_undirected_graph(
            X1,
            X2,
            # bug fix: difference_ug_method was referenced here but never
            # defined in this function's scope (NameError); it is now a
            # keyword parameter, matching dci_multiple
            difference_ug_method=difference_ug_method,
            alpha=alpha_ug,
            max_iter=max_iter,
            edge_threshold=edge_threshold,
            verbose=verbose)
    if verbose > 0:
        print(f"{len(difference_ug)} edges in the difference UG, over {len(nodes_cond_set)} nodes")
    bootstrap_samples1 = bootstrap_generator(n_bootstrap_iterations, sample_fraction, X1, random_state=random_state)
    bootstrap_samples2 = bootstrap_generator(n_bootstrap_iterations, sample_fraction, X2, random_state=random_state)
    bootstrap_results = Parallel(n_jobs, verbose=verbose)(
        delayed(dci_skeleton_multiple)(
            X1[safe_mask(X1, subsample1), :],
            X2[safe_mask(X2, subsample2), :],
            alpha_skeleton_grid=alpha_skeleton_grid,
            max_set_size=max_set_size,
            difference_ug=difference_ug,
            nodes_cond_set=nodes_cond_set,
            verbose=verbose,
            lam=lam, true_diff=true_diff)
        for subsample1, subsample2 in zip(bootstrap_samples1, bootstrap_samples2))
    p = X1.shape[1]
    # per-alpha bootstrap frequency of each skeleton edge
    alpha2adjacency = {alpha: np.zeros([p, p]) for alpha in alpha_skeleton_grid}
    for res in bootstrap_results:
        for alpha in alpha_skeleton_grid:
            alpha2adjacency[alpha] += 1 / n_bootstrap_iterations * edges2adjacency(X1.shape[1], res[alpha],
                                                                                   undirected=True)
    return bootstrap_results, alpha2adjacency
def dci_orient_bootstrap_multiple(
        X1,
        X2,
        skeletons: Union[Dict[float, set], set],
        alpha_orient_grid: list = [0.001, 0.1],
        max_set_size: int = 3,
        nodes_cond_set: set = None,
        sample_fraction: float = 0.7,
        n_bootstrap_iterations: int = 50,
        bootstrap_threshold: float = 0.5,
        n_jobs: int = 1,
        random_state: int = None,
        verbose: int = 0
):
    """
    Orients DCI skeleton edges over bootstrap samples for a grid of
    orientation significance levels, keeping edges whose stability score
    exceeds the bootstrap threshold.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    skeletons: set or dictionary of float-set pairs
        Skeleton edge set(s) to orient.
    alpha_orient_grid: list, default = [0.001, 0.1]
        Significance levels to try for the orientation phase.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
    nodes_cond_set: set, default = None
        Nodes to be considered as conditioning sets.
    sample_fraction: float, default = 0.7
        Fraction of samples used in each bootstrap sample.
    n_bootstrap_iterations: int, default = 50
        Number of bootstrap samples to create.
    bootstrap_threshold: float, default = 0.5
        Minimum stability score for an edge to be kept.
    n_jobs: int, default = 1
        Number of jobs to run in parallel.
    random_state: int, default = None
        Seed used by the random number generator.
    verbose: int, default = 0
        The verbosity level of logging messages.

    Returns
    -------
    adjacency_matrix: array, shape = [n_features, n_features]
        Adjacency matrix of edges with stability scores above the threshold.
    stability_scores: array, shape = [n_params, n_features, n_features]
        Stability score of each edge for each orientation alpha.
    """
    # bug fix: materialize the bootstrap samples; the original kept them as
    # generators, which were exhausted after the first alpha_orient iteration,
    # leaving all later grid values with zero bootstrap runs
    bootstrap_samples1 = list(bootstrap_generator(n_bootstrap_iterations, sample_fraction, X1, random_state=random_state))
    bootstrap_samples2 = list(bootstrap_generator(n_bootstrap_iterations, sample_fraction, X2, random_state=random_state))
    _, n_variables = X1.shape
    n_params = len(alpha_orient_grid)
    stability_scores = np.zeros((n_params, n_variables, n_variables))
    for idx, alpha_orient in enumerate(alpha_orient_grid):
        orientation_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
            delayed(dci_orient)(
                X1[safe_mask(X1, subsample1), :],
                # bug fix: subsample X2 with its own mask; the original used
                # safe_mask(X1, subsample2), mixing the two datasets
                X2[safe_mask(X2, subsample2), :],
                skeletons=skeletons,
                nodes_cond_set=nodes_cond_set,
                alpha=alpha_orient,
                max_set_size=max_set_size,
                verbose=verbose)
            for subsample1, subsample2 in zip(bootstrap_samples1, bootstrap_samples2)
        )
        # stability score = mean adjacency over bootstrap runs
        stability_scores[idx] = np.array(orientation_results).mean(axis=0)
    adjacency_matrix = choose_stable_variables(stability_scores, bootstrap_threshold=bootstrap_threshold)
    return adjacency_matrix, stability_scores
def dci_stability_selection(
        X1,
        X2,
        alpha_ug_grid: list = [0.1, 1, 10],
        alpha_skeleton_grid: list = [0.1, 0.5],
        alpha_orient_grid: list = [0.001, 0.1],
        max_set_size: int = 3,
        difference_ug: list = None,
        nodes_cond_set: set = None,
        max_iter: int = 1000,
        edge_threshold: float = 0.05,
        sample_fraction: float = 0.7,
        n_bootstrap_iterations: int = 50,
        bootstrap_threshold: float = 0.5,
        n_jobs: int = 1,
        random_state: int = None,
        verbose: int = 0,
        lam: float = 0,
        order_independent: bool = True
):
    """
    Runs Difference Causal Inference (DCI) with stability selection to estimate
    the difference-DAG between two settings.

    For every combination of hyperparameters in the three grids, DCI is run on
    bootstrap resamples of both datasets; the per-combination stability score of
    an edge is its selection frequency across bootstrap runs. Edges whose score
    exceeds ``bootstrap_threshold`` for at least one combination form the
    returned difference-DAG.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    alpha_ug_grid: array-like, default = [0.1, 1, 10]
        L1 regularization values for the difference undirected graph (KLIEP).
    alpha_skeleton_grid: array-like, default = [0.1, 0.5]
        Significance levels for the skeleton phase; lower values give sparser graphs.
    alpha_orient_grid: array-like, default = [0.001, 0.1]
        Significance levels for edge orientation; lower values orient more edges.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
    difference_ug: list, default = None
        Edges of the difference undirected graph; estimated via KLIEP if None.
    nodes_cond_set: set
        Nodes to be considered as conditioning sets.
    max_iter: int, default = 1000
        Maximum number of gradient-descent iterations for KLIEP.
    edge_threshold: float, default = 0.05
        Edge weight cutoff for keeping an edge in the KLIEP difference graph.
    sample_fraction: float, default = 0.7
        Fraction of samples used in each bootstrap sample (in (0, 1]).
    n_bootstrap_iterations: int, default = 50
        Number of bootstrap samples to create per hyperparameter combination.
    bootstrap_threshold: float, default = 0.5
        Minimum stability score for an edge to be kept in the final graph.
    n_jobs: int, default = 1
        Number of jobs to run in parallel.
    random_state: int, default = None
        Seed used by the random number generator.
    verbose: int, default = 0
        The verbosity level of logging messages.
    lam: float, default = 0
        Amount of regularization for regression (ridge regression if nonzero).
    order_independent: bool, default = True
        Whether to use the order-independent variant of DCI.

    See Also
    --------
    dci, dci_undirected_graph, dci_skeleton, dci_orient

    Returns
    -------
    adjacency_matrix: array, shape = [n_features, n_features]
        Estimated difference-DAG; undecided orientations carry a 1 in both
        directions, oriented edges only in the directed entry, 0 means no edge.
    stability_scores: array, shape = [n_params, n_features, n_features]
        Stability score of each edge for each hyperparameter combination.

    References
    ----------
    [1] Direct estimation of differences in causal graphs, NeurIPS 2018.
    [2] Stability selection, JRSS-B 72(4), pp. 417-473, 2010.
    """
    _, n_variables = X1.shape
    n_params = len(alpha_ug_grid) * len(alpha_skeleton_grid) * len(alpha_orient_grid)
    hyperparameter_grid = itertools.product(alpha_ug_grid, alpha_skeleton_grid, alpha_orient_grid)
    stability_scores = np.zeros((n_params, n_variables, n_variables))
    for param_idx, (a_ug, a_skel, a_orient) in enumerate(hyperparameter_grid):
        if verbose > 0:
            print(
                "Fitting estimator for alpha_ug = %.5f, alpha_skeleton = %.5f, alpha_orient = %.5f with %d bootstrap iterations" %
                (a_ug, a_skel, a_orient, n_bootstrap_iterations))
        # fresh bootstrap resamples for this hyperparameter combination
        samples1 = bootstrap_generator(n_bootstrap_iterations, sample_fraction,
                                       X1, random_state=random_state)
        samples2 = bootstrap_generator(n_bootstrap_iterations, sample_fraction,
                                       X2, random_state=random_state)
        per_bootstrap_amats = Parallel(n_jobs, verbose=verbose
                                       )(delayed(dci)(X1[safe_mask(X1, sub1), :],
                                                      X2[safe_mask(X2, sub2), :],
                                                      alpha_ug=a_ug,
                                                      alpha_skeleton=a_skel,
                                                      alpha_orient=a_orient,
                                                      max_set_size=max_set_size,
                                                      difference_ug=difference_ug,
                                                      nodes_cond_set=nodes_cond_set,
                                                      max_iter=max_iter,
                                                      edge_threshold=edge_threshold,
                                                      verbose=verbose,
                                                      lam=lam,
                                                      order_independent=order_independent)
                                         for sub1, sub2 in zip(samples1, samples2))
        # stability score = mean adjacency over bootstrap runs
        stability_scores[param_idx] = np.array(per_bootstrap_amats).mean(axis=0)
    adjacency_matrix = choose_stable_variables(stability_scores, bootstrap_threshold=bootstrap_threshold)
    return adjacency_matrix, stability_scores
def choose_stable_variables(stability_scores, bootstrap_threshold=0.5):
    """Return the adjacency matrix of edges whose best stability score across
    hyperparameter settings exceeds ``bootstrap_threshold`` (1.0 = keep, 0.0 = drop)."""
    best_score_per_edge = stability_scores.max(axis=0)
    return (best_score_per_edge > bootstrap_threshold).astype('float')
def dci_skeleton(
        X1,
        X2,
        difference_ug: list,
        nodes_cond_set: set,
        rh1: RegressionHelper = None,
        rh2: RegressionHelper = None,
        alpha: float = 0.1,
        max_set_size: int = 3,
        verbose: int = 0,
        lam: float = 0,
        progress: bool = False
):
    """
    Estimates the skeleton of the difference-DAG.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    difference_ug: list
        List of tuples that represents edges in the difference undirected graph.
    nodes_cond_set: set
        Nodes to be considered as conditioning sets.
    rh1: RegressionHelper, default = None
        Sufficient statistics estimated based on samples in the first dataset, stored in RegressionHelper class.
    rh2: RegressionHelper, default = None
        Sufficient statistics estimated based on samples in the second dataset, stored in RegressionHelper class.
    alpha: float, default = 0.1
        Significance level parameter for determining presence of edges in the skeleton of the difference graph.
        Lower alpha results in sparser difference graph.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
        Smaller maximum conditioning set size results in faster computation time. For large datasets recommended max_set_size is 3.
    verbose: int, default = 0
        The verbosity level of logging messages.
    lam: float, default = 0
        Amount of regularization for regression (becomes ridge regression if nonzero).
    progress: bool, default = False
        Whether to show a tqdm progress bar over the difference-UG edges.

    See Also
    --------
    dci, dci_undirected_graph, dci_orient

    Returns
    -------
    skeleton: set
        Set of edges in the skeleton of the difference-DAG.
    """
    if verbose > 0:
        print("DCI skeleton estimation...")
    assert 0 <= alpha <= 1, "alpha must be in [0,1] range."
    if rh1 is None or rh2 is None:
        # obtain sufficient statistics
        suffstat1 = partial_correlation_suffstat(X1)
        suffstat2 = partial_correlation_suffstat(X2)
        rh1 = RegressionHelper(suffstat1)
        rh2 = RegressionHelper(suffstat2)
    n1 = rh1.suffstat['n']
    n2 = rh2.suffstat['n']
    skeleton = {(i, j) for i, j in difference_ug}
    difference_ug = tqdm(difference_ug) if (progress and len(difference_ug) != 0) else difference_ug
    for i, j in difference_ug:
        for cond_set in powerset(nodes_cond_set - {i, j}, r_max=max_set_size):
            cond_set_i, cond_set_j = [*cond_set, j], [*cond_set, i]
            # calculate regression coefficients (i regressed on cond_set_i) for both datasets
            beta1_i, var1_i, precision1 = rh1.regression(i, cond_set_i, lam=lam)
            beta2_i, var2_i, precision2 = rh2.regression(i, cond_set_i, lam=lam)
            # compute statistic and p-value
            j_ix = cond_set_i.index(j)
            stat_i = (beta1_i[j_ix] - beta2_i[j_ix]) ** 2 * \
                     inv(var1_i * precision1 / (n1 - 1) + var2_i * precision2 / (n2 - 1))[j_ix, j_ix]
            pval_i = 1 - ncfdtr(1, n1 + n2 - len(cond_set_i) - len(cond_set_j), 0, stat_i)
            # remove i-j from skeleton if i regressed on (j, cond_set) is invariant
            i_invariant = pval_i > alpha
            if i_invariant:
                if verbose > 1:
                    print(
                        f"Removing edge {j}->{i} since p-value={pval_i:.5f} > alpha={alpha:.5f} with cond set {cond_set_i}")
                skeleton.remove((i, j))
                break
            elif verbose > 1:
                print(
                    f"Keeping edge {i}-{j} for now, since p-value={pval_i:.5f} < alpha={alpha:.5f} with cond set {cond_set_i}")
            # calculate regression coefficients (j regressed on cond_set_j) for both datasets
            # bug fix: pass lam here too — the original omitted it, silently
            # dropping ridge regularization for the j-side invariance tests
            beta1_j, var1_j, precision1 = rh1.regression(j, cond_set_j, lam=lam)
            beta2_j, var2_j, precision2 = rh2.regression(j, cond_set_j, lam=lam)
            # compute statistic and p-value
            i_ix = cond_set_j.index(i)
            stat_j = (beta1_j[i_ix] - beta2_j[i_ix]) ** 2 * \
                     inv(var1_j * precision1 / (n1 - 1) + var2_j * precision2 / (n2 - 1))[i_ix, i_ix]
            pval_j = 1 - ncfdtr(1, n1 + n2 - len(cond_set_i) - len(cond_set_j), 0, stat_j)
            # remove i-j from skeleton if j regressed on (i, cond_set) is invariant
            j_invariant = pval_j > alpha
            if j_invariant:
                if verbose > 1:
                    print(
                        f"Removing edge {i}->{j} since p-value={pval_j:.5f} > alpha={alpha:.5f} with cond set {cond_set_j}")
                skeleton.remove((i, j))
                break
            elif verbose > 1:
                print(
                    f"Keeping edge {i}-{j} for now, since p-value={pval_j:.5f} < alpha={alpha:.5f} with cond set {cond_set_j}")
    return skeleton
def dci_orient(
        X1,
        X2,
        skeletons: Union[Dict[float, set], set],
        nodes_cond_set: set,
        rh1: RegressionHelper = None,
        rh2: RegressionHelper = None,
        alpha: float = 0.1,
        max_set_size: int = 3,
        verbose: int = 0
):
    """
    Orients edges in the skeleton of the difference DAG by simultaneously considering all nodes at each level of conditioning set size.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    skeletons: set or dictionary of float-set pairs
        Set of edges in the skeleton of the difference-DAG or a dictionary mapping hyperparamters (of the skeleton phase) to the set of edges corresponding to the skeleton.
    nodes_cond_set: set
        Nodes to be considered as conditioning sets.
    rh1: RegressionHelper, default = None
        Sufficient statistics estimated based on samples in the first dataset, stored in RegressionHelper class.
    rh2: RegressionHelper, default = None
        Sufficient statistics estimated based on samples in the second dataset, stored in RegressionHelper class.
    alpha: float, default = 0.1
        Significance level parameter for determining orientation of an edge.
        Lower alpha results in more directed edges in the difference-DAG.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
        Smaller maximum conditioning set size results in faster computation time. For large datasets recommended max_set_size is 3.
    verbose: int, default = 0
        The verbosity level of logging messages.

    See Also
    --------
    dci, dci_undirected_graph, dci_skeleton

    Returns
    -------
    adjacency_matrix: array, shape = [n_features, n_features]
        Estimated difference-DAG. Edges that were found to be different between two settings but the orientation
        could not be determined, are represented by assigning 1 in both directions, i.e. adjacency_matrix[i,j] = 1
        and adjacency_matrix[j,i] = 1. Otherwise for oriented edges, only adjacency_matrix[i,j] = 1 is assigned.
        Assignment of 0 in the adjacency matrix represents no edge.
    """
    if verbose > 0:
        print("DCI edge orientation...")
    assert 0 <= alpha <= 1, "alpha must be in [0,1] range."
    if rh1 is None or rh2 is None:
        # obtain sufficient statistics
        suffstat1 = partial_correlation_suffstat(X1)
        suffstat2 = partial_correlation_suffstat(X2)
        rh1 = RegressionHelper(suffstat1)
        rh2 = RegressionHelper(suffstat2)
    if isinstance(skeletons, dict):
        # bug fix: the comprehension variable previously shadowed the `alpha`
        # parameter, so each recursive call received the skeleton-phase
        # hyperparameter as its orientation significance level instead of the
        # caller-supplied `alpha`
        return {
            skeleton_alpha: dci_orient(
                X1,
                X2,
                skeleton,
                nodes_cond_set,
                rh1,
                rh2,
                alpha=alpha,
                max_set_size=max_set_size
            )
            for skeleton_alpha, skeleton in skeletons.items()
        }
    skeleton = {frozenset({i, j}) for i, j in skeletons}
    nodes = {i for i, j in skeleton} | {j for i, j in skeleton}
    d_nx = nx.DiGraph()
    d_nx.add_nodes_from(nodes)
    nodes_with_decided_parents = set()
    n1 = rh1.suffstat['n']
    n2 = rh2.suffstat['n']
    # consider candidate parent sets of increasing size
    for parent_set_size in range(max_set_size + 2):
        if verbose > 0: print(f"Trying parent sets of size {parent_set_size}")
        pvalue_dict = dict()
        for i in nodes - nodes_with_decided_parents:
            for cond_i in itertools.combinations(nodes_cond_set - {i}, parent_set_size):
                beta1_i, var1_i, _ = rh1.regression(i, list(cond_i))
                beta2_i, var2_i, _ = rh2.regression(i, list(cond_i))
                # two-sided p-value for invariance of residual variances
                pvalue_i = ncfdtr(n1 - parent_set_size, n2 - parent_set_size, 0, var1_i / var2_i)
                pvalue_i = 2 * min(pvalue_i, 1 - pvalue_i)
                pvalue_dict[(i, frozenset(cond_i))] = pvalue_i
        # sort p-value dict; take candidate parent sets with highest p-values first
        sorted_pvalue_dict = [
            (pvalue, i, cond_i)
            for (i, cond_i), pvalue in sorted(pvalue_dict.items(), key=op.itemgetter(1), reverse=True)
            if pvalue > alpha
        ]
        while sorted_pvalue_dict:
            _, i, cond_i = sorted_pvalue_dict.pop(0)
            i_children = {j for j in nodes - cond_i - {i} if frozenset({i, j}) in skeleton}
            # don't use this parent set if it contradicts the existing edges
            if any(j in d_nx.successors(i) for j in cond_i):
                continue
            if any(j in d_nx.predecessors(i) for j in i_children):
                continue
            # don't use this parent set if it creates a cycle
            if any(j in nx.descendants(d_nx, i) for j in cond_i):
                continue
            if any(j in nx.ancestors(d_nx, i) for j in i_children):
                continue
            edges = {(j, i) for j in cond_i if frozenset({i, j}) in skeleton} | \
                    {(i, j) for j in nodes - cond_i - {i} if frozenset({i, j}) in skeleton}
            nodes_with_decided_parents.add(i)
            if verbose > 0: print(f"Adding {edges}")
            d_nx.add_edges_from(edges)
    # orient remaining edges via graph traversal (i->...->j implies i->j)
    oriented_edges = set(d_nx.edges)
    unoriented_edges_before_traversal = skeleton - {frozenset({j, i}) for i, j in oriented_edges}
    unoriented_edges = unoriented_edges_before_traversal.copy()
    g = nx.DiGraph()
    for i, j in oriented_edges:
        g.add_edge(i, j)
    g.add_nodes_from(nodes)
    for i, j in unoriented_edges_before_traversal:
        chain_path = list(nx.all_simple_paths(g, source=i, target=j))
        if len(chain_path) > 0:
            oriented_edges.add((i, j))
            unoriented_edges.remove(frozenset({i, j}))
            if verbose > 0:
                print("Oriented (%d, %d) as %s with graph traversal" % (i, j, (i, j)))
        else:
            chain_path = list(nx.all_simple_paths(g, source=j, target=i))
            if len(chain_path) > 0:
                oriented_edges.add((j, i))
                unoriented_edges.remove(frozenset({i, j}))
                if verbose > 0:
                    print("Oriented (%d, %d) as %s with graph traversal" % (i, j, (j, i)))
    # form an adjacency matrix containing directed and undirected edges
    num_nodes = X1.shape[1]
    adjacency_matrix = edges2adjacency(num_nodes, unoriented_edges, undirected=True) + edges2adjacency(num_nodes,
                                                                                                      oriented_edges,
                                                                                                      undirected=False)
    return adjacency_matrix
def dci_orient_order_dependent(
        X1,
        X2,
        skeletons: Union[Dict[float, set], set],
        nodes_cond_set: set,
        rh1: RegressionHelper = None,
        rh2: RegressionHelper = None,
        alpha: float = 0.1,
        max_set_size: int = 3,
        verbose: int = 0
):
    """
    Orients edges in the skeleton of the difference DAG by iterating over nodes.

    Parameters
    ----------
    X1: array, shape = [n_samples, n_features]
        First dataset.
    X2: array, shape = [n_samples, n_features]
        Second dataset.
    skeletons: set
        Set of edges in the skeleton of the difference-DAG.
    nodes_cond_set: set
        Nodes to be considered as conditioning sets.
    rh1: RegressionHelper, default = None
        Sufficient statistics estimated based on samples in the first dataset, stored in RegressionHelper class.
    rh2: RegressionHelper, default = None
        Sufficient statistics estimated based on samples in the second dataset, stored in RegressionHelper class.
    alpha: float, default = 0.1
        Significance level parameter for determining orientation of an edge.
        Lower alpha results in more directed edges in the difference-DAG.
    max_set_size: int, default = 3
        Maximum conditioning set size used to test regression invariance.
        Smaller maximum conditioning set size results in faster computation time. For large datasets recommended max_set_size is 3.
    verbose: int, default = 0
        The verbosity level of logging messages.

    See Also
    --------
    dci, dci_undirected_graph, dci_skeleton

    Returns
    -------
    adjacency_matrix: array, shape = [n_features, n_features]
        Estimated difference-DAG. Edges that were found to be different between two settings but the orientation
        could not be determined, are represented by assigning 1 in both directions, i.e. adjacency_matrix[i,j] = 1
        and adjacency_matrix[j,i] = 1. Otherwise for oriented edges, only adjacency_matrix[i,j] = 1 is assigned.
        Assignment of 0 in the adjacency matrix represents no edge.
    """
    if verbose > 0:
        print("DCI edge orientation...")
    assert 0 <= alpha <= 1, "alpha must be in [0,1] range."
    if rh1 is None or rh2 is None:
        # obtain sufficient statistics
        suffstat1 = partial_correlation_suffstat(X1)
        suffstat2 = partial_correlation_suffstat(X2)
        rh1 = RegressionHelper(suffstat1)
        rh2 = RegressionHelper(suffstat2)
    # bug fix: the parameter is named `skeletons`; the original body referenced
    # an undefined name `skeleton`, raising NameError on every call
    skeleton = skeletons
    nodes = {i for i, j in skeleton} | {j for i, j in skeleton}
    oriented_edges = set()
    n1 = rh1.suffstat['n']
    n2 = rh2.suffstat['n']
    for i, j in skeleton:
        for cond_i, cond_j in zip(powerset(nodes_cond_set - {i}, r_max=max_set_size),
                                  powerset(nodes_cond_set - {j}, r_max=max_set_size)):
            # compute residual variances for i
            beta1_i, var1_i, _ = rh1.regression(i, list(cond_i))
            beta2_i, var2_i, _ = rh2.regression(i, list(cond_i))
            # compute p-value for invariance of residual variances for i
            pvalue_i = ncfdtr(n1 - len(cond_i), n2 - len(cond_i), 0, var1_i / var2_i)
            pvalue_i = 2 * min(pvalue_i, 1 - pvalue_i)
            # compute residual variances for j
            beta1_j, var1_j, _ = rh1.regression(j, list(cond_j))
            beta2_j, var2_j, _ = rh2.regression(j, list(cond_j))
            # compute p-value for invariance of residual variances for j
            pvalue_j = ncfdtr(n1 - len(cond_j), n2 - len(cond_j), 0, var1_j / var2_j)
            pvalue_j = 2 * min(pvalue_j, 1 - pvalue_j)
            if ((pvalue_i > alpha) | (pvalue_j > alpha)):
                # orient the edge according to highest p-value
                if pvalue_i > pvalue_j:
                    edge = (j, i) if j in cond_i else (i, j)
                    pvalue_used = pvalue_i
                else:
                    edge = (i, j) if i in cond_j else (j, i)
                    pvalue_used = pvalue_j
                oriented_edges.add(edge)
                if verbose > 0:
                    print("Oriented (%d, %d) as %s since p-value=%.5f > alpha=%.5f" % (i, j, edge, pvalue_used, alpha))
                break
    # orient edges via graph traversal (i->...->j implies i->j)
    unoriented_edges_before_traversal = skeleton - oriented_edges - {(j, i) for i, j in oriented_edges}
    unoriented_edges = unoriented_edges_before_traversal.copy()
    g = nx.DiGraph()
    for i, j in oriented_edges:
        g.add_edge(i, j)
    g.add_nodes_from(nodes)
    for i, j in unoriented_edges_before_traversal:
        chain_path = list(nx.all_simple_paths(g, source=i, target=j))
        if len(chain_path) > 0:
            oriented_edges.add((i, j))
            unoriented_edges.remove((i, j))
            if verbose > 0:
                print("Oriented (%d, %d) as %s with graph traversal" % (i, j, (i, j)))
        else:
            chain_path = list(nx.all_simple_paths(g, source=j, target=i))
            if len(chain_path) > 0:
                oriented_edges.add((j, i))
                unoriented_edges.remove((i, j))
                if verbose > 0:
                    print("Oriented (%d, %d) as %s with graph traversal" % (i, j, (j, i)))
    # form an adjacency matrix containing directed and undirected edges
    num_nodes = X1.shape[1]
    adjacency_matrix = edges2adjacency(num_nodes, unoriented_edges, undirected=True) + edges2adjacency(num_nodes,
                                                                                                      oriented_edges,
                                                                                                      undirected=False)
    return adjacency_matrix
def get_directed_and_undirected_edges(adjacency_matrix):
    """
    Splits a partially directed adjacency matrix into its directed and
    undirected parts.

    Useful for plotting the difference causal graph.

    Parameters
    ----------
    adjacency_matrix: array, shape = [num_nodes, num_nodes]
        Adjacency matrix of a partially directed acyclic graph; each entry is
        0 or 1 (edge absent / present). An undirected edge is encoded by a 1
        in both directions.

    Returns
    -------
    adjacency_matrix_directed: array, shape = [num_nodes, num_nodes]
        Adjacency matrix containing only directed edges.
    adjacency_matrix_undirected: array, shape = [num_nodes, num_nodes]
        Adjacency matrix containing only undirected edges.
    """
    amat = adjacency_matrix.astype('float')
    # symmetrized sum: 2 marks a mutual (undirected) edge, 1 a one-way edge
    pair_counts = amat + amat.T
    undirected_part = (pair_counts == 2).astype('float')
    directed_part = (pair_counts == 1).astype('float')
    # keep the original direction for one-way edges
    one_way_mask = directed_part == 1
    directed_part[one_way_mask] = amat[one_way_mask]
    return directed_part, undirected_part
def plot_stability_sel_probailities(alpha2adjacency, log_scale=False):
    """Plot the stability selection probability of each candidate edge as a
    function of the significance-level hyperparameter.

    NOTE(review): the public name contains a typo ('probailities') but is kept
    for backward compatibility with existing callers.
    """
    alphas = list(alpha2adjacency.keys())
    nnodes = alpha2adjacency[alphas[0]].shape[0]
    plt.figure()
    if log_scale:
        plt.xscale('log')
    # one curve per unordered node pair
    for i, j in itertools.combinations(range(nnodes), 2):
        selection_probs = np.array([alpha2adjacency[a][i, j] for a in alpha2adjacency])
        plt.plot(alphas, selection_probs, color='k')
    plt.xlabel('Significance level for hypothesis tests')
    plt.ylabel('Probability of selection')
    plt.show()
def choose_stable_variables_from_dict(alpha2adjacency, bootstrap_threshold=0.5):
    """Return the adjacency matrix of edges with stability scores above a threshold.

    Parameters
    ----------
    alpha2adjacency: dict
        Maps each hyperparameter setting to an array of stability selection
        scores of shape [n_variables, n_variables].
    bootstrap_threshold: float, default=0.5
        An edge is kept if its maximal stability score across all
        hyperparameter settings is strictly greater than this value.

    Returns
    -------
    adj: array, shape = [n_variables, n_variables]
        Binary (0/1, float dtype) adjacency matrix of the selected edges.
    """
    # Stack all score matrices along a new leading axis instead of filling a
    # preallocated array in a Python loop (also works for non-square matrices).
    stability_scores = np.stack([alpha2adjacency[param] for param in alpha2adjacency])
    # Keep an edge if it is stable under at least one hyperparameter setting.
    adj = (stability_scores.max(axis=0) > bootstrap_threshold).astype('float')
    return adj
| [
"conditional_independence.partial_correlation_suffstat",
"matplotlib.pyplot.figure",
"networkx.ancestors",
"graphical_model_learning.utils.regression.RegressionHelper",
"scipy.special.ncfdtr",
"sklearn.utils.safe_mask",
"itertools.product",
"operator.itemgetter",
"tqdm.tqdm",
"matplotlib.pyplot.sh... | [((6164, 6196), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X1'], {}), '(X1)\n', (6192, 6196), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((6213, 6245), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X2'], {}), '(X2)\n', (6241, 6245), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((6256, 6283), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat1'], {}), '(suffstat1)\n', (6272, 6283), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((6294, 6321), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat2'], {}), '(suffstat2)\n', (6310, 6321), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((18833, 18879), 'numpy.zeros', 'np.zeros', (['(n_params, n_variables, n_variables)'], {}), '((n_params, n_variables, n_variables))\n', (18841, 18879), True, 'import numpy as np\n'), ((24731, 24803), 'itertools.product', 'itertools.product', (['alpha_ug_grid', 'alpha_skeleton_grid', 'alpha_orient_grid'], {}), '(alpha_ug_grid, alpha_skeleton_grid, alpha_orient_grid)\n', (24748, 24803), False, 'import itertools\n'), ((24827, 24873), 'numpy.zeros', 'np.zeros', (['(n_params, n_variables, n_variables)'], {}), '((n_params, n_variables, n_variables))\n', (24835, 24873), True, 'import numpy as np\n'), ((35370, 35382), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (35380, 35382), True, 'import networkx as nx\n'), ((37644, 37656), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (37654, 37656), True, 'import networkx as nx\n'), ((43422, 43434), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (43432, 43434), True, 'import networkx as nx\n'), ((46287, 46299), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (46297, 46299), True, 
'import matplotlib.pyplot as plt\n'), ((46578, 46631), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Significance level for hypothesis tests"""'], {}), "('Significance level for hypothesis tests')\n", (46588, 46631), True, 'import matplotlib.pyplot as plt\n'), ((46636, 46674), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability of selection"""'], {}), "('Probability of selection')\n", (46646, 46674), True, 'import matplotlib.pyplot as plt\n'), ((46679, 46689), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (46687, 46689), True, 'import matplotlib.pyplot as plt\n'), ((8186, 8218), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X1'], {}), '(X1)\n', (8214, 8218), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((8239, 8271), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X2'], {}), '(X2)\n', (8267, 8271), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((8286, 8313), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat1'], {}), '(suffstat1)\n', (8302, 8313), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((8328, 8355), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat2'], {}), '(suffstat2)\n', (8344, 8355), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((8669, 8688), 'tqdm.tqdm', 'tqdm', (['difference_ug'], {}), '(difference_ug)\n', (8673, 8688), False, 'from tqdm import tqdm\n'), ((8806, 8859), 'graphical_model_learning.utils.core_utils.powerset', 'powerset', (['(nodes_cond_set - {i, j})'], {'r_max': 'max_set_size'}), '(nodes_cond_set - {i, j}, r_max=max_set_size)\n', (8814, 8859), False, 'from graphical_model_learning.utils.core_utils import powerset\n'), ((14001, 14041), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': 
'verbose'}), '(n_jobs=n_jobs, verbose=verbose)\n', (14009, 14041), False, 'from joblib import Parallel, delayed\n'), ((14582, 14598), 'numpy.zeros', 'np.zeros', (['[p, p]'], {}), '([p, p])\n', (14590, 14598), True, 'import numpy as np\n'), ((17139, 17172), 'joblib.Parallel', 'Parallel', (['n_jobs'], {'verbose': 'verbose'}), '(n_jobs, verbose=verbose)\n', (17147, 17172), False, 'from joblib import Parallel, delayed\n'), ((17687, 17703), 'numpy.zeros', 'np.zeros', (['[p, p]'], {}), '([p, p])\n', (17695, 17703), True, 'import numpy as np\n'), ((29265, 29297), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X1'], {}), '(X1)\n', (29293, 29297), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((29318, 29350), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X2'], {}), '(X2)\n', (29346, 29350), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((29365, 29392), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat1'], {}), '(suffstat1)\n', (29381, 29392), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((29407, 29434), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat2'], {}), '(suffstat2)\n', (29423, 29434), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((29562, 29581), 'tqdm.tqdm', 'tqdm', (['difference_ug'], {}), '(difference_ug)\n', (29566, 29581), False, 'from tqdm import tqdm\n'), ((29698, 29751), 'graphical_model_learning.utils.core_utils.powerset', 'powerset', (['(nodes_cond_set - {i, j})'], {'r_max': 'max_set_size'}), '(nodes_cond_set - {i, j}, r_max=max_set_size)\n', (29706, 29751), False, 'from graphical_model_learning.utils.core_utils import powerset\n'), ((34694, 34726), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', 
(['X1'], {}), '(X1)\n', (34722, 34726), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((34747, 34779), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X2'], {}), '(X2)\n', (34775, 34779), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((34794, 34821), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat1'], {}), '(suffstat1)\n', (34810, 34821), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((34836, 34863), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat2'], {}), '(suffstat2)\n', (34852, 34863), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((41299, 41331), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X1'], {}), '(X1)\n', (41327, 41331), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((41352, 41384), 'conditional_independence.partial_correlation_suffstat', 'partial_correlation_suffstat', (['X2'], {}), '(X2)\n', (41380, 41384), False, 'from conditional_independence import partial_correlation_suffstat\n'), ((41399, 41426), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat1'], {}), '(suffstat1)\n', (41415, 41426), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((41441, 41468), 'graphical_model_learning.utils.regression.RegressionHelper', 'RegressionHelper', (['suffstat2'], {}), '(suffstat2)\n', (41457, 41468), False, 'from graphical_model_learning.utils.regression import RegressionHelper\n'), ((46326, 46343), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (46336, 46343), True, 'import matplotlib.pyplot as plt\n'), ((46460, 46529), 'numpy.array', 'np.array', (['[alpha2adjacency[alpha][i, j] for alpha in 
alpha2adjacency]'], {}), '([alpha2adjacency[alpha][i, j] for alpha in alpha2adjacency])\n', (46468, 46529), True, 'import numpy as np\n'), ((46538, 46572), 'matplotlib.pyplot.plot', 'plt.plot', (['alphas', 'probs'], {'color': '"""k"""'}), "(alphas, probs, color='k')\n", (46546, 46572), True, 'import matplotlib.pyplot as plt\n'), ((15037, 15077), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': 'verbose'}), '(n_jobs=n_jobs, verbose=verbose)\n', (15045, 15077), False, 'from joblib import Parallel, delayed\n'), ((19008, 19048), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': 'verbose'}), '(n_jobs=n_jobs, verbose=verbose)\n', (19016, 19048), False, 'from joblib import Parallel, delayed\n'), ((25541, 25574), 'joblib.Parallel', 'Parallel', (['n_jobs'], {'verbose': 'verbose'}), '(n_jobs, verbose=verbose)\n', (25549, 25574), False, 'from joblib import Parallel, delayed\n'), ((35747, 35808), 'itertools.combinations', 'itertools.combinations', (['(nodes_cond_set - {i})', 'parent_set_size'], {}), '(nodes_cond_set - {i}, parent_set_size)\n', (35769, 35808), False, 'import itertools\n'), ((37820, 37862), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['g'], {'source': 'i', 'target': 'j'}), '(g, source=i, target=j)\n', (37839, 37862), True, 'import networkx as nx\n'), ((41676, 41726), 'graphical_model_learning.utils.core_utils.powerset', 'powerset', (['(nodes_cond_set - {i})'], {'r_max': 'max_set_size'}), '(nodes_cond_set - {i}, r_max=max_set_size)\n', (41684, 41726), False, 'from graphical_model_learning.utils.core_utils import powerset\n'), ((41762, 41812), 'graphical_model_learning.utils.core_utils.powerset', 'powerset', (['(nodes_cond_set - {j})'], {'r_max': 'max_set_size'}), '(nodes_cond_set - {j}, r_max=max_set_size)\n', (41770, 41812), False, 'from graphical_model_learning.utils.core_utils import powerset\n'), ((43598, 43640), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['g'], {'source': 'i', 'target': 'j'}), '(g, 
source=i, target=j)\n', (43617, 43640), True, 'import networkx as nx\n'), ((14051, 14081), 'joblib.delayed', 'delayed', (['dci_skeleton_multiple'], {}), '(dci_skeleton_multiple)\n', (14058, 14081), False, 'from joblib import Parallel, delayed\n'), ((17182, 17212), 'joblib.delayed', 'delayed', (['dci_skeleton_multiple'], {}), '(dci_skeleton_multiple)\n', (17189, 17212), False, 'from joblib import Parallel, delayed\n'), ((19507, 19536), 'numpy.array', 'np.array', (['orientation_results'], {}), '(orientation_results)\n', (19515, 19536), True, 'import numpy as np\n'), ((26735, 26762), 'numpy.array', 'np.array', (['bootstrap_results'], {}), '(bootstrap_results)\n', (26743, 26762), True, 'import numpy as np\n'), ((35975, 36045), 'scipy.special.ncfdtr', 'ncfdtr', (['(n1 - parent_set_size)', '(n2 - parent_set_size)', '(0)', '(var1_i / var2_i)'], {}), '(n1 - parent_set_size, n2 - parent_set_size, 0, var1_i / var2_i)\n', (35981, 36045), False, 'from scipy.special import ncfdtr\n'), ((38149, 38191), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['g'], {'source': 'j', 'target': 'i'}), '(g, source=j, target=i)\n', (38168, 38191), True, 'import networkx as nx\n'), ((43916, 43958), 'networkx.all_simple_paths', 'nx.all_simple_paths', (['g'], {'source': 'j', 'target': 'i'}), '(g, source=j, target=i)\n', (43935, 43958), True, 'import networkx as nx\n'), ((9353, 9421), 'numpy.linalg.inv', 'inv', (['(var1_i * precision1 / (n1 - 1) + var2_i * precision2 / (n2 - 1))'], {}), '(var1_i * precision1 / (n1 - 1) + var2_i * precision2 / (n2 - 1))\n', (9356, 9421), False, 'from numpy.linalg import inv\n'), ((10993, 11061), 'numpy.linalg.inv', 'inv', (['(var1_j * precision1 / (n1 - 1) + var2_j * precision2 / (n2 - 1))'], {}), '(var1_j * precision1 / (n1 - 1) + var2_j * precision2 / (n2 - 1))\n', (10996, 11061), False, 'from numpy.linalg import inv\n'), ((15091, 15110), 'joblib.delayed', 'delayed', (['dci_orient'], {}), '(dci_orient)\n', (15098, 15110), False, 'from joblib import Parallel, 
delayed\n'), ((19062, 19081), 'joblib.delayed', 'delayed', (['dci_orient'], {}), '(dci_orient)\n', (19069, 19081), False, 'from joblib import Parallel, delayed\n'), ((25613, 25625), 'joblib.delayed', 'delayed', (['dci'], {}), '(dci)\n', (25620, 25625), False, 'from joblib import Parallel, delayed\n'), ((30245, 30313), 'numpy.linalg.inv', 'inv', (['(var1_i * precision1 / (n1 - 1) + var2_i * precision2 / (n2 - 1))'], {}), '(var1_i * precision1 / (n1 - 1) + var2_i * precision2 / (n2 - 1))\n', (30248, 30313), False, 'from numpy.linalg import inv\n'), ((31405, 31473), 'numpy.linalg.inv', 'inv', (['(var1_j * precision1 / (n1 - 1) + var2_j * precision2 / (n2 - 1))'], {}), '(var1_j * precision1 / (n1 - 1) + var2_j * precision2 / (n2 - 1))\n', (31408, 31473), False, 'from numpy.linalg import inv\n'), ((14098, 14123), 'sklearn.utils.safe_mask', 'safe_mask', (['X1', 'subsample1'], {}), '(X1, subsample1)\n', (14107, 14123), False, 'from sklearn.utils import safe_mask\n'), ((14144, 14169), 'sklearn.utils.safe_mask', 'safe_mask', (['X2', 'subsample2'], {}), '(X2, subsample2)\n', (14153, 14169), False, 'from sklearn.utils import safe_mask\n'), ((17229, 17254), 'sklearn.utils.safe_mask', 'safe_mask', (['X1', 'subsample1'], {}), '(X1, subsample1)\n', (17238, 17254), False, 'from sklearn.utils import safe_mask\n'), ((17275, 17300), 'sklearn.utils.safe_mask', 'safe_mask', (['X2', 'subsample2'], {}), '(X2, subsample2)\n', (17284, 17300), False, 'from sklearn.utils import safe_mask\n'), ((36330, 36346), 'operator.itemgetter', 'op.itemgetter', (['(1)'], {}), '(1)\n', (36343, 36346), True, 'import operator as op\n'), ((36924, 36947), 'networkx.descendants', 'nx.descendants', (['d_nx', 'i'], {}), '(d_nx, i)\n', (36938, 36947), True, 'import networkx as nx\n'), ((37015, 37036), 'networkx.ancestors', 'nx.ancestors', (['d_nx', 'i'], {}), '(d_nx, i)\n', (37027, 37036), True, 'import networkx as nx\n'), ((15131, 15156), 'sklearn.utils.safe_mask', 'safe_mask', (['X1', 'subsample1'], {}), '(X1, 
subsample1)\n', (15140, 15156), False, 'from sklearn.utils import safe_mask\n'), ((15181, 15206), 'sklearn.utils.safe_mask', 'safe_mask', (['X1', 'subsample2'], {}), '(X1, subsample2)\n', (15190, 15206), False, 'from sklearn.utils import safe_mask\n'), ((19102, 19127), 'sklearn.utils.safe_mask', 'safe_mask', (['X1', 'subsample1'], {}), '(X1, subsample1)\n', (19111, 19127), False, 'from sklearn.utils import safe_mask\n'), ((19152, 19177), 'sklearn.utils.safe_mask', 'safe_mask', (['X1', 'subsample2'], {}), '(X1, subsample2)\n', (19161, 19177), False, 'from sklearn.utils import safe_mask\n'), ((25629, 25654), 'sklearn.utils.safe_mask', 'safe_mask', (['X1', 'subsample1'], {}), '(X1, subsample1)\n', (25638, 25654), False, 'from sklearn.utils import safe_mask\n'), ((25715, 25740), 'sklearn.utils.safe_mask', 'safe_mask', (['X2', 'subsample2'], {}), '(X2, subsample2)\n', (25724, 25740), False, 'from sklearn.utils import safe_mask\n')] |
# -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.
""" Read/Write images using Pillow/PIL.
Backend Library: `Pillow <https://pillow.readthedocs.io/en/stable/>`_
Plugin that wraps the the Pillow library. Pillow is a friendly fork of PIL
(Python Image Library) and supports reading and writing of common formats (jpg,
png, gif, tiff, ...). For, the complete list of features and supported formats
please refer to pillows official docs (see the Backend Library link).
Parameters
----------
request : Request
A request object representing the resource to be operated on.
Methods
-------
.. autosummary::
:toctree: _plugins/pillow
PillowPlugin.read
PillowPlugin.write
PillowPlugin.iter
PillowPlugin.get_meta
"""
from io import BytesIO
from typing import Callable, Optional, Dict, Any, Tuple, cast, Iterator, Union, List
import numpy as np
from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags # type: ignore
from ..core.request import Request, IOMode, InitializationError, URI_BYTES
from ..core.v3_plugin_api import PluginV3, ImageProperties
import warnings
from ..typing import ArrayLike
def _exif_orientation_transform(orientation: int, mode: str) -> Callable:
    """Return a callable that maps an ndarray from the given EXIF orientation
    into the standard (upright) orientation."""

    # Axis to mirror along: -2 when the mode carries a color channel, -1 otherwise.
    flip_axis = -2 if Image.getmodebands(mode) > 1 else -1

    def mirror(arr):
        return np.flip(arr, axis=flip_axis)

    def mirror_other_axis(arr):
        return np.flip(arr, axis=flip_axis - 1)

    transforms = {
        1: lambda arr: arr,
        2: mirror,
        3: lambda arr: np.rot90(arr, k=2),
        4: mirror_other_axis,
        5: lambda arr: np.flip(np.rot90(arr, k=3), axis=flip_axis),
        6: lambda arr: np.rot90(arr, k=1),
        7: lambda arr: np.flip(np.rot90(arr, k=1), axis=flip_axis),
        8: lambda arr: np.rot90(arr, k=3),
    }
    return transforms[orientation]
class PillowPlugin(PluginV3):
    def __init__(self, request: Request) -> None:
        """Instantiate a new Pillow Plugin Object

        Parameters
        ----------
        request : {Request}
            A request object representing the resource to be operated on.
        """
        super().__init__(request)

        # Lazily-opened PIL image handle; stays None in write mode.
        self._image: Image = None

        if request.mode.io_mode == IOMode.read:
            try:
                with Image.open(request.get_file()):
                    # Check if it is generally possible to read the image.
                    # This will not read any data and merely try to find a
                    # compatible pillow plugin (ref: the pillow docs).
                    pass
            except UnidentifiedImageError:
                if request._uri_type == URI_BYTES:
                    raise InitializationError(
                        "Pillow can not read the provided bytes."
                    ) from None
                else:
                    raise InitializationError(
                        f"Pillow can not read {request.raw_uri}."
                    ) from None

            self._image = Image.open(self._request.get_file())
        else:
            extension = self.request.extension or self.request.format_hint
            if extension is None:
                warnings.warn(
                    "Can't determine file format to write as. You _must_"
                    " set `format` during write or the call will fail. Use "
                    "`extension` to supress this warning. ",
                    UserWarning,
                )
                return

            # Try the fast (preinit) plugin set first, then the full set.
            tirage = [Image.preinit, Image.init]
            for format_loader in tirage:
                format_loader()
                if extension in Image.registered_extensions().keys():
                    return

            raise InitializationError(
                f"Pillow can not write `{extension}` files."
            ) from None

    def close(self) -> None:
        """Close the underlying PIL image (if any) and finalize the request."""
        if self._image:
            self._image.close()

        self._request.finish()

    def read(
        self, *, index=None, mode=None, rotate=False, apply_gamma=False
    ) -> np.ndarray:
        """
        Parses the given URI and creates a ndarray from it.

        Parameters
        ----------
        index : {integer}
            If the ImageResource contains multiple ndimages, and index is an
            integer, select the index-th ndimage from among them and return it.
            If index is an ellipsis (...), read all ndimages in the file and
            stack them along a new batch dimension and return them. If index is
            None, this plugin reads the first image of the file (index=0) unless
            the image is a GIF or APNG, in which case all images are read
            (index=...).
        mode : {str, None}
            Convert the image to the given mode before returning it. If None,
            the mode will be left unchanged. Possible modes can be found at:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
        rotate : {bool}
            If set to ``True`` and the image contains an EXIF orientation tag,
            apply the orientation before returning the ndimage.
        apply_gamma : {bool}
            If ``True`` and the image contains metadata about gamma, apply gamma
            correction to the image.

        Returns
        -------
        ndimage : ndarray
            A numpy array containing the loaded image data

        Notes
        -----
        If you open a GIF - or any other format using color pallets - you may
        wish to manually set the `mode` parameter. Otherwise, the numbers in
        the returned image will refer to the entries in the color pallet, which
        is discarded during conversion to ndarray.
        """
        if index is None:
            # GIF and APNG are animated formats: default to reading all frames.
            if self._image.format == "GIF":
                index = Ellipsis
            elif self._image.custom_mimetype == "image/apng":
                index = Ellipsis
            else:
                index = 0

        if isinstance(index, int):
            # will raise IO error if index >= number of frames in image
            self._image.seek(index)
            image = self._apply_transforms(self._image, mode, rotate, apply_gamma)
            return image
        else:
            iterator = self.iter(mode=mode, rotate=rotate, apply_gamma=apply_gamma)
            image = np.stack([im for im in iterator], axis=0)
            return image

    def iter(
        self,
        *,
        mode: Optional[str] = None,
        rotate: bool = False,
        apply_gamma: bool = False,
    ) -> Iterator[np.ndarray]:
        """
        Iterate over all ndimages/frames in the URI

        Parameters
        ----------
        mode : {str, None}
            Convert the image to the given mode before returning it. If None,
            the mode will be left unchanged. Possible modes can be found at:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
        rotate : {bool}
            If set to ``True`` and the image contains an EXIF orientation tag,
            apply the orientation before returning the ndimage.
        apply_gamma : {bool}
            If ``True`` and the image contains metadata about gamma, apply gamma
            correction to the image.
        """
        for im in ImageSequence.Iterator(self._image):
            yield self._apply_transforms(im, mode, rotate, apply_gamma)

    def _apply_transforms(self, image, mode, rotate, apply_gamma) -> np.ndarray:
        """Convert a PIL frame to ndarray, optionally applying mode conversion,
        EXIF rotation and gamma correction."""
        if mode is not None:
            image = image.convert(mode)
        elif image.format == "GIF":
            # adjust for pillow9 changes
            # see: https://github.com/python-pillow/Pillow/issues/5929
            image = image.convert(image.palette.mode)
        image = np.asarray(image)

        meta = self.metadata(index=self._image.tell(), exclude_applied=False)
        if rotate and "Orientation" in meta:
            transformation = _exif_orientation_transform(
                meta["Orientation"], self._image.mode
            )
            image = transformation(image)

        if apply_gamma and "gamma" in meta:
            gamma = float(meta["gamma"])
            scale = float(65536 if image.dtype == np.uint16 else 255)
            gain = 1.0
            image = ((image / scale) ** gamma) * scale * gain + 0.4999
            image = np.round(image).astype(np.uint8)

        return image

    def write(
        self,
        ndimage: Union[ArrayLike, List[ArrayLike]],
        *,
        mode: Optional[str] = None,
        format: Optional[str] = None,
        **kwargs,
    ) -> Optional[bytes]:
        """
        Write an ndimage to the URI specified in path.

        If the URI points to a file on the current host and the file does not
        yet exist it will be created. If the file exists already, it will be
        appended if possible; otherwise, it will be replaced.

        If necessary, the image is broken down along the leading dimension to
        fit into individual frames of the chosen format. If the format doesn't
        support multiple frames, and IOError is raised.

        Parameters
        ----------
        image : ndarray
            The ndimage to write.
        mode : {str, None}
            Specify the image's color format. If None (default), the mode is
            inferred from the array's shape and dtype. Possible modes can be
            found at:
            https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes
        format : {str, None}
            Optional format override. If omitted, the format to use is
            determined from the filename extension. If a file object was used
            instead of a filename, this parameter must always be used.
        kwargs : ...
            Extra arguments to pass to pillow. If a writer doesn't recognise an
            option, it is silently ignored. The available options are described
            in pillow's `image format documentation
            <https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html>`_
            for each writer.

        Notes
        -----
        When writing batches of very narrow (2-4 pixels wide) gray images set
        the ``mode`` explicitly to avoid the batch being identified as a colored
        image.
        """
        extension = self.request.extension or self.request.format_hint
        save_args = {
            "format": format or Image.registered_extensions()[extension],
        }

        if isinstance(ndimage, list):
            ndimage = np.stack(ndimage, axis=0)
            # A list input is by definition a batch of frames.
            is_batch = True
        else:
            ndimage = np.asarray(ndimage)
            is_batch = None

        # check if ndimage is a batch of frames/pages (e.g. for writing GIF)
        # only run the heuristic when the input wasn't a list; previously this
        # chain ran unconditionally and could override the list case above
        if is_batch is None:
            if mode is not None:
                is_batch = (
                    ndimage.ndim > 3 if Image.getmodebands(mode) > 1 else ndimage.ndim > 2
                )
            elif ndimage.ndim == 2:
                is_batch = False
            elif ndimage.ndim == 3 and ndimage.shape[-1] in [2, 3, 4]:
                # Note: this makes a channel-last assumption
                # (pillow seems to make it as well)
                is_batch = False
            else:
                is_batch = True

        if not is_batch:
            # promote a single frame to a batch of one so the loop below works
            ndimage = ndimage[None, ...]

        pil_frames = list()
        for frame in ndimage:
            pil_frame = Image.fromarray(frame, mode=mode)
            if "bits" in kwargs:
                pil_frame = pil_frame.quantize(colors=2 ** kwargs["bits"])
            pil_frames.append(pil_frame)

        primary_image, other_images = pil_frames[0], pil_frames[1:]

        if is_batch:
            save_args["save_all"] = True
            save_args["append_images"] = other_images

        save_args.update(kwargs)
        primary_image.save(self._request.get_file(), **save_args)

        if self._request._uri_type == URI_BYTES:
            file = cast(BytesIO, self._request.get_file())
            return file.getvalue()

        return None

    def get_meta(self, *, index=0) -> Dict[str, Any]:
        """Backwards-compatible alias for :meth:`metadata` including applied fields."""
        return self.metadata(index=index, exclude_applied=False)

    def metadata(
        self, index: Optional[int] = None, exclude_applied: bool = True
    ) -> Dict[str, Any]:
        """Read ndimage metadata.

        Parameters
        ----------
        index : {integer, None}
            If the ImageResource contains multiple ndimages, and index is an
            integer, select the index-th ndimage from among them and return its
            metadata. If index is an ellipsis (...), read and return global
            metadata. If index is None, this plugin reads metadata from the
            first image of the file (index=0) unless the image is a GIF or APNG,
            in which case global metadata is read (index=...).

        Returns
        -------
        metadata : dict
            A dictionary of format-specific metadata.
        """
        if index is None:
            if self._image.format == "GIF":
                index = Ellipsis
            elif self._image.custom_mimetype == "image/apng":
                index = Ellipsis
            else:
                index = 0

        if isinstance(index, int) and self._image.tell() != index:
            self._image.seek(index)

        metadata = self._image.info.copy()
        metadata["mode"] = self._image.mode
        metadata["shape"] = self._image.size

        if self._image.mode == "P":
            metadata["palette"] = self._image.palette

        if self._image.getexif():
            exif_data = {
                ExifTags.TAGS.get(key, "unknown"): value
                for key, value in dict(self._image.getexif()).items()
            }
            exif_data.pop("unknown", None)
            metadata.update(exif_data)

        if exclude_applied:
            # EXIF orientation is applied during read, so don't report it.
            metadata.pop("Orientation", None)

        return metadata

    def properties(self, index: Optional[int] = None) -> ImageProperties:
        """Standardized ndimage metadata

        Parameters
        ----------
        index : int
            If the ImageResource contains multiple ndimages, and index is an
            integer, select the index-th ndimage from among them and return its
            properties. If index is an ellipsis (...), read and return the
            properties of all ndimages in the file stacked along a new batch
            dimension. If index is None, this plugin reads and returns the
            properties of the first image (index=0) unless the image is a GIF or
            APNG, in which case it reads and returns the properties all images
            (index=...).

        Returns
        -------
        properties : ImageProperties
            A dataclass filled with standardized image metadata.

        Notes
        -----
        This does not decode pixel data and is fast for large images.
        """
        if index is None:
            if self._image.format == "GIF":
                index = Ellipsis
            elif self._image.custom_mimetype == "image/apng":
                index = Ellipsis
            else:
                index = 0

        if index is Ellipsis:
            self._image.seek(0)
        else:
            self._image.seek(index)

        if self._image.format == "GIF":
            # GIF mode is determined by pallette
            mode = self._image.palette.mode
        else:
            mode = self._image.mode

        width: int = self._image.width
        height: int = self._image.height
        shape: Tuple[int, ...] = (height, width)

        # NOTE(review): some single-frame formats may not define `n_frames`
        # on the PIL image — confirm against the supported format list.
        n_frames: int = self._image.n_frames
        if index is ...:
            shape = (n_frames, *shape)

        # Derive dtype and channel count from a 1x1 dummy of the same mode.
        dummy = np.asarray(Image.new(mode, (1, 1)))
        pil_shape: Tuple[int, ...] = dummy.shape
        if len(pil_shape) > 2:
            shape = (*shape, *pil_shape[2:])

        return ImageProperties(
            shape=shape,
            dtype=dummy.dtype,
            is_batch=True if index is Ellipsis else False,
        )
| [
"numpy.stack",
"PIL.Image.new",
"numpy.flip",
"PIL.Image.registered_extensions",
"numpy.asarray",
"PIL.ImageSequence.Iterator",
"numpy.rot90",
"PIL.Image.getmodebands",
"PIL.Image.fromarray",
"warnings.warn",
"numpy.round",
"PIL.ExifTags.TAGS.get"
] | [((7275, 7310), 'PIL.ImageSequence.Iterator', 'ImageSequence.Iterator', (['self._image'], {}), '(self._image)\n', (7297, 7310), False, 'from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags\n'), ((7753, 7770), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (7763, 7770), True, 'import numpy as np\n'), ((1437, 1461), 'PIL.Image.getmodebands', 'Image.getmodebands', (['mode'], {}), '(mode)\n', (1455, 1461), False, 'from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags\n'), ((1545, 1566), 'numpy.flip', 'np.flip', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (1552, 1566), True, 'import numpy as np\n'), ((1589, 1605), 'numpy.rot90', 'np.rot90', (['x'], {'k': '(2)'}), '(x, k=2)\n', (1597, 1605), True, 'import numpy as np\n'), ((1628, 1653), 'numpy.flip', 'np.flip', (['x'], {'axis': '(axis - 1)'}), '(x, axis=axis - 1)\n', (1635, 1653), True, 'import numpy as np\n'), ((1735, 1751), 'numpy.rot90', 'np.rot90', (['x'], {'k': '(1)'}), '(x, k=1)\n', (1743, 1751), True, 'import numpy as np\n'), ((1833, 1849), 'numpy.rot90', 'np.rot90', (['x'], {'k': '(3)'}), '(x, k=3)\n', (1841, 1849), True, 'import numpy as np\n'), ((6368, 6409), 'numpy.stack', 'np.stack', (['[im for im in iterator]'], {'axis': '(0)'}), '([im for im in iterator], axis=0)\n', (6376, 6409), True, 'import numpy as np\n'), ((10516, 10541), 'numpy.stack', 'np.stack', (['ndimage'], {'axis': '(0)'}), '(ndimage, axis=0)\n', (10524, 10541), True, 'import numpy as np\n'), ((10606, 10625), 'numpy.asarray', 'np.asarray', (['ndimage'], {}), '(ndimage)\n', (10616, 10625), True, 'import numpy as np\n'), ((11505, 11538), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {'mode': 'mode'}), '(frame, mode=mode)\n', (11520, 11538), False, 'from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags\n'), ((15792, 15815), 'PIL.Image.new', 'Image.new', (['mode', '(1, 1)'], {}), '(mode, (1, 1))\n', (15801, 15815), False, 'from PIL import Image, UnidentifiedImageError, 
ImageSequence, ExifTags\n'), ((1684, 1700), 'numpy.rot90', 'np.rot90', (['x'], {'k': '(3)'}), '(x, k=3)\n', (1692, 1700), True, 'import numpy as np\n'), ((1782, 1798), 'numpy.rot90', 'np.rot90', (['x'], {'k': '(1)'}), '(x, k=1)\n', (1790, 1798), True, 'import numpy as np\n'), ((3229, 3411), 'warnings.warn', 'warnings.warn', (['"""Can\'t determine file format to write as. You _must_ set `format` during write or the call will fail. Use `extension` to supress this warning. """', 'UserWarning'], {}), '(\n "Can\'t determine file format to write as. You _must_ set `format` during write or the call will fail. Use `extension` to supress this warning. "\n , UserWarning)\n', (3242, 3411), False, 'import warnings\n'), ((13694, 13727), 'PIL.ExifTags.TAGS.get', 'ExifTags.TAGS.get', (['key', '"""unknown"""'], {}), "(key, 'unknown')\n", (13711, 13727), False, 'from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags\n'), ((8333, 8348), 'numpy.round', 'np.round', (['image'], {}), '(image)\n', (8341, 8348), True, 'import numpy as np\n'), ((10403, 10432), 'PIL.Image.registered_extensions', 'Image.registered_extensions', ([], {}), '()\n', (10430, 10432), False, 'from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags\n'), ((10986, 11010), 'PIL.Image.getmodebands', 'Image.getmodebands', (['mode'], {}), '(mode)\n', (11004, 11010), False, 'from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags\n'), ((3685, 3714), 'PIL.Image.registered_extensions', 'Image.registered_extensions', ([], {}), '()\n', (3712, 3714), False, 'from PIL import Image, UnidentifiedImageError, ImageSequence, ExifTags\n')] |
import numpy as np


def part1(nums):
    """Return the product of the two distinct entries that sum to 2020.

    ``np.argwhere`` on the pairwise-sum matrix yields (i, j) index pairs;
    keeping only ``i < j`` drops the mirrored (j, i) duplicate that the
    original tuple-iteration over ``np.where`` printed twice, and also
    excludes the degenerate i == j case.

    Raises:
        ValueError: if no such pair exists.
    """
    for i, j in np.argwhere(nums[:, None] + nums[None, :] == 2020):
        if i < j:
            return int(nums[i] * nums[j])
    raise ValueError("no pair of entries sums to 2020")


def part2(nums):
    """Return the product of the three distinct entries that sum to 2020.

    Raises:
        ValueError: if no such triple exists.
    """
    sums = nums[:, None, None] + nums[None, :, None] + nums[None, None, :]
    for i, j, k in np.argwhere(sums == 2020):
        if i < j < k:
            return int(nums[i] * nums[j] * nums[k])
    raise ValueError("no triple of entries sums to 2020")


if __name__ == "__main__":
    # Guarding the file read lets the functions above be imported and
    # tested without requiring "1.txt" to exist.
    nums = np.loadtxt("1.txt").astype(int)
    print(part1(nums))
    print(part2(nums))
| [
"numpy.where",
"numpy.loadtxt",
"numpy.prod"
] | [((69, 116), 'numpy.where', 'np.where', (['(nums[:, None] + nums[None, :] == 2020)'], {}), '(nums[:, None] + nums[None, :] == 2020)\n', (77, 116), True, 'import numpy as np\n'), ((169, 254), 'numpy.where', 'np.where', (['(nums[:, None, None] + nums[None, :, None] + nums[None, None, :] == 2020)'], {}), '(nums[:, None, None] + nums[None, :, None] + nums[None, None, :] ==\n 2020)\n', (177, 254), True, 'import numpy as np\n'), ((26, 45), 'numpy.loadtxt', 'np.loadtxt', (['"""1.txt"""'], {}), "('1.txt')\n", (36, 45), True, 'import numpy as np\n'), ((146, 156), 'numpy.prod', 'np.prod', (['x'], {}), '(x)\n', (153, 156), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright 2020 PyPAL authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CLI that sweeps a genetic algorithm over novelty-penalty ratios.

Loads precomputed dispersant features/targets from CSV files and a pickled
coregionalized GP model, then, for each requested target column, runs the GA
several times per novelty-penalty ratio and dumps the resulting GA objects
with joblib.

NOTE(review): all data paths below are hard-coded relative paths; this
script must be run from a directory where '../data' and 'sweeps3/' exist.
"""
import os
import time
from functools import partial
import pickle
import click
import joblib
import numpy as np
import pandas as pd
# NOTE(review): LGBMRegressor and train_test_split appear unused here —
# possibly leftovers from an earlier version; confirm before removing.
from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from dispersant_screener.ga import FEATURES, predict_gpy_coregionalized, run_ga, regularizer_novelty, _get_average_dist
# Timestamp used to make output filenames unique per invocation.
TIMESTR = time.strftime('%Y%m%d-%H%M%S')
DATADIR = '../data'
# Feature matrix restricted to the columns the GA operates on.
df_full_factorial_feat = pd.read_csv(os.path.join(DATADIR, 'new_features_full_random.csv'))[FEATURES].values
a2 = pd.read_csv(os.path.join(DATADIR, 'b1-b21_random_virial_large_new.csv'))['A2_normalized'].values
# NOTE(review): deltaGMax reads the same 'A2_normalized' column as a2 above
# (and is never used below) — looks like a copy-paste slip; 'deltaGmax' may
# have been intended. Verify against the data pipeline.
deltaGMax = pd.read_csv(os.path.join(DATADIR, 'b1-b21_random_virial_large_new.csv'))['A2_normalized'].values
gibbs = pd.read_csv(os.path.join(DATADIR, 'b1-b21_random_deltaG.csv'))['deltaGmin'].values
gibbs_max = pd.read_csv(os.path.join(DATADIR, 'b1-b21_random_virial_large_new.csv'))['deltaGmax'].values
# NOTE(review): force_max is loaded but unused below.
force_max = pd.read_csv(os.path.join(DATADIR, 'b1-b21_random_virial_large_fit2.csv'))['F_repel_max'].values
rg = pd.read_csv(os.path.join(DATADIR, 'rg_results.csv'))['Rg'].values
# Objective matrix: columns are [Rg, -deltaGmin, deltaGmax]; gibbs is negated
# so that all three objectives are maximized.
y = np.hstack([
    rg.reshape(-1, 1),
    gibbs.reshape(-1, 1) * (-1),
    gibbs_max.reshape(-1, 1),
])
assert len(df_full_factorial_feat) == len(a2) == len(gibbs) == len(y)
# First element of the pickled list is the coregionalized GP model.
with open('sweeps3/20201021-235927_dispersant_0.01_0.05_0.05_60-models.pkl', 'rb') as fh:
    coregionalized_model = pickle.load(fh)[0]
feat_scaler = StandardScaler()
X = feat_scaler.fit_transform(df_full_factorial_feat)
@click.command('cli')
@click.argument('target', type=int, default=0)
@click.argument('runs', type=int, default=10)
@click.argument('outdir', type=click.Path(), default='.')
@click.option('--all', is_flag=True)
def main(target, runs, outdir, all):
    """Run the GA sweep for one target column (or all three with --all).

    For each novelty-penalty ratio in a fixed grid, runs the GA `runs`
    times and dumps the accumulated GA objects to
    `<outdir>/<timestamp>-ga_<target>.joblib`.
    """
    if all:
        targets = [0, 1, 2]
    else:
        targets = [target]
    for target in targets:
        y_selected = y[:, target]
        # Bind the GP model and target index so the GA sees f(features).
        predict_partial = partial(predict_gpy_coregionalized, model=coregionalized_model, i=target)
        regularizer_novelty_partial = partial(regularizer_novelty,
                                              y=y_selected,
                                              X_data=X,
                                              average_dist=_get_average_dist(X))
        gas = []
        for novelty_penalty_ratio in [0, 0.1, 0.2, 0.5, 1, 2]:
            for _ in range(runs):
                gas.append(
                    run_ga(  # pylint:disable=invalid-name
                        predict_partial,
                        regularizer_novelty_partial,
                        features=FEATURES,
                        y_mean=np.median(y_selected),
                        # NOTE(review): 'pentaly' spelling matches the
                        # keyword expected by run_ga — do not "fix" here.
                        novelty_pentaly_ratio=novelty_penalty_ratio))
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        joblib.dump(gas, os.path.join(outdir, TIMESTR + '-ga_{}.joblib'.format(target)))
if __name__ == '__main__':
    main()
| [
"functools.partial",
"os.mkdir",
"sklearn.preprocessing.StandardScaler",
"click.argument",
"numpy.median",
"click.option",
"time.strftime",
"os.path.exists",
"click.command",
"dispersant_screener.ga._get_average_dist",
"pickle.load",
"click.Path",
"os.path.join"
] | [((1003, 1033), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (1016, 1033), False, 'import time\n'), ((2077, 2093), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2091, 2093), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2151, 2171), 'click.command', 'click.command', (['"""cli"""'], {}), "('cli')\n", (2164, 2171), False, 'import click\n'), ((2173, 2218), 'click.argument', 'click.argument', (['"""target"""'], {'type': 'int', 'default': '(0)'}), "('target', type=int, default=0)\n", (2187, 2218), False, 'import click\n'), ((2220, 2264), 'click.argument', 'click.argument', (['"""runs"""'], {'type': 'int', 'default': '(10)'}), "('runs', type=int, default=10)\n", (2234, 2264), False, 'import click\n'), ((2324, 2359), 'click.option', 'click.option', (['"""--all"""'], {'is_flag': '(True)'}), "('--all', is_flag=True)\n", (2336, 2359), False, 'import click\n'), ((2043, 2058), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2054, 2058), False, 'import pickle\n'), ((2563, 2636), 'functools.partial', 'partial', (['predict_gpy_coregionalized'], {'model': 'coregionalized_model', 'i': 'target'}), '(predict_gpy_coregionalized, model=coregionalized_model, i=target)\n', (2570, 2636), False, 'from functools import partial\n'), ((3377, 3399), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (3391, 3399), False, 'import os\n'), ((3409, 3425), 'os.mkdir', 'os.mkdir', (['outdir'], {}), '(outdir)\n', (3417, 3425), False, 'import os\n'), ((2296, 2308), 'click.Path', 'click.Path', ([], {}), '()\n', (2306, 2308), False, 'import click\n'), ((1092, 1145), 'os.path.join', 'os.path.join', (['DATADIR', '"""new_features_full_random.csv"""'], {}), "(DATADIR, 'new_features_full_random.csv')\n", (1104, 1145), False, 'import os\n'), ((1181, 1240), 'os.path.join', 'os.path.join', (['DATADIR', '"""b1-b21_random_virial_large_new.csv"""'], {}), "(DATADIR, 
'b1-b21_random_virial_large_new.csv')\n", (1193, 1240), False, 'import os\n'), ((1290, 1349), 'os.path.join', 'os.path.join', (['DATADIR', '"""b1-b21_random_virial_large_new.csv"""'], {}), "(DATADIR, 'b1-b21_random_virial_large_new.csv')\n", (1302, 1349), False, 'import os\n'), ((1395, 1444), 'os.path.join', 'os.path.join', (['DATADIR', '"""b1-b21_random_deltaG.csv"""'], {}), "(DATADIR, 'b1-b21_random_deltaG.csv')\n", (1407, 1444), False, 'import os\n'), ((1490, 1549), 'os.path.join', 'os.path.join', (['DATADIR', '"""b1-b21_random_virial_large_new.csv"""'], {}), "(DATADIR, 'b1-b21_random_virial_large_new.csv')\n", (1502, 1549), False, 'import os\n'), ((1595, 1655), 'os.path.join', 'os.path.join', (['DATADIR', '"""b1-b21_random_virial_large_fit2.csv"""'], {}), "(DATADIR, 'b1-b21_random_virial_large_fit2.csv')\n", (1607, 1655), False, 'import os\n'), ((1696, 1735), 'os.path.join', 'os.path.join', (['DATADIR', '"""rg_results.csv"""'], {}), "(DATADIR, 'rg_results.csv')\n", (1708, 1735), False, 'import os\n'), ((2880, 2900), 'dispersant_screener.ga._get_average_dist', '_get_average_dist', (['X'], {}), '(X)\n', (2897, 2900), False, 'from dispersant_screener.ga import FEATURES, predict_gpy_coregionalized, run_ga, regularizer_novelty, _get_average_dist\n'), ((3272, 3293), 'numpy.median', 'np.median', (['y_selected'], {}), '(y_selected)\n', (3281, 3293), True, 'import numpy as np\n')] |
"""Toy cover domain.
This environment IS downward refinable (low-level search won't ever
fail), but it still requires backtracking.
"""
from typing import ClassVar, Dict, List, Optional, Sequence, Set, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from gym.spaces import Box
from predicators.src import utils
from predicators.src.envs import BaseEnv
from predicators.src.settings import CFG
from predicators.src.structs import Action, Array, GroundAtom, Object, \
ParameterizedOption, Predicate, State, Task, Type
class CoverEnv(BaseEnv):
    """Toy cover domain.

    Blocks and targets live on a 1D line in [0, 1]; the robot picks and
    places blocks so that each goal block covers (fully overlaps) its goal
    target. Actions are a single hand pose in [0, 1].
    """
    # Whether placing outside any target is permitted (overridden in
    # CoverEnvRegrasp).
    _allow_free_space_placing: ClassVar[bool] = False
    _initial_pick_offsets: ClassVar[List[float]] = []  # see CoverEnvRegrasp
    # Fixed robot x/z, used only by the non-1D (e.g. PyBullet) variants.
    _workspace_x: ClassVar[float] = 1.35
    _workspace_z: ClassVar[float] = 0.75
    def __init__(self) -> None:
        super().__init__()
        # Types
        self._block_type = Type(
            "block", ["is_block", "is_target", "width", "pose", "grasp"])
        self._target_type = Type("target",
                                 ["is_block", "is_target", "width", "pose"])
        self._robot_type = Type("robot", ["hand", "pose_x", "pose_z"])
        # Predicates
        self._IsBlock = Predicate("IsBlock", [self._block_type],
                                  self._IsBlock_holds)
        self._IsTarget = Predicate("IsTarget", [self._target_type],
                                   self._IsTarget_holds)
        self._Covers = Predicate("Covers",
                                 [self._block_type, self._target_type],
                                 self._Covers_holds)
        self._HandEmpty = Predicate("HandEmpty", [], self._HandEmpty_holds)
        self._Holding = Predicate("Holding", [self._block_type],
                                  self._Holding_holds)
        # Options
        self._PickPlace: ParameterizedOption = \
            utils.SingletonParameterizedOption(
                "PickPlace", self._PickPlace_policy,
                params_space=Box(0, 1, (1, )))
        # Static objects (always exist no matter the settings).
        self._robot = Object("robby", self._robot_type)
    @classmethod
    def get_name(cls) -> str:
        """Return the name used to register this environment."""
        return "cover"
    def simulate(self, state: State, action: Action) -> State:
        """Apply a hand-pose action: grasp if empty-handed over a block,
        place if holding one; no-op outside the allowed hand regions."""
        assert self.action_space.contains(action.arr)
        pose = action.arr.item()
        next_state = state.copy()
        hand_regions = self._get_hand_regions(state)
        # If we're not in any hand region, no-op.
        if not any(hand_lb <= pose <= hand_rb
                   for hand_lb, hand_rb in hand_regions):
            return next_state
        # Identify which block we're holding and which block we're above.
        held_block = None
        above_block = None
        for block in state.get_objects(self._block_type):
            if state.get(block, "grasp") != -1:
                assert held_block is None
                held_block = block
            block_lb = state.get(block, "pose") - state.get(block, "width") / 2
            block_ub = state.get(block, "pose") + state.get(block, "width") / 2
            if state.get(block,
                         "grasp") == -1 and block_lb <= pose <= block_ub:
                assert above_block is None
                above_block = block
        # If we're not holding anything and we're above a block, grasp it.
        # The grasped block's pose stays the same.
        if held_block is None and above_block is not None:
            # grasp records the offset between the hand and the block center.
            grasp = pose - state.get(above_block, "pose")
            next_state.set(self._robot, "hand", pose)
            next_state.set(above_block, "grasp", grasp)
        # If we are holding something, place it.
        # Disallow placing on another block.
        if held_block is not None and above_block is None:
            new_pose = pose - state.get(held_block, "grasp")
            # Prevent collisions with other blocks.
            if self._any_intersection(new_pose,
                                      state.get(held_block, "width"),
                                      state.data,
                                      block_only=True,
                                      excluded_object=held_block):
                return next_state
            # Only place if free space placing is allowed, or if we're
            # placing onto some target.
            targets = state.get_objects(self._target_type)
            if self._allow_free_space_placing or \
                any(state.get(targ, "pose")-state.get(targ, "width")/2
                    <= pose <=
                    state.get(targ, "pose")+state.get(targ, "width")/2
                    for targ in targets):
                next_state.set(self._robot, "hand", pose)
                next_state.set(held_block, "pose", new_pose)
                next_state.set(held_block, "grasp", -1)
        return next_state
    def _generate_train_tasks(self) -> List[Task]:
        return self._get_tasks(num=CFG.num_train_tasks, rng=self._train_rng)
    def _generate_test_tasks(self) -> List[Task]:
        return self._get_tasks(num=CFG.num_test_tasks, rng=self._test_rng)
    @property
    def predicates(self) -> Set[Predicate]:
        return {
            self._IsBlock, self._IsTarget, self._Covers, self._HandEmpty,
            self._Holding
        }
    @property
    def goal_predicates(self) -> Set[Predicate]:
        return {self._Covers}
    @property
    def types(self) -> Set[Type]:
        return {self._block_type, self._target_type, self._robot_type}
    @property
    def options(self) -> Set[ParameterizedOption]:
        return {self._PickPlace}
    @property
    def action_space(self) -> Box:
        return Box(0, 1, (1, ))  # same as option param space
    def render_state_plt(
            self,
            state: State,
            task: Task,
            action: Optional[Action] = None,
            caption: Optional[str] = None) -> matplotlib.figure.Figure:
        """Render the 1D state as a matplotlib figure."""
        fig, ax = plt.subplots(1, 1)
        # Draw main line
        plt.plot([-0.2, 1.2], [-0.055, -0.055], color="black")
        # Draw hand regions
        hand_regions = self._get_hand_regions(state)
        for i, (hand_lb, hand_rb) in enumerate(hand_regions):
            if i == 0:
                label = "Allowed hand region"
            else:
                label = None
            plt.plot([hand_lb, hand_rb], [-0.08, -0.08],
                     color="red",
                     alpha=0.5,
                     lw=8.,
                     label=label)
        # Draw hand
        plt.scatter(state.get(self._robot, "hand"),
                    0.05,
                    color="r",
                    s=100,
                    alpha=1.,
                    zorder=10,
                    label="Hand")
        lw = 3
        height = 0.1
        cs = ["blue", "purple", "green", "yellow"]
        block_alpha = 0.75
        targ_alpha = 0.25
        # Draw blocks
        for i, block in enumerate(state.get_objects(self._block_type)):
            c = cs[i]
            if state.get(block, "grasp") != -1:
                lcolor = "red"
                pose = state.get(self._robot, "hand") - state.get(
                    block, "grasp")
                suffix = " (grasped)"
            else:
                lcolor = "gray"
                pose = state.get(block, "pose")
                suffix = ""
            rect = plt.Rectangle(
                (pose - state.get(block, "width") / 2., -height / 2.),
                state.get(block, "width"),
                height,
                linewidth=lw,
                edgecolor=lcolor,
                facecolor=c,
                alpha=block_alpha,
                label=f"block{i}" + suffix)
            ax.add_patch(rect)
        # Draw targets
        # NOTE(review): `lcolor` below is left over from the last iteration
        # of the blocks loop, and would be unbound if there were no blocks —
        # latent bug; targets probably want a fixed edge color.
        for i, targ in enumerate(state.get_objects(self._target_type)):
            c = cs[i]
            rect = plt.Rectangle(
                (state.get(targ, "pose") - state.get(targ, "width") / 2.,
                 -height / 2.),
                state.get(targ, "width"),
                height,
                linewidth=lw,
                edgecolor=lcolor,
                facecolor=c,
                alpha=targ_alpha,
                label=f"target{i}")
            ax.add_patch(rect)
        plt.xlim(-0.2, 1.2)
        plt.ylim(-0.25, 0.5)
        plt.yticks([])
        plt.legend()
        if caption is not None:
            plt.suptitle(caption, wrap=True)
        plt.tight_layout()
        return fig
    def _get_hand_regions(self, state: State) -> List[Tuple[float, float]]:
        """Intervals of hand poses where actions are allowed: the full width
        of each block and the middle fifth of each target."""
        hand_regions = []
        for block in state.get_objects(self._block_type):
            hand_regions.append(
                (state.get(block, "pose") - state.get(block, "width") / 2,
                 state.get(block, "pose") + state.get(block, "width") / 2))
        for targ in state.get_objects(self._target_type):
            hand_regions.append(
                (state.get(targ, "pose") - state.get(targ, "width") / 10,
                 state.get(targ, "pose") + state.get(targ, "width") / 10))
        return hand_regions
    def _create_blocks_and_targets(self) -> Tuple[List[Object], List[Object]]:
        """Create the block and target objects per the CFG counts."""
        blocks = []
        targets = []
        for i in range(CFG.cover_num_blocks):
            blocks.append(Object(f"block{i}", self._block_type))
        for i in range(CFG.cover_num_targets):
            targets.append(Object(f"target{i}", self._target_type))
        return blocks, targets
    def _get_tasks(self, num: int, rng: np.random.Generator) -> List[Task]:
        """Sample `num` tasks, cycling through up to three goal variants."""
        tasks = []
        # Create blocks and targets.
        blocks, targets = self._create_blocks_and_targets()
        # Create goals.
        goal1 = {GroundAtom(self._Covers, [blocks[0], targets[0]])}
        goals = [goal1]
        if len(blocks) > 1 and len(targets) > 1:
            goal2 = {GroundAtom(self._Covers, [blocks[1], targets[1]])}
            goals.append(goal2)
            goal3 = {
                GroundAtom(self._Covers, [blocks[0], targets[0]]),
                GroundAtom(self._Covers, [blocks[1], targets[1]])
            }
            goals.append(goal3)
        for i in range(num):
            init = self._create_initial_state(blocks, targets, rng)
            assert init.get_objects(self._block_type) == blocks
            assert init.get_objects(self._target_type) == targets
            tasks.append(Task(init, goals[i % len(goals)]))
        return tasks
    def _create_initial_state(self, blocks: List[Object],
                              targets: List[Object],
                              rng: np.random.Generator) -> State:
        """Rejection-sample non-overlapping poses for blocks and targets,
        optionally starting with one block held."""
        data: Dict[Object, Array] = {}
        assert len(CFG.cover_block_widths) == len(blocks)
        for block, width in zip(blocks, CFG.cover_block_widths):
            while True:
                pose = rng.uniform(width / 2, 1.0 - width / 2)
                if not self._any_intersection(pose, width, data):
                    break
            # [is_block, is_target, width, pose, grasp]
            data[block] = np.array([1.0, 0.0, width, pose, -1.0])
        assert len(CFG.cover_target_widths) == len(targets)
        for target, width in zip(targets, CFG.cover_target_widths):
            while True:
                pose = rng.uniform(width / 2, 1.0 - width / 2)
                if not self._any_intersection(
                        pose, width, data, larger_gap=True):
                    break
            # [is_block, is_target, width, pose]
            data[target] = np.array([0.0, 1.0, width, pose])
        # [hand, pose_x, pose_z]
        # For the non-PyBullet environments, pose_x and pose_z are constant.
        data[self._robot] = np.array(
            [0.5, self._workspace_x, self._workspace_z])
        state = State(data)
        # Allow some chance of holding a block in the initial state.
        if rng.uniform() < CFG.cover_initial_holding_prob:
            block = blocks[rng.choice(len(blocks))]
            block_pose = state.get(block, "pose")
            pick_pose = block_pose
            if self._initial_pick_offsets:
                offset = rng.choice(self._initial_pick_offsets)
                assert -1.0 < offset < 1.0, \
                    "initial pick offset should be between -1 and 1"
                pick_pose += state.get(block, "width") * offset / 2.
            state.set(self._robot, "hand", pick_pose)
            state.set(block, "grasp", pick_pose - block_pose)
        return state
    @staticmethod
    def _IsBlock_holds(state: State, objects: Sequence[Object]) -> bool:
        block, = objects
        return block in state
    @staticmethod
    def _IsTarget_holds(state: State, objects: Sequence[Object]) -> bool:
        target, = objects
        return target in state
    @staticmethod
    def _Covers_holds(state: State, objects: Sequence[Object]) -> bool:
        """True iff the (unheld) block's extent fully contains the target's."""
        block, target = objects
        block_pose = state.get(block, "pose")
        block_width = state.get(block, "width")
        target_pose = state.get(target, "pose")
        target_width = state.get(target, "width")
        return (block_pose-block_width/2 <= target_pose-target_width/2) and \
               (block_pose+block_width/2 >= target_pose+target_width/2) and \
               state.get(block, "grasp") == -1
    def _HandEmpty_holds(self, state: State,
                         objects: Sequence[Object]) -> bool:
        """True iff no block (of any subtype) is currently grasped."""
        assert not objects
        for obj in state:
            if obj.is_instance(self._block_type) and \
                state.get(obj, "grasp") != -1:
                return False
        return True
    @staticmethod
    def _Holding_holds(state: State, objects: Sequence[Object]) -> bool:
        block, = objects
        return state.get(block, "grasp") != -1
    @staticmethod
    def _PickPlace_policy(state: State, memory: Dict,
                          objects: Sequence[Object], params: Array) -> Action:
        del state, memory, objects  # unused
        return Action(params)  # action is simply the parameter
    def _any_intersection(self,
                          pose: float,
                          width: float,
                          data: Dict[Object, Array],
                          block_only: bool = False,
                          larger_gap: bool = False,
                          excluded_object: Optional[Object] = None) -> bool:
        """Check whether an object of `width` at `pose` would come within
        (width + other_width) * mult of any (non-excluded) object in `data`.

        Feature indices 3 (pose) and 2 (width) match the arrays built in
        _create_initial_state.
        """
        mult = 1.5 if larger_gap else 0.5
        for other in data:
            if block_only and other.type != self._block_type:
                continue
            if other == excluded_object:
                continue
            other_feats = data[other]
            distance = abs(other_feats[3] - pose)
            if distance <= (width + other_feats[2]) * mult:
                return True
        return False
class CoverEnvTypedOptions(CoverEnv):
    """Cover variant whose options are parameterized by objects.

    The parent's single untyped PickPlace option is replaced by a typed
    pair: Pick (argument: a block) and Place (argument: a target).
    """
    def __init__(self) -> None:
        super().__init__()
        # The untyped option from the parent is superseded by the pair below.
        del self._PickPlace
        self._Pick: ParameterizedOption = utils.SingletonParameterizedOption(
            "Pick",
            self._Pick_policy,
            types=[self._block_type],
            params_space=Box(-0.1, 0.1, (1, )))
        self._Place: ParameterizedOption = utils.SingletonParameterizedOption(
            "Place",
            self._PickPlace_policy,  # reuse the parent class's policy
            types=[self._target_type],
            params_space=Box(0, 1, (1, )))
    @classmethod
    def get_name(cls) -> str:
        """Return the name used to register this environment."""
        return "cover_typed_options"
    @property
    def options(self) -> Set[ParameterizedOption]:
        """The two typed options defined by this environment."""
        return {self._Place, self._Pick}
    def _Pick_policy(self, s: State, m: Dict, o: Sequence[Object],
                     p: Array) -> Action:
        """Turn a relative pick offset into an absolute hand-pose action."""
        del m  # unused
        _ = self  # unused
        # The parameter is an offset RELATIVE to the block's pose, so shift
        # it by the block's pose and clamp into the [0, 1] workspace.
        raw_pose = s.get(o[0], "pose") + p[0]
        clamped_pose = max(0.0, min(raw_pose, 1.0))
        return Action(np.array([clamped_pose], dtype=np.float32))
class CoverEnvHierarchicalTypes(CoverEnv):
    """Cover variant with a subtype hierarchy on blocks, used for testing."""
    def __init__(self) -> None:
        super().__init__()
        # Keep a handle on the parent's block type, then re-declare blocks
        # as a derived type with the identical feature list.
        self._parent_block_type = self._block_type
        derived_feats = ["is_block", "is_target", "width", "pose", "grasp"]
        self._block_type = Type("block_derived",
                                derived_feats,
                                parent=self._parent_block_type)
    @classmethod
    def get_name(cls) -> str:
        """Return the name used to register this environment."""
        return "cover_hierarchical_types"
    @property
    def types(self) -> Set[Type]:
        """All types, including both levels of the block hierarchy."""
        return {
            self._block_type, self._parent_block_type, self._target_type,
            self._robot_type
        }
class CoverEnvRegrasp(CoverEnv):
    """Cover variant that breaks downward refinability via regrasping.

    The initially held block may carry a grasp offset that forces placing
    it down and regrasping before the goal can be reached.
    Two differences from the parent enable this: a Clear predicate (so the
    planner avoids placing onto already-covered targets), and hand regions
    that allow free-space placing everywhere except near target edges,
    with only a narrow allowed band centered on each target.
    """
    _allow_free_space_placing: ClassVar[bool] = True
    _initial_pick_offsets: ClassVar[List[float]] = [-0.95, 0.0, 0.95]
    def __init__(self) -> None:
        super().__init__()
        # Clear marks targets that no block currently covers.
        self._Clear = Predicate("Clear", [self._target_type],
                                 self._Clear_holds)
    @classmethod
    def get_name(cls) -> str:
        """Return the name used to register this environment."""
        return "cover_regrasp"
    @property
    def predicates(self) -> Set[Predicate]:
        """Parent predicates plus Clear."""
        return super().predicates | {self._Clear}
    def _get_hand_regions(self, state: State) -> List[Tuple[float, float]]:
        """Sweep left-to-right over the sorted targets, emitting an open
        region up to each target (minus a margin) and a narrow band around
        the target's center, then one final region to the right edge."""
        regions = []
        cursor = 0.0
        targets = state.get_objects(self._target_type)
        for targ in sorted(targets, key=lambda t: state.get(t, "pose")):
            w = state.get(targ, "width")
            center = state.get(targ, "pose")
            left_edge = center - w / 2
            right_edge = center + w / 2
            regions.append((cursor, left_edge - w))
            regions.append((left_edge + w / 3, right_edge - w / 3))
            cursor = right_edge + w
        regions.append((cursor, 1.0))
        return regions
    def _Clear_holds(self, state: State, objects: Sequence[Object]) -> bool:
        """True iff no block covers the given target."""
        assert len(objects) == 1
        target = objects[0]
        blocks = (b for b in state if b.type == self._block_type)
        return not any(
            self._Covers_holds(state, [b, target]) for b in blocks)
class CoverMultistepOptions(CoverEnvTypedOptions):
    """Cover domain with a lower level action space. Useful for using and
    learning multistep options.
    The action space is (dx, dy, dgrip). The last dimension
    controls the gripper "magnet" or "vacuum". The state space is updated to
    track x, y, grip.
    The robot can move anywhere as long as it, and the block it may be holding,
    does not collide with another block. Picking up a block is allowed when the
    robot gripper is empty, when the robot is in the allowable hand region, and
    when the robot is sufficiently close to the block in the y-direction.
    Placing is allowed anywhere. Collisions are handled in simulate().
    """
    # Gripper readings above this threshold count as "on" for grasping.
    grasp_thresh: ClassVar[float] = 0.0
    # Y-coordinate of a block's top when it rests on the floor.
    initial_block_y: ClassVar[float] = 0.1
    block_height: ClassVar[float] = 0.1
    target_height: ClassVar[float] = 0.1  # Only for rendering purposes.
    # Starting height of the robot; also the upper bound on its y motion.
    initial_robot_y: ClassVar[float] = 0.4
    # Legal range for the gripper actuation dimension of the action.
    grip_lb: ClassVar[float] = -1.0
    grip_ub: ClassVar[float] = 1.0
    # Distance tolerance for snapping to block tops / the floor in simulate().
    snap_tol: ClassVar[float] = 1e-2
    def __init__(self) -> None:
        """Rebuild types, predicates, the robot, and the options so they
        match the richer low-level state (x, y, grip, holding)."""
        super().__init__()
        # Need to now include y and gripper info in state.
        # Removing "pose" because that's ambiguous.
        # Also adding height to blocks.
        # The y position corresponds to the top of the block.
        # The x position corresponds to the center of the block.
        self._block_type = Type(
            "block",
            ["is_block", "is_target", "width", "x", "grasp", "y", "height"])
        # Targets don't need y because they're constant.
        self._target_type = Type("target",
                                 ["is_block", "is_target", "width", "x"])
        # Also removing "hand" because that's ambiguous.
        self._robot_type = Type("robot", ["x", "y", "grip", "holding"])
        # Hand-region objects describe allowed x-intervals for grasping
        # (per block) and placing (per target).
        self._block_hand_region_type = Type("block_hand_region",
                                            ["lb", "ub", "block_idx"])
        self._target_hand_region_type = Type("target_hand_region",
                                             ["lb", "ub"])
        # Need to override predicate creation because the types are
        # now different (in terms of equality).
        self._IsBlock = Predicate("IsBlock", [self._block_type],
                                  self._IsBlock_holds)
        self._IsTarget = Predicate("IsTarget", [self._target_type],
                                   self._IsTarget_holds)
        self._Covers = Predicate("Covers",
                                 [self._block_type, self._target_type],
                                 self._Covers_holds)
        self._HandEmpty = Predicate("HandEmpty", [], self._HandEmpty_holds)
        self._Holding = Predicate("Holding",
                                  [self._block_type, self._robot_type],
                                  self._Holding_holds)
        # Need to override static object creation because the types are now
        # different (in terms of equality).
        self._robot = Object("robby", self._robot_type)
        # Override the original options to make them multi-step. Note that
        # the parameter spaces are designed to match what would be learned
        # by the neural option learners.
        self._Pick = ParameterizedOption(
            "Pick",
            types=[self._block_type, self._robot_type],
            params_space=Box(-np.inf, np.inf, (5, )),
            policy=self._Pick_policy,
            initiable=self._Pick_initiable,
            terminal=self._Pick_terminal)
        self._Place = ParameterizedOption(
            "Place",
            types=[self._block_type, self._robot_type, self._target_type],
            params_space=Box(-np.inf, np.inf, (5, )),
            policy=self._Place_policy,
            initiable=self._Place_initiable,
            terminal=self._Place_terminal)
@classmethod
def get_name(cls) -> str:
return "cover_multistep_options"
@property
def options(self) -> Set[ParameterizedOption]:
return {self._Pick, self._Place}
@property
def action_space(self) -> Box:
# This is the main difference with respect to the parent
# env. The action space now is (dx, dy, dgrip). The
# last dimension controls the gripper "magnet" or "vacuum".
# Note that the bounds are relatively low, which necessitates
# multi-step options. The action limits are for dx, dy only;
# dgrip is constrained separately based on the grip limits.
lb, ub = CFG.cover_multistep_action_limits
return Box(np.array([lb, lb, self.grip_lb], dtype=np.float32),
np.array([ub, ub, self.grip_ub], dtype=np.float32))
    def simulate(self, state: State, action: Action) -> State:
        """Advance the low-level state by one (dx, dy, dgrip) step.

        Handles motion clamping, snapping to block tops / the floor,
        held-block collision checking (returning the unchanged state on
        collision), and grasp/place transitions gated by the gripper value
        and the allowed hand regions.
        """
        # Since the action space is lower level, we need to write
        # a lower level simulate function.
        assert self.action_space.contains(action.arr)
        dx, dy, dgrip = action.arr
        next_state = state.copy()
        x = state.get(self._robot, "x")
        y = state.get(self._robot, "y")
        grip = state.get(self._robot, "grip")
        blocks = state.get_objects(self._block_type)
        # Detect if a block is held, and if so, record that block's features.
        held_block = None
        for block in blocks:
            if state.get(block, "grasp") != -1:
                assert held_block is None
                held_block = block
                hx, hy = state.get(held_block, "x"), \
                    state.get(held_block, "y")
                hw, hh = state.get(held_block, "width"), \
                    state.get(held_block, "height")
                # Note: the block (x, y) is the middle-top of the block. The
                # Rectangle expects the lower left corner as (x, y).
                held_rect = utils.Rectangle(x=(hx - hw / 2),
                                            y=(hy - hh),
                                            width=hw,
                                            height=hh,
                                            theta=0)
                next_held_rect = utils.Rectangle(x=(held_rect.x + dx),
                                                 y=(held_rect.y + dy),
                                                 width=held_rect.width,
                                                 height=held_rect.height,
                                                 theta=held_rect.theta)
                # Compute line segments corresponding to the movement of each
                # of the held object vertices.
                held_move_segs = [
                    utils.LineSegment(x1, y1, x2, y2) for (x1, y1), (
                        x2,
                        y2) in zip(held_rect.vertices, next_held_rect.vertices)
                ]
        # Prevent the robot from going below the top of the blocks.
        y_min_robot = self.block_height
        if y + dy < y_min_robot:
            dy = y_min_robot - y
        # Prevent the robot from going above the initial robot position.
        if y + dy > self.initial_robot_y:
            dy = self.initial_robot_y - y
        # If the robot is holding a block that is close to the floor, and if
        # the robot is moving down to place, snap the robot so that the block
        # is exactly on the floor.
        at_place_height = False
        if held_block is not None and dy < 0:
            held_block_bottom = hy - hh
            # Always snap if below the floor.
            y_floor = 0
            if held_block_bottom + dy < y_floor or \
                abs(held_block_bottom + dy - y_floor) < self.snap_tol:
                dy = y_floor - held_block_bottom
                at_place_height = True
        # If the robot is not holding anything and is moving down, and if
        # the robot is close enough to the top of the block, snap it so that
        # the robot is exactly on top of the block.
        block_to_be_picked = None
        if held_block is None and dy < 0:
            for block in blocks:
                bx_lb = state.get(block, "x") - state.get(block, "width") / 2
                bx_ub = state.get(block, "x") + state.get(block, "width") / 2
                if bx_lb <= x <= bx_ub:
                    above_block_top = state.get(block, "y")
                    if abs(y + dy - above_block_top) < self.snap_tol:
                        block_to_be_picked = block
                        dy = above_block_top - y
                        break
        # Ensure that blocks do not collide with other blocks.
        if held_block is not None:
            for block in blocks:
                if block == held_block:
                    continue
                bx, by = state.get(block, "x"), state.get(block, "y")
                bw, bh = state.get(block, "width"), state.get(block, "height")
                rect = utils.Rectangle(x=(bx - bw / 2),
                                       y=(by - bh),
                                       width=bw,
                                       height=bh,
                                       theta=0)
                # Check the line segments corresponding to the movement of each
                # of the held object vertices.
                if any(seg.intersects(rect) for seg in held_move_segs):
                    return state.copy()
                # Check for overlap between the held object and this block.
                if rect.intersects(next_held_rect):
                    return state.copy()
        # Update the robot state.
        x += dx
        y += dy
        # Set desired grip directly and clip it.
        grip = np.clip(dgrip, self.grip_lb, self.grip_ub)
        next_state.set(self._robot, "x", x)
        next_state.set(self._robot, "y", y)
        next_state.set(self._robot, "grip", grip)
        if held_block is not None:
            # The held block moves rigidly with the robot.
            hx = hx + dx
            hy = hy + dy
            next_state.set(held_block, "x", hx)
            next_state.set(held_block, "y", hy)
        # If we're not holding anything and we're close enough to a block, grasp
        # it if the gripper is on and we are in the allowed grasping region.
        # Note: unlike parent env, we also need to check the grip.
        if block_to_be_picked is not None and grip > self.grasp_thresh and \
            any(hand_lb <= x <= hand_rb
                for hand_lb, hand_rb in self._get_hand_regions_block(state)):
            by = state.get(block_to_be_picked, "y")
            assert abs(y - by) < 1e-7  # due to snapping
            next_state.set(block_to_be_picked, "grasp", 1)
            next_state.set(self._robot, "holding", 1)
        # If we are holding something and we're not above a block, place it if
        # the gripper is off and we are low enough. Placing anywhere is allowed
        # but if we are over a target, we must be in its hand region.
        # Note: unlike parent env, we also need to check the grip.
        if held_block is not None and block_to_be_picked is None and \
            grip < self.grasp_thresh and at_place_height:
            # Tentatively set the next state and check whether the placement
            # would cover some target.
            next_state.set(held_block, "y", self.initial_block_y)
            next_state.set(held_block, "grasp", -1)
            next_state.set(self._robot, "holding", -1)
            targets = state.get_objects(self._target_type)
            place_would_cover = any(
                self._Covers_holds(next_state, [held_block, targ])
                for targ in targets)
            # If the place would cover, but we were outside of an allowed
            # hand region, then disallow the place. Otherwise, keep the new
            # next state (place succeeded).
            if place_would_cover and not any(hand_lb <= x <= hand_rb for \
                hand_lb, hand_rb in self._get_hand_regions_target(state)):
                return state.copy()
        return next_state
def render_state_plt(
self,
state: State,
task: Task,
action: Optional[Action] = None,
caption: Optional[str] = None) -> matplotlib.figure.Figure:
# Need to override rendering to account for new state features.
fig, ax = plt.subplots(1, 1)
# Draw main line
plt.plot([-0.2, 1.2], [-0.001, -0.001], color="black", linewidth=0.4)
# Draw hand regions
block_hand_regions = self._get_hand_regions_block(state)
target_hand_regions = self._get_hand_regions_target(state)
hand_regions = block_hand_regions + target_hand_regions
for i, (hand_lb, hand_rb) in enumerate(hand_regions):
if i == 0:
label = "Allowed hand region"
else:
label = None
plt.plot([hand_lb, hand_rb], [-0.08, -0.08],
color="red",
alpha=0.5,
lw=4.,
label=label)
# Draw hand
plt.scatter(state.get(self._robot, "x"),
state.get(self._robot, "y"),
color="r",
s=100,
alpha=1.,
zorder=10,
label="Hand")
plt.plot([state.get(self._robot, "x"),
state.get(self._robot, "x")],
[1, state.get(self._robot, "y")],
color="r",
alpha=1.,
zorder=10,
label=None)
lw = 2
cs = ["blue", "purple", "green", "yellow"]
block_alpha = 0.75
targ_alpha = 0.25
# Draw blocks
for i, block in enumerate(state.get_objects(self._block_type)):
c = cs[i]
bx, by = state.get(block, "x"), state.get(block, "y")
bw = state.get(block, "width")
bh = state.get(block, "height")
if state.get(block, "grasp") != -1:
lcolor = "red"
suffix = " (grasped)"
else:
lcolor = "gray"
suffix = ""
rect = plt.Rectangle((bx - bw / 2., by - bh),
bw,
bh,
linewidth=lw,
edgecolor=lcolor,
facecolor=c,
alpha=block_alpha,
label=f"block{i}" + suffix)
ax.add_patch(rect)
# Draw targets
for i, targ in enumerate(state.get_objects(self._target_type)):
c = cs[i]
rect = plt.Rectangle(
(state.get(targ, "x") - state.get(targ, "width") / 2., 0.0),
state.get(targ, "width"),
self.target_height,
linewidth=0,
edgecolor=lcolor,
facecolor=c,
alpha=targ_alpha,
label=f"target{i}")
ax.add_patch(rect)
grip = state.get(self._robot, "grip")
plt.title(f"Grip: {grip:.3f}")
plt.xlim(-0.2, 1.2)
plt.ylim(-0.25, 1)
plt.legend()
if caption is not None:
plt.suptitle(caption, wrap=True)
plt.tight_layout()
return fig
    def _create_initial_state(self, blocks: List[Object],
                              targets: List[Object],
                              rng: np.random.Generator) -> State:
        """Creates initial state by (1) placing targets and blocks in random
        locations such that each target has enough space on either side to
        ensure no covering placement will cause a collision (note that this is
        not necessary to make the task solvable; but we can do this and instead
        sufficiently tune the difficulty through hand region specification),
        and (2) choosing hand region intervals on the targets and blocks such
        that the problem is solvable."""
        assert len(blocks) == CFG.cover_num_blocks
        assert len(targets) == CFG.cover_num_targets
        data: Dict[Object, Array] = {}
        # Create hand regions for each block and target.
        block_hand_region_objs = []
        target_hand_region_objs = []
        for i in range(CFG.cover_num_blocks):
            block_hand_region_objs.append(
                Object(f"block{i}_hand_region", self._block_hand_region_type))
        for i in range(CFG.cover_num_targets):
            target_hand_region_objs.append(
                Object(f"target{i}_hand_region",
                       self._target_hand_region_type))
        # Place targets and blocks
        # Rejection sampling: resample all placements until no two reserved
        # intervals overlap, up to a maximum number of attempts.
        counter = 0
        while True:
            overlap = False
            counter += 1
            if counter > CFG.cover_multistep_max_tb_placements:
                raise RuntimeError("Reached maximum number of " \
                "placements of targets and blocks.")
            block_placements = []
            for block, bw in zip(blocks, CFG.cover_block_widths):
                # Keep the whole block inside [0, 1].
                xb = rng.uniform(bw / 2, 1 - bw / 2)
                left_pt = xb - bw / 2
                right_pt = xb + bw / 2
                block_placements.append((left_pt, xb, right_pt))
            target_placements = []
            for target, tw, bw in zip(targets, CFG.cover_target_widths,
                                      CFG.cover_block_widths):
                # Reserve room for a covering block: left_pt / right_pt are
                # the extreme left/right edges that a block of width bw can
                # reach while still covering this target, so the overlap
                # check below guarantees a covering placement cannot collide
                # with a neighboring block or target.
                xt = rng.uniform(bw - tw / 2, 1 - bw + tw / 2)
                left_pt = xt + tw / 2 - bw
                right_pt = xt - tw / 2 + bw
                target_placements.append((left_pt, xt, right_pt))
            # check for overlap
            all_placements = target_placements + block_placements
            all_placements.sort(key=lambda x: x[0])
            for i in range(1, len(all_placements)):
                curr = all_placements[i]
                prev = all_placements[i - 1]
                # After sorting by left edge, adjacent intervals overlap iff
                # the current left edge falls inside the previous interval.
                if curr[0] < prev[2]:
                    overlap = True
            if not overlap:
                break
        # Make the targets, blocks, and robot objects
        for target, width, placement in zip(targets, CFG.cover_target_widths,
                                            target_placements):
            _, x, _ = placement
            # [is_block, is_target, width, x]
            data[target] = np.array([0.0, 1.0, width, x])
        for block, width, placement in zip(blocks, CFG.cover_block_widths,
                                           block_placements):
            _, x, _ = placement
            # [is_block, is_target, width, x, grasp, y, height]
            data[block] = np.array([
                1.0, 0.0, width, x, -1.0, self.initial_block_y,
                self.block_height
            ])
        # [x, y, grip, holding]
        data[self._robot] = np.array([0.0, self.initial_robot_y, -1.0, -1.0])
        # Make the hand regions
        # Sample a hand region interval in each target
        target_hand_regions = []
        for i, target in enumerate(targets):
            target_hr = target_hand_region_objs[i]
            tw = CFG.cover_target_widths[i]
            _, x, _ = target_placements[i]
            region_length = tw * CFG.cover_multistep_thr_percent
            if CFG.cover_multistep_bimodal_goal:
                # Bimodal: pin the region flush to either the target's left
                # or right edge with equal probability.
                if rng.uniform(0, 1) < 0.5:
                    left_pt = x - tw / 2
                    region = [left_pt, left_pt + region_length]
                else:
                    right_pt = x + tw / 2
                    region = [right_pt - region_length, right_pt]
            else:
                # Unimodal: sample the region uniformly within the target.
                left_pt = rng.uniform(x - tw / 2, x + tw / 2 - region_length)
                region = [left_pt, left_pt + region_length]
            data[target_hr] = np.array(region)
            target_hand_regions.append(region)
        # Sample a hand region interval in each block
        for i, block in enumerate(blocks):
            block_hr = block_hand_region_objs[i]
            thr_left, thr_right = target_hand_regions[i]
            bw = CFG.cover_block_widths[i]
            tw = CFG.cover_target_widths[i]
            _, bx, _ = block_placements[i]
            _, tx, _ = target_placements[i]
            region_length = bw * CFG.cover_multistep_bhr_percent
            # The hand region we assign must not make it impossible to
            # cover the block's target.
            # To check this, we perform the following operation:
            # "Place" the block in the leftmost position that still covers
            # the target. Move the block to the right until it reaches the
            # rightmost position that still covers the target. During this,
            # check that there is nonzero overlap between the interval
            # spanned by the moving block's hand region, and the target's
            # hand region. In other words, make sure that there is at least
            # one placement of the block which covers the target, and in
            # which the block's hand region and the target's hand region
            # have nonzero overlap.
            # A proxy for this operation, which we do below, is to check:
            # (1) That in the block's rightmost covering placement, its
            # interval IS NOT completely to the left of the target's
            # hand region, and
            # (2) That in the block's leftmost covering placement, its
            # interval IS NOT completely to the right of the target's
            # hand region.
            counter = 0
            while True:
                counter += 1
                if counter > CFG.cover_multistep_max_hr_placements:
                    raise RuntimeError("Reached maximum number of " \
                    "placements of hand regions.")
                # Sample hand region
                left_pt = rng.uniform(bx - bw / 2, bx + bw / 2 - region_length)
                region = [left_pt, left_pt + region_length]
                # Need to make hand region relative to center of block for
                # the hand region to move with the block for use by
                # _get_hand_regions()
                relative_region = [region[0] - bx, region[1] - bx]
                # Perform the valid interval check
                relative_r = region[1] - (bx - bw / 2)  # for (1)
                relative_l = bx + bw / 2 - region[0]  # for (2)
                if relative_l >= (tx + tw/2 - thr_right) and \
                    relative_r >= (thr_left-(tx - tw/2)):
                    break
            # Store the block index (i) in the hand region object.
            data[block_hr] = np.array(relative_region + [i])
        return State(data)
def _get_hand_regions_block(self, state: State) \
-> List[Tuple[float, float]]:
blocks = state.get_objects(self._block_type)
block_hand_regions = state.get_objects(self._block_hand_region_type)
hand_regions = []
for block_hr in block_hand_regions:
block_idx_flt = state.get(block_hr, "block_idx")
assert block_idx_flt.is_integer()
block_idx = int(block_idx_flt)
block = blocks[block_idx]
hand_regions.append(
(state.get(block, "x") + state.get(block_hr, "lb"),
state.get(block, "x") + state.get(block_hr, "ub")))
return hand_regions
def _get_hand_regions_target(self, state: State) \
-> List[Tuple[float, float]]:
hand_regions = []
target_hand_regions = state.get_objects(self._target_hand_region_type)
for target_hr in target_hand_regions:
hand_regions.append((state.get(target_hr,
"lb"), state.get(target_hr, "ub")))
return hand_regions
def _Pick_initiable(self, s: State, m: Dict, o: Sequence[Object],
p: Array) -> bool:
# Convert the relative parameters into absolute parameters.
m["params"] = p
# Get the non-static object features.
block, robot = o
vec = [
s.get(block, "grasp"),
s.get(robot, "x"),
s.get(robot, "y"),
s.get(robot, "grip"),
s.get(robot, "holding"),
]
m["absolute_params"] = vec + p
return self._HandEmpty_holds(s, [])
def _Pick_policy(self, s: State, m: Dict, o: Sequence[Object],
p: Array) -> Action:
assert np.allclose(p, m["params"])
del p
absolute_params = m["absolute_params"]
# The object is the one we want to pick.
assert len(o) == 2
obj = o[0]
assert obj.type == self._block_type
x = s.get(self._robot, "x")
y = s.get(self._robot, "y")
by = s.get(obj, "y")
desired_x = absolute_params[1]
desired_y = by + 1e-3
at_desired_x = abs(desired_x - x) < 1e-5
lb, ub = CFG.cover_multistep_action_limits
# If we're above the object, move down and turn on the gripper.
if at_desired_x:
delta_y = np.clip(desired_y - y, lb, ub)
return Action(np.array([0., delta_y, 1.0], dtype=np.float32))
# If we're not above the object, but we're at a safe height,
# then move left/right.
if y >= self.initial_robot_y:
delta_x = np.clip(desired_x - x, lb, ub)
return Action(np.array([delta_x, 0., 1.0], dtype=np.float32))
# If we're not above the object, and we're not at a safe height,
# then move up.
delta_y = np.clip(self.initial_robot_y + 1e-2 - y, lb, ub)
return Action(np.array([0., delta_y, 1.0], dtype=np.float32))
def _Pick_terminal(self, s: State, m: Dict, o: Sequence[Object],
p: Array) -> bool:
assert np.allclose(p, m["params"])
# Pick is done when we're holding the desired object.
return self._Holding_holds(s, o)
def _Place_initiable(self, s: State, m: Dict, o: Sequence[Object],
p: Array) -> bool:
block, robot, _ = o
assert block.is_instance(self._block_type)
assert robot.is_instance(self._robot_type)
# Convert the relative parameters into absolute parameters.
m["params"] = p
# Only the block and robot are changing. Get the non-static features.
vec = [
s.get(block, "x"),
s.get(block, "grasp"),
s.get(robot, "x"),
s.get(robot, "grip"),
s.get(robot, "holding"),
]
m["absolute_params"] = vec + p
# Place is initiable if we're holding the object.
return self._Holding_holds(s, [block, robot])
def _Place_policy(self, s: State, m: Dict, o: Sequence[Object],
p: Array) -> Action:
assert np.allclose(p, m["params"])
del p
absolute_params = m["absolute_params"]
# The object is the one we want to place at.
assert len(o) == 3
obj = o[0]
assert obj.type == self._block_type
x = s.get(self._robot, "x")
y = s.get(self._robot, "y")
bh = s.get(obj, "height")
desired_x = absolute_params[2]
desired_y = bh + 1e-3
at_desired_x = abs(desired_x - x) < 1e-5
lb, ub = CFG.cover_multistep_action_limits
# If we're already above the object, move down and turn off the magnet.
if at_desired_x:
delta_y = np.clip(desired_y - y, lb, ub)
return Action(np.array([0., delta_y, -1.0], dtype=np.float32))
# If we're not above the object, but we're at a safe height,
# then move left/right.
if y >= self.initial_robot_y:
delta_x = np.clip(desired_x - x, lb, ub)
return Action(np.array([delta_x, 0., 1.0], dtype=np.float32))
# If we're not above the object, and we're not at a safe height,
# then move up.
delta_y = np.clip(self.initial_robot_y + 1e-2 - y, lb, ub)
return Action(np.array([0., delta_y, 1.0], dtype=np.float32))
def _Place_terminal(self, s: State, m: Dict, o: Sequence[Object],
p: Array) -> bool:
del o # unused
assert np.allclose(p, m["params"])
# Place is done when the hand is empty.
return self._HandEmpty_holds(s, [])
@staticmethod
def _Holding_holds(state: State, objects: Sequence[Object]) -> bool:
block, robot = objects
return state.get(block, "grasp") != -1 and \
state.get(robot, "holding") != -1
@staticmethod
def _Covers_holds(state: State, objects: Sequence[Object]) -> bool:
# Overriding because of the change from "pose" to "x" and because
# block's x-position is updated in every step of simulate and not just
# at the end of a place() operation so we cannot allow the predicate to
# hold when the block is in the air.
block, target = objects
block_pose = state.get(block, "x")
block_width = state.get(block, "width")
target_pose = state.get(target, "x")
target_width = state.get(target, "width")
by, bh = state.get(block, "y"), state.get(block, "height")
return (block_pose-block_width/2 <= target_pose-target_width/2) and \
(block_pose+block_width/2 >= target_pose+target_width/2) and \
(by - bh == 0)
| [
"matplotlib.pyplot.title",
"predicators.src.structs.Predicate",
"matplotlib.pyplot.suptitle",
"numpy.allclose",
"numpy.clip",
"predicators.src.structs.Object",
"matplotlib.pyplot.tight_layout",
"predicators.src.structs.Action",
"matplotlib.pyplot.yticks",
"predicators.src.structs.State",
"matplo... | [((924, 990), 'predicators.src.structs.Type', 'Type', (['"""block"""', "['is_block', 'is_target', 'width', 'pose', 'grasp']"], {}), "('block', ['is_block', 'is_target', 'width', 'pose', 'grasp'])\n", (928, 990), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1032, 1090), 'predicators.src.structs.Type', 'Type', (['"""target"""', "['is_block', 'is_target', 'width', 'pose']"], {}), "('target', ['is_block', 'is_target', 'width', 'pose'])\n", (1036, 1090), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1151, 1194), 'predicators.src.structs.Type', 'Type', (['"""robot"""', "['hand', 'pose_x', 'pose_z']"], {}), "('robot', ['hand', 'pose_x', 'pose_z'])\n", (1155, 1194), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1240, 1301), 'predicators.src.structs.Predicate', 'Predicate', (['"""IsBlock"""', '[self._block_type]', 'self._IsBlock_holds'], {}), "('IsBlock', [self._block_type], self._IsBlock_holds)\n", (1249, 1301), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1361, 1425), 'predicators.src.structs.Predicate', 'Predicate', (['"""IsTarget"""', '[self._target_type]', 'self._IsTarget_holds'], {}), "('IsTarget', [self._target_type], self._IsTarget_holds)\n", (1370, 1425), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1484, 1562), 'predicators.src.structs.Predicate', 'Predicate', (['"""Covers"""', '[self._block_type, self._target_type]', 'self._Covers_holds'], {}), "('Covers', [self._block_type, self._target_type], self._Covers_holds)\n", (1493, 1562), False, 'from predicators.src.structs import Action, Array, GroundAtom, 
Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1655, 1704), 'predicators.src.structs.Predicate', 'Predicate', (['"""HandEmpty"""', '[]', 'self._HandEmpty_holds'], {}), "('HandEmpty', [], self._HandEmpty_holds)\n", (1664, 1704), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((1729, 1790), 'predicators.src.structs.Predicate', 'Predicate', (['"""Holding"""', '[self._block_type]', 'self._Holding_holds'], {}), "('Holding', [self._block_type], self._Holding_holds)\n", (1738, 1790), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((2126, 2159), 'predicators.src.structs.Object', 'Object', (['"""robby"""', 'self._robot_type'], {}), "('robby', self._robot_type)\n", (2132, 2159), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((5707, 5722), 'gym.spaces.Box', 'Box', (['(0)', '(1)', '(1,)'], {}), '(0, 1, (1,))\n', (5710, 5722), False, 'from gym.spaces import Box\n'), ((5984, 6002), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (5996, 6002), True, 'import matplotlib.pyplot as plt\n'), ((6036, 6090), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.2, 1.2]', '[-0.055, -0.055]'], {'color': '"""black"""'}), "([-0.2, 1.2], [-0.055, -0.055], color='black')\n", (6044, 6090), True, 'import matplotlib.pyplot as plt\n'), ((8288, 8307), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.2)', '(1.2)'], {}), '(-0.2, 1.2)\n', (8296, 8307), True, 'import matplotlib.pyplot as plt\n'), ((8316, 8336), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.25)', '(0.5)'], {}), '(-0.25, 0.5)\n', (8324, 8336), True, 'import matplotlib.pyplot as plt\n'), ((8345, 8359), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (8355, 8359), True, 'import matplotlib.pyplot as plt\n'), ((8368, 8380), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8378, 8380), True, 'import matplotlib.pyplot as plt\n'), ((8466, 8484), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8482, 8484), True, 'import matplotlib.pyplot as plt\n'), ((11704, 11757), 'numpy.array', 'np.array', (['[0.5, self._workspace_x, self._workspace_z]'], {}), '([0.5, self._workspace_x, self._workspace_z])\n', (11712, 11757), True, 'import numpy as np\n'), ((11787, 11798), 'predicators.src.structs.State', 'State', (['data'], {}), '(data)\n', (11792, 11798), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((13993, 14007), 'predicators.src.structs.Action', 'Action', (['params'], {}), '(params)\n', (13999, 14007), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((16462, 16572), 'predicators.src.structs.Type', 'Type', (['"""block_derived"""', "['is_block', 'is_target', 'width', 'pose', 'grasp']"], {'parent': 'self._parent_block_type'}), "('block_derived', ['is_block', 'is_target', 'width', 'pose', 'grasp'],\n parent=self._parent_block_type)\n", (16466, 16572), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((17846, 17904), 'predicators.src.structs.Predicate', 'Predicate', (['"""Clear"""', '[self._target_type]', 'self._Clear_holds'], {}), "('Clear', [self._target_type], self._Clear_holds)\n", (17855, 17904), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((20609, 20687), 'predicators.src.structs.Type', 'Type', (['"""block"""', "['is_block', 'is_target', 'width', 'x', 'grasp', 'y', 'height']"], {}), "('block', ['is_block', 'is_target', 'width', 'x', 'grasp', 'y', 'height'])\n", (20613, 20687), False, 'from predicators.src.structs 
import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((20798, 20853), 'predicators.src.structs.Type', 'Type', (['"""target"""', "['is_block', 'is_target', 'width', 'x']"], {}), "('target', ['is_block', 'is_target', 'width', 'x'])\n", (20802, 20853), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((20971, 21015), 'predicators.src.structs.Type', 'Type', (['"""robot"""', "['x', 'y', 'grip', 'holding']"], {}), "('robot', ['x', 'y', 'grip', 'holding'])\n", (20975, 21015), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((21055, 21107), 'predicators.src.structs.Type', 'Type', (['"""block_hand_region"""', "['lb', 'ub', 'block_idx']"], {}), "('block_hand_region', ['lb', 'ub', 'block_idx'])\n", (21059, 21107), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((21192, 21232), 'predicators.src.structs.Type', 'Type', (['"""target_hand_region"""', "['lb', 'ub']"], {}), "('target_hand_region', ['lb', 'ub'])\n", (21196, 21232), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((21419, 21480), 'predicators.src.structs.Predicate', 'Predicate', (['"""IsBlock"""', '[self._block_type]', 'self._IsBlock_holds'], {}), "('IsBlock', [self._block_type], self._IsBlock_holds)\n", (21428, 21480), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((21540, 21604), 'predicators.src.structs.Predicate', 'Predicate', (['"""IsTarget"""', '[self._target_type]', 'self._IsTarget_holds'], {}), "('IsTarget', [self._target_type], self._IsTarget_holds)\n", (21549, 21604), False, 'from predicators.src.structs import Action, Array, GroundAtom, 
Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((21663, 21741), 'predicators.src.structs.Predicate', 'Predicate', (['"""Covers"""', '[self._block_type, self._target_type]', 'self._Covers_holds'], {}), "('Covers', [self._block_type, self._target_type], self._Covers_holds)\n", (21672, 21741), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((21834, 21883), 'predicators.src.structs.Predicate', 'Predicate', (['"""HandEmpty"""', '[]', 'self._HandEmpty_holds'], {}), "('HandEmpty', [], self._HandEmpty_holds)\n", (21843, 21883), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((21908, 21987), 'predicators.src.structs.Predicate', 'Predicate', (['"""Holding"""', '[self._block_type, self._robot_type]', 'self._Holding_holds'], {}), "('Holding', [self._block_type, self._robot_type], self._Holding_holds)\n", (21917, 21987), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((22198, 22231), 'predicators.src.structs.Object', 'Object', (['"""robby"""', 'self._robot_type'], {}), "('robby', self._robot_type)\n", (22204, 22231), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((28811, 28853), 'numpy.clip', 'np.clip', (['dgrip', 'self.grip_lb', 'self.grip_ub'], {}), '(dgrip, self.grip_lb, self.grip_ub)\n', (28818, 28853), True, 'import numpy as np\n'), ((31439, 31457), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (31451, 31457), True, 'import matplotlib.pyplot as plt\n'), ((31491, 31560), 'matplotlib.pyplot.plot', 'plt.plot', (['[-0.2, 1.2]', '[-0.001, -0.001]'], {'color': '"""black"""', 'linewidth': '(0.4)'}), "([-0.2, 1.2], [-0.001, -0.001], color='black', linewidth=0.4)\n", (31499, 31560), True, 
'import matplotlib.pyplot as plt\n'), ((34235, 34265), 'matplotlib.pyplot.title', 'plt.title', (['f"""Grip: {grip:.3f}"""'], {}), "(f'Grip: {grip:.3f}')\n", (34244, 34265), True, 'import matplotlib.pyplot as plt\n'), ((34274, 34293), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-0.2)', '(1.2)'], {}), '(-0.2, 1.2)\n', (34282, 34293), True, 'import matplotlib.pyplot as plt\n'), ((34302, 34320), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.25)', '(1)'], {}), '(-0.25, 1)\n', (34310, 34320), True, 'import matplotlib.pyplot as plt\n'), ((34329, 34341), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (34339, 34341), True, 'import matplotlib.pyplot as plt\n'), ((34427, 34445), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (34443, 34445), True, 'import matplotlib.pyplot as plt\n'), ((37968, 38017), 'numpy.array', 'np.array', (['[0.0, self.initial_robot_y, -1.0, -1.0]'], {}), '([0.0, self.initial_robot_y, -1.0, -1.0])\n', (37976, 38017), True, 'import numpy as np\n'), ((41791, 41802), 'predicators.src.structs.State', 'State', (['data'], {}), '(data)\n', (41796, 41802), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((43567, 43594), 'numpy.allclose', 'np.allclose', (['p', "m['params']"], {}), "(p, m['params'])\n", (43578, 43594), True, 'import numpy as np\n'), ((44671, 44719), 'numpy.clip', 'np.clip', (['(self.initial_robot_y + 0.01 - y)', 'lb', 'ub'], {}), '(self.initial_robot_y + 0.01 - y, lb, ub)\n', (44678, 44719), True, 'import numpy as np\n'), ((44917, 44944), 'numpy.allclose', 'np.allclose', (['p', "m['params']"], {}), "(p, m['params'])\n", (44928, 44944), True, 'import numpy as np\n'), ((45936, 45963), 'numpy.allclose', 'np.allclose', (['p', "m['params']"], {}), "(p, m['params'])\n", (45947, 45963), True, 'import numpy as np\n'), ((47059, 47107), 'numpy.clip', 'np.clip', (['(self.initial_robot_y + 0.01 - y)', 'lb', 'ub'], {}), 
'(self.initial_robot_y + 0.01 - y, lb, ub)\n', (47066, 47107), True, 'import numpy as np\n'), ((47331, 47358), 'numpy.allclose', 'np.allclose', (['p', "m['params']"], {}), "(p, m['params'])\n", (47342, 47358), True, 'import numpy as np\n'), ((6362, 6455), 'matplotlib.pyplot.plot', 'plt.plot', (['[hand_lb, hand_rb]', '[-0.08, -0.08]'], {'color': '"""red"""', 'alpha': '(0.5)', 'lw': '(8.0)', 'label': 'label'}), "([hand_lb, hand_rb], [-0.08, -0.08], color='red', alpha=0.5, lw=8.0,\n label=label)\n", (6370, 6455), True, 'import matplotlib.pyplot as plt\n'), ((8425, 8457), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['caption'], {'wrap': '(True)'}), '(caption, wrap=True)\n', (8437, 8457), True, 'import matplotlib.pyplot as plt\n'), ((9729, 9778), 'predicators.src.structs.GroundAtom', 'GroundAtom', (['self._Covers', '[blocks[0], targets[0]]'], {}), '(self._Covers, [blocks[0], targets[0]])\n', (9739, 9778), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((11067, 11106), 'numpy.array', 'np.array', (['[1.0, 0.0, width, pose, -1.0]'], {}), '([1.0, 0.0, width, pose, -1.0])\n', (11075, 11106), True, 'import numpy as np\n'), ((11532, 11565), 'numpy.array', 'np.array', (['[0.0, 1.0, width, pose]'], {}), '([0.0, 1.0, width, pose])\n', (11540, 11565), True, 'import numpy as np\n'), ((16120, 16159), 'numpy.array', 'np.array', (['[pick_pose]'], {'dtype': 'np.float32'}), '([pick_pose], dtype=np.float32)\n', (16128, 16159), True, 'import numpy as np\n'), ((23755, 23805), 'numpy.array', 'np.array', (['[lb, lb, self.grip_lb]'], {'dtype': 'np.float32'}), '([lb, lb, self.grip_lb], dtype=np.float32)\n', (23763, 23805), True, 'import numpy as np\n'), ((23826, 23876), 'numpy.array', 'np.array', (['[ub, ub, self.grip_ub]'], {'dtype': 'np.float32'}), '([ub, ub, self.grip_ub], dtype=np.float32)\n', (23834, 23876), True, 'import numpy as np\n'), ((31975, 32068), 'matplotlib.pyplot.plot', 'plt.plot', 
(['[hand_lb, hand_rb]', '[-0.08, -0.08]'], {'color': '"""red"""', 'alpha': '(0.5)', 'lw': '(4.0)', 'label': 'label'}), "([hand_lb, hand_rb], [-0.08, -0.08], color='red', alpha=0.5, lw=4.0,\n label=label)\n", (31983, 32068), True, 'import matplotlib.pyplot as plt\n'), ((33281, 33425), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(bx - bw / 2.0, by - bh)', 'bw', 'bh'], {'linewidth': 'lw', 'edgecolor': 'lcolor', 'facecolor': 'c', 'alpha': 'block_alpha', 'label': "(f'block{i}' + suffix)"}), "((bx - bw / 2.0, by - bh), bw, bh, linewidth=lw, edgecolor=\n lcolor, facecolor=c, alpha=block_alpha, label=f'block{i}' + suffix)\n", (33294, 33425), True, 'import matplotlib.pyplot as plt\n'), ((34386, 34418), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['caption'], {'wrap': '(True)'}), '(caption, wrap=True)\n', (34398, 34418), True, 'import matplotlib.pyplot as plt\n'), ((37494, 37524), 'numpy.array', 'np.array', (['[0.0, 1.0, width, x]'], {}), '([0.0, 1.0, width, x])\n', (37502, 37524), True, 'import numpy as np\n'), ((37784, 37861), 'numpy.array', 'np.array', (['[1.0, 0.0, width, x, -1.0, self.initial_block_y, self.block_height]'], {}), '([1.0, 0.0, width, x, -1.0, self.initial_block_y, self.block_height])\n', (37792, 37861), True, 'import numpy as np\n'), ((38901, 38917), 'numpy.array', 'np.array', (['region'], {}), '(region)\n', (38909, 38917), True, 'import numpy as np\n'), ((41743, 41774), 'numpy.array', 'np.array', (['(relative_region + [i])'], {}), '(relative_region + [i])\n', (41751, 41774), True, 'import numpy as np\n'), ((44185, 44215), 'numpy.clip', 'np.clip', (['(desired_y - y)', 'lb', 'ub'], {}), '(desired_y - y, lb, ub)\n', (44192, 44215), True, 'import numpy as np\n'), ((44451, 44481), 'numpy.clip', 'np.clip', (['(desired_x - x)', 'lb', 'ub'], {}), '(desired_x - x, lb, ub)\n', (44458, 44481), True, 'import numpy as np\n'), ((44742, 44789), 'numpy.array', 'np.array', (['[0.0, delta_y, 1.0]'], {'dtype': 'np.float32'}), '([0.0, delta_y, 1.0], 
dtype=np.float32)\n', (44750, 44789), True, 'import numpy as np\n'), ((46572, 46602), 'numpy.clip', 'np.clip', (['(desired_y - y)', 'lb', 'ub'], {}), '(desired_y - y, lb, ub)\n', (46579, 46602), True, 'import numpy as np\n'), ((46839, 46869), 'numpy.clip', 'np.clip', (['(desired_x - x)', 'lb', 'ub'], {}), '(desired_x - x, lb, ub)\n', (46846, 46869), True, 'import numpy as np\n'), ((47130, 47177), 'numpy.array', 'np.array', (['[0.0, delta_y, 1.0]'], {'dtype': 'np.float32'}), '([0.0, delta_y, 1.0], dtype=np.float32)\n', (47138, 47177), True, 'import numpy as np\n'), ((2022, 2037), 'gym.spaces.Box', 'Box', (['(0)', '(1)', '(1,)'], {}), '(0, 1, (1,))\n', (2025, 2037), False, 'from gym.spaces import Box\n'), ((9310, 9347), 'predicators.src.structs.Object', 'Object', (['f"""block{i}"""', 'self._block_type'], {}), "(f'block{i}', self._block_type)\n", (9316, 9347), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((9423, 9462), 'predicators.src.structs.Object', 'Object', (['f"""target{i}"""', 'self._target_type'], {}), "(f'target{i}', self._target_type)\n", (9429, 9462), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((9874, 9923), 'predicators.src.structs.GroundAtom', 'GroundAtom', (['self._Covers', '[blocks[1], targets[1]]'], {}), '(self._Covers, [blocks[1], targets[1]])\n', (9884, 9923), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((9995, 10044), 'predicators.src.structs.GroundAtom', 'GroundAtom', (['self._Covers', '[blocks[0], targets[0]]'], {}), '(self._Covers, [blocks[0], targets[0]])\n', (10005, 10044), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((10062, 10111), 'predicators.src.structs.GroundAtom', 'GroundAtom', 
(['self._Covers', '[blocks[1], targets[1]]'], {}), '(self._Covers, [blocks[1], targets[1]])\n', (10072, 10111), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((15269, 15289), 'gym.spaces.Box', 'Box', (['(-0.1)', '(0.1)', '(1,)'], {}), '(-0.1, 0.1, (1,))\n', (15272, 15289), False, 'from gym.spaces import Box\n'), ((15525, 15540), 'gym.spaces.Box', 'Box', (['(0)', '(1)', '(1,)'], {}), '(0, 1, (1,))\n', (15528, 15540), False, 'from gym.spaces import Box\n'), ((22566, 22592), 'gym.spaces.Box', 'Box', (['(-np.inf)', 'np.inf', '(5,)'], {}), '(-np.inf, np.inf, (5,))\n', (22569, 22592), False, 'from gym.spaces import Box\n'), ((22883, 22909), 'gym.spaces.Box', 'Box', (['(-np.inf)', 'np.inf', '(5,)'], {}), '(-np.inf, np.inf, (5,))\n', (22886, 22909), False, 'from gym.spaces import Box\n'), ((25011, 25082), 'predicators.src.utils.Rectangle', 'utils.Rectangle', ([], {'x': '(hx - hw / 2)', 'y': '(hy - hh)', 'width': 'hw', 'height': 'hh', 'theta': '(0)'}), '(x=hx - hw / 2, y=hy - hh, width=hw, height=hh, theta=0)\n', (25026, 25082), False, 'from predicators.src import utils\n'), ((25296, 25427), 'predicators.src.utils.Rectangle', 'utils.Rectangle', ([], {'x': '(held_rect.x + dx)', 'y': '(held_rect.y + dy)', 'width': 'held_rect.width', 'height': 'held_rect.height', 'theta': 'held_rect.theta'}), '(x=held_rect.x + dx, y=held_rect.y + dy, width=held_rect.\n width, height=held_rect.height, theta=held_rect.theta)\n', (25311, 25427), False, 'from predicators.src import utils\n'), ((28041, 28112), 'predicators.src.utils.Rectangle', 'utils.Rectangle', ([], {'x': '(bx - bw / 2)', 'y': '(by - bh)', 'width': 'bw', 'height': 'bh', 'theta': '(0)'}), '(x=bx - bw / 2, y=by - bh, width=bw, height=bh, theta=0)\n', (28056, 28112), False, 'from predicators.src import utils\n'), ((35530, 35591), 'predicators.src.structs.Object', 'Object', (['f"""block{i}_hand_region"""', 'self._block_hand_region_type'], {}), 
"(f'block{i}_hand_region', self._block_hand_region_type)\n", (35536, 35591), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((35700, 35763), 'predicators.src.structs.Object', 'Object', (['f"""target{i}_hand_region"""', 'self._target_hand_region_type'], {}), "(f'target{i}_hand_region', self._target_hand_region_type)\n", (35706, 35763), False, 'from predicators.src.structs import Action, Array, GroundAtom, Object, ParameterizedOption, Predicate, State, Task, Type\n'), ((44242, 44289), 'numpy.array', 'np.array', (['[0.0, delta_y, 1.0]'], {'dtype': 'np.float32'}), '([0.0, delta_y, 1.0], dtype=np.float32)\n', (44250, 44289), True, 'import numpy as np\n'), ((44508, 44555), 'numpy.array', 'np.array', (['[delta_x, 0.0, 1.0]'], {'dtype': 'np.float32'}), '([delta_x, 0.0, 1.0], dtype=np.float32)\n', (44516, 44555), True, 'import numpy as np\n'), ((46629, 46677), 'numpy.array', 'np.array', (['[0.0, delta_y, -1.0]'], {'dtype': 'np.float32'}), '([0.0, delta_y, -1.0], dtype=np.float32)\n', (46637, 46677), True, 'import numpy as np\n'), ((46896, 46943), 'numpy.array', 'np.array', (['[delta_x, 0.0, 1.0]'], {'dtype': 'np.float32'}), '([delta_x, 0.0, 1.0], dtype=np.float32)\n', (46904, 46943), True, 'import numpy as np\n'), ((25803, 25836), 'predicators.src.utils.LineSegment', 'utils.LineSegment', (['x1', 'y1', 'x2', 'y2'], {}), '(x1, y1, x2, y2)\n', (25820, 25836), False, 'from predicators.src import utils\n')] |
#!/usr/bin/env python
"""Sample a trained Keras generator model and display its outputs."""
import numpy as np
from keras.models import load_model

# Number of latent vectors to sample.
n_samples = 50

# NOTE(review): assumes 'generator.h5' exists in the working directory.
model = load_model('generator.h5')
input_shape = list(model.input_shape[1:])
input_data = np.random.random([n_samples] + input_shape)

# Overwrite the first three samples with fixed latent codes
# (assumes a 100-dimensional latent input -- TODO confirm).
input_data[0][0] = 1
for idx in range(1, 100):
    input_data[0][idx] = 0
for idx in range(100):
    input_data[1][idx] = 0
for idx in range(100):
    input_data[2][idx] = 1

output_data = model.predict(input_data)

import scipy.misc
for sample in output_data:
    print(sample.shape)
    # NOTE(review): scipy.misc.imshow was removed in SciPy >= 1.2; this
    # only runs on old SciPy builds -- confirm the target environment.
    scipy.misc.imshow(sample.squeeze())
| [
"keras.models.load_model",
"numpy.random.random"
] | [((103, 129), 'keras.models.load_model', 'load_model', (['"""generator.h5"""'], {}), "('generator.h5')\n", (113, 129), False, 'from keras.models import load_model\n'), ((185, 228), 'numpy.random.random', 'np.random.random', (['([n_samples] + input_shape)'], {}), '([n_samples] + input_shape)\n', (201, 228), True, 'import numpy as np\n')] |
import numpy as np
def get_indices(N, n_batches, split_ratio):
    """Generate walk-forward splits of the indices 0..N-1.

    Each batch is a triple [i, j, k] where [i, j) is the training part and
    [j, k] the validation part, with (k - j) = split_ratio * (j - i).
    The first batch starts at i = 0, the last one ends at k = N - 1, and
    consecutive batches are shifted by the validation-window size, so the
    validation segments tile the tail of the range without gaps.

    Args:
        N (int): total counts
        n_batches (int): number of splits
        split_ratio (float): ratio of validation length to training length,
            defines the position of j inside [i, k].

    Yields:
        numpy.ndarray: batch indices [i, j, k]
    """
    # Solve (n_batches - 1) * step + window = N - 1, where the full window
    # length is window = step * (1 + split_ratio) / split_ratio and `step`
    # is both the validation length (k - j) and the shift between batches.
    step = (N - 1) * split_ratio / (n_batches * split_ratio + 1)
    train_len = step / split_ratio
    for b in range(n_batches):
        start = b * step
        yield np.array([int(round(start)),
                        int(round(start + train_len)),
                        int(round(start + train_len + step))])
def main():
    """Demo: print five walk-forward splits over 100 indices."""
    # Expected result:
    # [0, 44, 55]
    # [11, 55, 66]
    # [22, 66, 77]
    # [33, 77, 88]
    # [44, 88, 99]
    for batch_inds in get_indices(100, 5, 0.25):
        print(batch_inds)
if __name__ == "__main__":
main() | [
"numpy.array"
] | [((559, 578), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (567, 578), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
import sys
# local module
import video
from video import presets
class CamshiftTracker:
    """Tracks a user-selected image region across camera frames with CAMShift.

    Workflow: ask_for_selection() lets the user drag a rectangle over the
    live feed; its hue histogram becomes the tracking model, and update()
    back-projects that histogram onto each new frame and runs cv.CamShift
    to follow the region.
    """
    def __init__(self):
        self.camera = get_new_video_source()
        # Centre of the tracked region (x, y); stays None until update() runs.
        self.point = None
        # Mouse-selection state, driven by the selector() callback.
        self.user_selection_box = None  # (xmin, ymin, xmax, ymax) while dragging
        self.drag_start = None          # (x, y) where the current drag began
        self.track_window = None        # (x, y, w, h) search window for CamShift
        self.selection_completed = False
    def get_last_frame(self):
        # Most recent frame captured by read_frame().
        return self.frame
    def ask_for_selection(self):
        """Open a picker window and block until the user drags a selection
        rectangle (or presses Esc to abort without one)."""
        cv.namedWindow("selection picker")
        cv.setMouseCallback("selection picker", self.selector)
        while not self.selection_completed:
            self.read_frame()
            cv.imshow("selection picker", self.frame)
            ch = cv.waitKey(5)
            if ch == 27:
                # if the user presses escape,
                # abort the selection process.
                break
        cv.destroyWindow("selection picker")
    def selector(self, event, x, y, flags, param):
        """Mouse callback: track the drag rectangle and, on button release,
        build the hue histogram used as the CamShift tracking model.

        NOTE(review): if EVENT_LBUTTONUP arrives while drag_start is falsy
        (no active drag), xmin..ymax are unbound and this raises
        UnboundLocalError — confirm the down/up pairing is guaranteed.
        """
        if not self.selection_completed:
            if event == cv.EVENT_LBUTTONDOWN:
                self.drag_start = (x, y)
                self.track_window = None
            if self.drag_start:
                xmin = min(x, self.drag_start[0])
                ymin = min(y, self.drag_start[1])
                xmax = max(x, self.drag_start[0])
                ymax = max(y, self.drag_start[1])
                self.user_selection_box = (xmin, ymin, xmax, ymax)
            if event == cv.EVENT_LBUTTONUP:
                self.drag_start = None
                self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
                print("Got selection!")
                print(self.track_window)
                # compute histogram for selection
                hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
                # mask = cv.inRange(hsv, np.array((70., 100., 50.)), np.array((150., 255., 255.)))
                # Current mask accepts every pixel (full HSV range).
                mask = cv.inRange(hsv, np.array((0., 0., 0.)), np.array((255., 255., 255.)))
                x0, y0, x1, y1 = self.user_selection_box
                hsv_roi = hsv[y0:y1, x0:x1]
                mask_roi = mask[y0:y1, x0:x1]
                # 16-bin histogram over the hue channel (range 0-180).
                hist = cv.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] )
                cv.normalize(hist, hist, 0, 255, cv.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.selection_completed = True
    def read_frame(self):
        # Capture (and mirror) a fresh frame from the camera.
        self.frame = read_frame_from_camera(self.camera)
    def update(self):
        """Advance the tracker by one frame: back-project the selection's
        hue histogram and let cv.CamShift refine the track window."""
        hsv = cv.cvtColor(self.frame, cv.COLOR_BGR2HSV)
        # Keep only reasonably bright pixels (value >= 127).
        mask = cv.inRange(hsv, np.array((0., 00., 127.)), np.array((180., 255., 255.)))
        # Skip tracking until a selection with a non-degenerate window exists.
        if self.track_window and self.track_window[2] > 0 and self.track_window[3] > 0:
            # Probability map: how well each pixel's hue matches the model.
            prob = cv.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            # Stop after 10 iterations or when the window moves < 1 px.
            term_crit = ( cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1 )
            track_box, self.track_window = cv.CamShift(prob, self.track_window, term_crit)
            # track_box[0] is the centre of the fitted rotated rectangle.
            x = int(track_box[0][0])
            y = int(track_box[0][1])
            self.point = (x, y)
            self.track_box = track_box
    def update_all(self):
        # Convenience: grab a frame and run one tracking step.
        self.read_frame()
        self.update()
def get_new_video_source():
    """Open capture source 0 (the default camera) with the 'cube' preset."""
    return video.create_capture(0, presets['cube'])
def read_frame_from_camera(camera):
    """Read a single frame from *camera* and return it mirrored horizontally.

    Args:
        camera: an opened capture object whose read() returns (ok, frame).

    Returns:
        The horizontally flipped frame.

    Raises:
        AssertionError: if the capture device did not deliver a frame.
    """
    ret_value, frame = camera.read()
    # Check the read result *before* touching the frame: on a failed read
    # the original code called cv.flip(None, 1) first, which raises an
    # opaque OpenCV error instead of the intended assertion message.
    assert ret_value, "Something went horribly wrong, unable to read frame from camera!"
    return cv.flip(frame, 1)
if __name__ == "__main__":
    ct = CamshiftTracker()
    cv.namedWindow("main")
    # Phase 1: show the live feed until the user presses Esc (quit) or
    # 's' to draw the selection rectangle that will be tracked.
    while True:
        ch = cv.waitKey(5)
        if ch == 27:
            # escape pressed
            sys.exit()
        elif ch == ord("s"):
            # create a selection
            ct.ask_for_selection()
            break
        ct.read_frame()
        cv.imshow("main", ct.get_last_frame())
    # Phase 2: track the selected region until Esc is pressed.
    while True:
        ch = cv.waitKey(5)
        if ch == 27:
            # escape pressed
            break
        ct.update_all()
        frame = ct.get_last_frame()
        # cv.circle(frame, ct.point, 10, (0, 0, 255), -1)
        # NOTE(review): ct.track_box is only assigned after a successful
        # CamShift step; if the selection was aborted (no track_window)
        # this raises AttributeError — confirm intended behaviour.
        cv.ellipse(frame, ct.track_box, (0, 0, 255), 2)
        cv.imshow("main", frame)
        print(ct.point)
    # cleanup!
    cv.destroyAllWindows()
| [
"video.create_capture",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.calcHist",
"cv2.imshow",
"sys.exit",
"cv2.CamShift",
"cv2.setMouseCallback",
"cv2.ellipse",
"numpy.array",
"cv2.destroyWindow",
"cv2.calcBackProject",
"cv2.normalize",
"cv2.flip",
"cv2.destroyAllWindows",
"cv2.namedWindow"
] | [((3354, 3402), 'video.create_capture', 'video.create_capture', (['video_src', "presets['cube']"], {}), "(video_src, presets['cube'])\n", (3374, 3402), False, 'import video\n'), ((3554, 3571), 'cv2.flip', 'cv.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (3561, 3571), True, 'import cv2 as cv\n'), ((3739, 3761), 'cv2.namedWindow', 'cv.namedWindow', (['"""main"""'], {}), "('main')\n", (3753, 3761), True, 'import cv2 as cv\n'), ((4526, 4548), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (4546, 4548), True, 'import cv2 as cv\n'), ((522, 556), 'cv2.namedWindow', 'cv.namedWindow', (['"""selection picker"""'], {}), "('selection picker')\n", (536, 556), True, 'import cv2 as cv\n'), ((565, 619), 'cv2.setMouseCallback', 'cv.setMouseCallback', (['"""selection picker"""', 'self.selector'], {}), "('selection picker', self.selector)\n", (584, 619), True, 'import cv2 as cv\n'), ((930, 966), 'cv2.destroyWindow', 'cv.destroyWindow', (['"""selection picker"""'], {}), "('selection picker')\n", (946, 966), True, 'import cv2 as cv\n'), ((2541, 2582), 'cv2.cvtColor', 'cv.cvtColor', (['self.frame', 'cv.COLOR_BGR2HSV'], {}), '(self.frame, cv.COLOR_BGR2HSV)\n', (2552, 2582), True, 'import cv2 as cv\n'), ((3862, 3875), 'cv2.waitKey', 'cv.waitKey', (['(5)'], {}), '(5)\n', (3872, 3875), True, 'import cv2 as cv\n'), ((4188, 4201), 'cv2.waitKey', 'cv.waitKey', (['(5)'], {}), '(5)\n', (4198, 4201), True, 'import cv2 as cv\n'), ((4399, 4446), 'cv2.ellipse', 'cv.ellipse', (['frame', 'ct.track_box', '(0, 0, 255)', '(2)'], {}), '(frame, ct.track_box, (0, 0, 255), 2)\n', (4409, 4446), True, 'import cv2 as cv\n'), ((4456, 4480), 'cv2.imshow', 'cv.imshow', (['"""main"""', 'frame'], {}), "('main', frame)\n", (4465, 4480), True, 'import cv2 as cv\n'), ((707, 748), 'cv2.imshow', 'cv.imshow', (['"""selection picker"""', 'self.frame'], {}), "('selection picker', self.frame)\n", (716, 748), True, 'import cv2 as cv\n'), ((767, 780), 'cv2.waitKey', 'cv.waitKey', (['(5)'], {}), 
'(5)\n', (777, 780), True, 'import cv2 as cv\n'), ((2614, 2641), 'numpy.array', 'np.array', (['(0.0, 0.0, 127.0)'], {}), '((0.0, 0.0, 127.0))\n', (2622, 2641), True, 'import numpy as np\n'), ((2641, 2672), 'numpy.array', 'np.array', (['(180.0, 255.0, 255.0)'], {}), '((180.0, 255.0, 255.0))\n', (2649, 2672), True, 'import numpy as np\n'), ((2820, 2874), 'cv2.calcBackProject', 'cv.calcBackProject', (['[hsv]', '[0]', 'self.hist', '[0, 180]', '(1)'], {}), '([hsv], [0], self.hist, [0, 180], 1)\n', (2838, 2874), True, 'import cv2 as cv\n'), ((3024, 3071), 'cv2.CamShift', 'cv.CamShift', (['prob', 'self.track_window', 'term_crit'], {}), '(prob, self.track_window, term_crit)\n', (3035, 3071), True, 'import cv2 as cv\n'), ((3939, 3949), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3947, 3949), False, 'import sys\n'), ((1801, 1842), 'cv2.cvtColor', 'cv.cvtColor', (['self.frame', 'cv.COLOR_BGR2HSV'], {}), '(self.frame, cv.COLOR_BGR2HSV)\n', (1812, 1842), True, 'import cv2 as cv\n'), ((2206, 2259), 'cv2.calcHist', 'cv.calcHist', (['[hsv_roi]', '[0]', 'mask_roi', '[16]', '[0, 180]'], {}), '([hsv_roi], [0], mask_roi, [16], [0, 180])\n', (2217, 2259), True, 'import cv2 as cv\n'), ((2278, 2326), 'cv2.normalize', 'cv.normalize', (['hist', 'hist', '(0)', '(255)', 'cv.NORM_MINMAX'], {}), '(hist, hist, 0, 255, cv.NORM_MINMAX)\n', (2290, 2326), True, 'import cv2 as cv\n'), ((1981, 2006), 'numpy.array', 'np.array', (['(0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0))\n', (1989, 2006), True, 'import numpy as np\n'), ((2005, 2036), 'numpy.array', 'np.array', (['(255.0, 255.0, 255.0)'], {}), '((255.0, 255.0, 255.0))\n', (2013, 2036), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Импорт библиотек для работы с классификаторами,
# импорт функии preprocessing из модуля common
import os
import numpy as np
from termcolor import colored
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import Word2VecKeyedVectors
from common import preprocessing
from functools import partial
# Preprocessing pipeline shared by every TfidfVectorizer below: strip
# punctuation and stopwords and normalise tokens before weighting.
preprocessor = partial(preprocessing, remove_punctuation=True, remove_stopwords=True, normalisation=True)
class W2vModel(KeyedVectors):
    """Wrapper around gensim KeyedVectors that lazily loads a pre-trained
    word2vec model from disk and caches it for subsequent calls."""
    def __init__(self, vector_size: int = 500):
        # sz500 OR sz100
        super().__init__(vector_size)
        # Placeholder vectors; replaced with the real model by load_w2v().
        self.w2v_model = Word2VecKeyedVectors(vector_size)
        self.w2v_model_loaded = False
    def load_w2v(self, fpath: str = "all.norm-sz500-w10-cb0-it3-min5.w2v", binary=True, unicode_errors='ignore'):
        """Load (once) and return the pre-trained word2vec vectors from *fpath*.

        NOTE(review): the progress message is printed even when the model is
        already cached, because the loaded-flag check happens after the print.
        """
        # all.norm-sz500-w10-cb0-it3-min5.w2v OR all.norm-sz100-w10-cb0-it1-min100.w2v
        print(colored('Загрузка обученной модели Word2Vec....', 'blue'))
        if self.w2v_model_loaded:
            return self.w2v_model
        self.w2v_model = self.load_word2vec_format(fpath, binary=binary, unicode_errors=unicode_errors)
        # self.w2v_model.init_sims(replace=True)
        self.w2v_model_loaded = True
        return self.w2v_model
# Module-level singleton so the heavy word2vec file is loaded at most once.
w2v_model = W2vModel()
def __get_data_by_paths(paths: list):
    """Read every file in *paths* (UTF-8) and return their contents as a list
    of strings, one string per file."""
    texts = []
    for fpath in paths:
        with open(fpath, 'r', encoding='utf-8') as fh:
            texts.append(fh.read())
    return texts
# Синтез методов векторизации текстов TF-IDF и векторизации слов Word2vec
# Combination of the TF-IDF text-vectorization and Word2vec word-vectorization methods
def __w2v_weigh_tfidf(tfidf_, w2v_, concatenation=True):
    """Build document vectors by weighting Word2vec word vectors with TF-IDF.

    Each document vector is sum_j tfidf[i][j] * w2v[j] — the TF-IDF-weighted
    sum of the word embeddings, which is exactly the matrix product
    ``tfidf @ w2v``.  With ``concatenation=True`` the raw TF-IDF row is
    prepended to the weighted embedding of each document.

    Args:
        tfidf_: sparse (docs x words) TF-IDF matrix (must support .toarray()).
        w2v_: (words x N) sequence of word vectors, N = embedding dimension.
        concatenation (bool): prepend the TF-IDF row to each result row.

    Returns:
        np.ndarray of float32 with shape (docs, words + N) or (docs, N).
    """
    tfidf = np.asarray(tfidf_.toarray(), dtype=float)
    w2v = np.array(w2v_, dtype=float)
    # One vectorised matrix product replaces the original accidental
    # O(docs * words * N) pure-Python triple loop; results are identical
    # up to the same float32 cast the original performed row by row.
    weighted = tfidf @ w2v
    if concatenation:
        res = np.concatenate((tfidf, weighted), axis=1)
    else:
        res = weighted
    return res.astype(np.float32)
# Классификация текстов на основе классического метода tf-idf,
# на входе: data_dir_path - путь к папке с текстовыми данными,
# test_size - доля тестовой выборки,
# classifier - выбор классификатора (по умолчанию - MLP),
# и дополнительные параметры:
# verbose - вывод в консоль,
# svm_kernel - задание ядра SVM
# mlp_activation - функция активации нейронов скрытого слоя MLP
# Функция возвращает точность классификации на обучающей и тестовой выборках при заданных параметрах
def tf_idf(data_dir_path: str, test_size: float = 0.3, classifier: str = 'MLP', **kwargs):
    """Classify texts using plain TF-IDF features.

    Args:
        data_dir_path: directory with one sub-directory per class; the number
            of classes equals the number of sub-directories.
        test_size: fraction of the corpus held out for testing.
        classifier: 'SVM', 'KNN', 'RFC' or 'MLP' (default).
        **kwargs: optional settings:
            verbose (bool): print progress to the console.
            svm_kernel (str): SVM kernel ('linear' if omitted).
            mlp_activation (str): hidden-layer activation for the MLP
                (library default, ReLU, if omitted).

    Returns:
        (accuracy_train, accuracy_test): classification accuracy in percent
        on the training and test sets.
    """
    # dict.get replaces the original try/except-KeyError boilerplate.
    verbose = kwargs.get('verbose', False)
    svm_kernel = kwargs.get('svm_kernel', '')
    mlp_activation = kwargs.get('mlp_activation', '')
    # Number of clusters = number of sub-directories in data_dir_path.
    n_clusters = len(os.listdir(data_dir_path))
    # Collect file paths and their class labels (sub-directory index).
    paths, answers = [], []
    for i, category_dir in enumerate(os.listdir(data_dir_path)):
        for root, _, files in os.walk(os.path.join(data_dir_path, category_dir), topdown=False):
            for name in files:
                paths.append(os.path.join(root, name))
                answers.append(i)
    # Split the corpus into training and test sets.
    X_train, X_test, y_train, y_test = train_test_split(paths, answers, test_size=test_size, random_state=42, shuffle=True)
    TRAIN_DATA = __get_data_by_paths(X_train)
    if verbose:
        print(colored('TF-IDF / {} classifier'.format(classifier), 'green'))
    tfidf_vectorizer = TfidfVectorizer(preprocessor=preprocessor)
    # One TF-IDF vector per training document.
    tfidf = tfidf_vectorizer.fit_transform(TRAIN_DATA)
    if verbose:
        print(colored('Число примеров тренировочной выборки - {}'.format(len(TRAIN_DATA)), 'blue'))
        print(colored('Всего различных слов - {}'.format(len(tfidf_vectorizer.get_feature_names())), 'blue'))
    if verbose:
        print(colored('Классификация... Кол-во кластеов: {}'.format(n_clusters), 'red'))
    # Build and fit the requested classifier on the training set.
    clf = _make_classifier(classifier, svm_kernel, mlp_activation)
    clf.fit(tfidf, y_train)
    if verbose:
        print(colored('Готово!', 'red'))
    if verbose:
        print(colored('Тестирование.', 'cyan'))
    # Accuracy on the training set.
    train_response = clf.predict(tfidf)
    k_train = sum(int(p == t) for p, t in zip(train_response, y_train))
    TEST_DATA = __get_data_by_paths(X_test)
    if verbose:
        print(colored('Число примеров тестовой выборки - {}'.format(len(TEST_DATA)), 'blue'))
    # Accuracy on the held-out test set.
    test_response = clf.predict(tfidf_vectorizer.transform(TEST_DATA))
    k_test = sum(int(p == t) for p, t in zip(test_response, y_test))
    accuracy_train = k_train / len(train_response) * 100
    accuracy_test = k_test / len(test_response) * 100
    if verbose:
        print(colored('Точность на обучающей выборке: {:.3f}%'.format(accuracy_train), 'blue'))
        print(colored('Точность на тестовой выборке: {:.3f}%'.format(accuracy_test), 'blue'))
    return accuracy_train, accuracy_test


def _make_classifier(classifier: str, svm_kernel: str = '', mlp_activation: str = ''):
    """Build the scikit-learn classifier selected by name.

    'SVM' -> svm.SVC (linear kernel unless svm_kernel is given),
    'KNN' -> 7 nearest neighbours, 'RFC' -> 100-tree random forest,
    anything else -> MLPClassifier (library default activation, ReLU,
    unless mlp_activation is given).
    """
    if classifier == 'SVM':
        return svm.SVC(kernel=svm_kernel or 'linear')
    if classifier == 'KNN':
        return KNeighborsClassifier(n_neighbors=7)
    if classifier == 'RFC':
        return RandomForestClassifier(n_estimators=100)
    if mlp_activation:
        return MLPClassifier(activation=mlp_activation)
    return MLPClassifier()
# Классификация текстов на основе синтезированного метода tf-idf+word2vec,
# на входе: data_dir_path - путь к папке с текстовыми данными,
# test_size - доля тестовой выборки,
# classifier - выбор классификатора (по умолчанию - MLP),
# и дополнительные параметры:
# verbose - вывод в консоль,
# svm_kernel - задание ядра SVM
# mlp_activation - функция активации нейронов скрытого слоя MLP
# Функция возвращает точность классификации на обучающей и тестовой выборках при заданных параметрах
def w2v_w_tf_idf(data_dir_path: str, test_size: float = 0.3, classifier: str = 'MLP', concatenate=True, **kwargs):
    """Classify texts with the combined tf-idf + word2vec representation.

    Args:
        data_dir_path: directory with one sub-directory per class.
        test_size: fraction of the corpus held out for testing.
        classifier: 'SVM', 'KNN', 'RFC' or 'MLP' (default).
        concatenate: prepend the raw TF-IDF vector to each document vector.
        **kwargs: verbose (bool), svm_kernel (str), mlp_activation (str).

    Returns:
        (accuracy_train, accuracy_test) in percent on the training and
        test sets.
    """
    # Load the pre-trained word2vec model (cached module-level singleton).
    model = w2v_model.load_w2v()
    N = w2v_model.vector_size
    try:
        verbose = kwargs['verbose']
    except KeyError:
        verbose = False
    try:
        svm_kernel = kwargs['svm_kernel']
    except KeyError:
        svm_kernel = ''
    try:
        mlp_activation = kwargs['mlp_activation']
    except KeyError:
        mlp_activation = ''
    n_clusters = len(os.listdir(data_dir_path))  # number of clusters = number of sub-directories in the data folder
    paths, answers = [], []
    for i, category_dir in enumerate(os.listdir(data_dir_path)):
        for root, _, files in os.walk(os.path.join(data_dir_path, category_dir), topdown=False):
            for name in files:
                paths.append(os.path.join(root, name))
                answers.append(i)
    # Split the corpus into training and test sets
    X_train, X_test, y_train, y_test = train_test_split(paths, answers, test_size=test_size, random_state=42, shuffle=True)
    TRAIN_DATA = __get_data_by_paths(X_train)
    # W2V + TF-IDF
    if verbose and concatenate:
        print(colored('W2V weighed by TF-IDF with concatenation / {} classifier'.format(classifier), 'green'))
    elif verbose and not concatenate:
        print(colored('W2V weighed by TF-IDF without concatenation / {} classifier'.format(classifier), 'green'))
    tfidf_vectorizer = TfidfVectorizer(preprocessor=preprocessor)
    # One TF-IDF vector per training document
    tfidf = tfidf_vectorizer.fit_transform(TRAIN_DATA)
    if verbose:
        print(colored('Число примеров тренировочной выборки - {}'.format(len(TRAIN_DATA)), 'blue'))
        print(colored('Всего различных слов - {}'.format(len(tfidf_vectorizer.get_feature_names())), 'blue'))
    if verbose:
        print(colored('Расчёт векторов слов по w2v...', 'blue'))
    w2v = []
    no_term_error = 0
    # Look up every vocabulary term in the pre-trained word2vec model;
    # out-of-vocabulary terms get a zero vector.
    for term in tfidf_vectorizer.get_feature_names():
        try:
            vector = model.wv[term]
        except KeyError:
            # print('w2v: no term {}'.format(term))
            no_term_error += 1
            vector = [0] * N
        w2v.append(vector)
    if no_term_error and verbose:
        print(colored('В загруженной модели w2v отсутствуют вектора для {} слов!'.format(no_term_error), 'red'))
    if verbose:
        print(colored('Взвешивание и конкатенация...', 'yellow'))
    # Vector representations of all training documents (combined method)
    res = __w2v_weigh_tfidf(tfidf, w2v, concatenation=concatenate)
    # print(res)
    if verbose:
        print(colored('Классификация... Кол-во кластеов: {}'.format(n_clusters), 'red'))
    # svc = svm.SVC(kernel=svm_kernel)
    # svc.fit(res, y_train)
    # knn = KNeighborsClassifier(n_neighbors=7)
    # knn.fit(res, y_train)
    # rfc = RandomForestClassifier(n_estimators=100)
    # rfc.fit(res, y_train)
    # mlp = MLPClassifier()
    # mlp.fit(res, y_train)
    # Classifier selection
    if classifier == 'SVM':
        if svm_kernel:
            clf = svm.SVC(kernel=svm_kernel)
        else:
            clf = svm.SVC(kernel='linear')
    elif classifier == 'KNN':
        clf = KNeighborsClassifier(n_neighbors=7)
    elif classifier == 'RFC':
        clf = RandomForestClassifier(n_estimators=100)
    else:
        if mlp_activation:
            clf = MLPClassifier(activation=mlp_activation)
        else:
            clf = MLPClassifier()
    # Fit the classifier on the training set
    clf.fit(res, y_train)
    if verbose:
        print(colored('Готово!', 'red'))
    if verbose:
        print(colored('Тестирование.', 'cyan'))
    # train_response = svc.predict(res)
    # train_response = knn.predict(res)
    # train_response = rfc.predict(res)
    # train_response = mlp.predict(res)
    # Model response on the training examples
    train_response = clf.predict(res)
    # Count correctly classified training documents
    k_train = 0
    for i in range(len(train_response)):
        if train_response[i] == y_train[i]:
            k_train += 1
    TEST_DATA = __get_data_by_paths(X_test)
    if verbose:
        print(colored('Число примеров тестовой выборки - {}'.format(len(TEST_DATA)), 'blue'))
    tfidf = tfidf_vectorizer.transform(TEST_DATA)
    if verbose:
        print(colored('Взвешивание и конкатенация...', 'yellow'))
    # Vectorize the test data with the combined method
    res = __w2v_weigh_tfidf(tfidf, w2v, concatenation=concatenate)
    # test_response = svc.predict(res)
    # test_response = knn.predict(res)
    # test_response = rfc.predict(res)
    # test_response = mlp.predict(res)
    # Model response on the test examples
    test_response = clf.predict(res)
    # Count correctly classified test documents
    k_test = 0
    for i in range(len(test_response)):
        if test_response[i] == y_test[i]:
            k_test += 1
    # Classification accuracy on the training and test sets
    accuracy_train = k_train/len(train_response)*100
    accuracy_test = k_test/len(test_response)*100
    if verbose:
        print(colored('Точность на обучающей выборке: {:.3f}%'.format(accuracy_train), 'blue'))
        print(colored('Точность на тестовой выборке: {:.3f}%'.format(accuracy_test), 'blue'))
    return accuracy_train, accuracy_test
| [
"sklearn.ensemble.RandomForestClassifier",
"functools.partial",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.model_selection.train_test_split",
"gensim.models.keyedvectors.Word2VecKeyedVectors",
"numpy.zeros",
"termcolor.colored",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.array... | [((648, 742), 'functools.partial', 'partial', (['preprocessing'], {'remove_punctuation': '(True)', 'remove_stopwords': '(True)', 'normalisation': '(True)'}), '(preprocessing, remove_punctuation=True, remove_stopwords=True,\n normalisation=True)\n', (655, 742), False, 'from functools import partial\n'), ((1991, 2018), 'numpy.array', 'np.array', (['w2v_'], {'dtype': 'float'}), '(w2v_, dtype=float)\n', (1999, 2018), True, 'import numpy as np\n'), ((4038, 4126), 'sklearn.model_selection.train_test_split', 'train_test_split', (['paths', 'answers'], {'test_size': 'test_size', 'random_state': '(42)', 'shuffle': '(True)'}), '(paths, answers, test_size=test_size, random_state=42,\n shuffle=True)\n', (4054, 4126), False, 'from sklearn.model_selection import train_test_split\n'), ((4287, 4329), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'preprocessor': 'preprocessor'}), '(preprocessor=preprocessor)\n', (4302, 4329), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((8404, 8492), 'sklearn.model_selection.train_test_split', 'train_test_split', (['paths', 'answers'], {'test_size': 'test_size', 'random_state': '(42)', 'shuffle': '(True)'}), '(paths, answers, test_size=test_size, random_state=42,\n shuffle=True)\n', (8420, 8492), False, 'from sklearn.model_selection import train_test_split\n'), ((8874, 8916), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'preprocessor': 'preprocessor'}), '(preprocessor=preprocessor)\n', (8889, 8916), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((907, 940), 'gensim.models.keyedvectors.Word2VecKeyedVectors', 'Word2VecKeyedVectors', (['vector_size'], {}), '(vector_size)\n', (927, 940), False, 'from gensim.models.keyedvectors import Word2VecKeyedVectors\n'), ((2164, 2213), 'numpy.zeros', 'np.zeros', (['(docs_n, words_n + N)'], {'dtype': 'np.float32'}), '((docs_n, words_n + N), dtype=np.float32)\n', (2172, 2213), True, 
'import numpy as np\n'), ((2238, 2277), 'numpy.zeros', 'np.zeros', (['(docs_n, N)'], {'dtype': 'np.float32'}), '((docs_n, N), dtype=np.float32)\n', (2246, 2277), True, 'import numpy as np\n'), ((2396, 2407), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2404, 2407), True, 'import numpy as np\n'), ((3614, 3639), 'os.listdir', 'os.listdir', (['data_dir_path'], {}), '(data_dir_path)\n', (3624, 3639), False, 'import os\n'), ((3706, 3731), 'os.listdir', 'os.listdir', (['data_dir_path'], {}), '(data_dir_path)\n', (3716, 3731), False, 'import os\n'), ((7927, 7952), 'os.listdir', 'os.listdir', (['data_dir_path'], {}), '(data_dir_path)\n', (7937, 7952), False, 'import os\n'), ((8072, 8097), 'os.listdir', 'os.listdir', (['data_dir_path'], {}), '(data_dir_path)\n', (8082, 8097), False, 'import os\n'), ((1195, 1252), 'termcolor.colored', 'colored', (['"""Загрузка обученной модели Word2Vec...."""', '"""blue"""'], {}), "('Загрузка обученной модели Word2Vec....', 'blue')\n", (1202, 1252), False, 'from termcolor import colored\n'), ((2538, 2571), 'numpy.concatenate', 'np.concatenate', (['(tfidf[i], v_sum)'], {}), '((tfidf[i], v_sum))\n', (2552, 2571), True, 'import numpy as np\n'), ((3772, 3813), 'os.path.join', 'os.path.join', (['data_dir_path', 'category_dir'], {}), '(data_dir_path, category_dir)\n', (3784, 3813), False, 'import os\n'), ((5008, 5034), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': 'svm_kernel'}), '(kernel=svm_kernel)\n', (5015, 5034), False, 'from sklearn import svm\n'), ((5067, 5091), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (5074, 5091), False, 'from sklearn import svm\n'), ((5168, 5203), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)'}), '(n_neighbors=7)\n', (5188, 5203), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((5633, 5658), 'termcolor.colored', 'colored', (['"""Готово!"""', '"""red"""'], {}), "('Готово!', 'red')\n", (5640, 5658), False, 
'from termcolor import colored\n'), ((5691, 5723), 'termcolor.colored', 'colored', (['"""Тестирование."""', '"""cyan"""'], {}), "('Тестирование.', 'cyan')\n", (5698, 5723), False, 'from termcolor import colored\n'), ((8138, 8179), 'os.path.join', 'os.path.join', (['data_dir_path', 'category_dir'], {}), '(data_dir_path, category_dir)\n', (8150, 8179), False, 'import os\n'), ((9313, 9362), 'termcolor.colored', 'colored', (['"""Расчёт векторов слов по w2v..."""', '"""blue"""'], {}), "('Расчёт векторов слов по w2v...', 'blue')\n", (9320, 9362), False, 'from termcolor import colored\n'), ((9908, 9958), 'termcolor.colored', 'colored', (['"""Взвешивание и конкатенация..."""', '"""yellow"""'], {}), "('Взвешивание и конкатенация...', 'yellow')\n", (9915, 9958), False, 'from termcolor import colored\n'), ((10610, 10636), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': 'svm_kernel'}), '(kernel=svm_kernel)\n', (10617, 10636), False, 'from sklearn import svm\n'), ((10669, 10693), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (10676, 10693), False, 'from sklearn import svm\n'), ((10738, 10773), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)'}), '(n_neighbors=7)\n', (10758, 10773), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((11091, 11116), 'termcolor.colored', 'colored', (['"""Готово!"""', '"""red"""'], {}), "('Готово!', 'red')\n", (11098, 11116), False, 'from termcolor import colored\n'), ((11149, 11181), 'termcolor.colored', 'colored', (['"""Тестирование."""', '"""cyan"""'], {}), "('Тестирование.', 'cyan')\n", (11156, 11181), False, 'from termcolor import colored\n'), ((11859, 11909), 'termcolor.colored', 'colored', (['"""Взвешивание и конкатенация..."""', '"""yellow"""'], {}), "('Взвешивание и конкатенация...', 'yellow')\n", (11866, 11909), False, 'from termcolor import colored\n'), ((5281, 5321), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], 
{'n_estimators': '(100)'}), '(n_estimators=100)\n', (5303, 5321), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((10818, 10858), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (10840, 10858), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3891, 3915), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (3903, 3915), False, 'import os\n'), ((5411, 5451), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'activation': 'mlp_activation'}), '(activation=mlp_activation)\n', (5424, 5451), False, 'from sklearn.neural_network import MLPClassifier\n'), ((5484, 5499), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (5497, 5499), False, 'from sklearn.neural_network import MLPClassifier\n'), ((8257, 8281), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (8269, 8281), False, 'import os\n'), ((10914, 10954), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'activation': 'mlp_activation'}), '(activation=mlp_activation)\n', (10927, 10954), False, 'from sklearn.neural_network import MLPClassifier\n'), ((10987, 11002), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {}), '()\n', (11000, 11002), False, 'from sklearn.neural_network import MLPClassifier\n')] |
# Project: GBS Tool
# Author: Dr. <NAME>, <EMAIL>
# Date: September 30, 2017
# License: MIT License (see LICENSE file of this package for more information)
'''
Description
OBJECTIVE: The functionality of this class is to:
a) determine if the data provide in wtgDescriptor.xml already is sufficiently dense to define the power curve without
estimation of additional points.
b) if necessary estimate additional points of the power curve such that a dense curve available.
A dense curve will have power levels associated with all wind speeds in increments of 0.1 m/s and approximates power in
1 kW steps. Several methods are useful in estimating power curves for wind turbines. For now, this module only implements
calculation of via cubic splines. For a source discussing splines and other methods of estimation see Sohoni, Gupta and Nema, 2016:
https://www.hindawi.com/journals/jen/2016/8519785/
ASSUMPTIONS:
INPUTS: data used as input is already from a cleaned up power curve. This tool is not intended to produce a power
from operational time-series data, which would require cleaning and filtering. Thus, it is required that for each
wind speed value one and only one power value exists. That is, temperature compensations, if desired will have to be
handled separately.
CUTOUTWINDSPEEDMAX describes the point beyond which the turbine is shut off for protection. Right at this point, it
produces power at nameplate capacity levels. Beyond this point the turbine is stopped and P = 0 kW.
CUTOUTWINDSPEEDMIN describes the point where the turbine does not produce power any longer, e.g. P = 0 kW at this
wind speed.
CUTINWINDSPEED is the minimum wind speed at which a stopped turbine starts up. At this point power production is
immediately greater zero, i.e. CUTOUTWINDSPEEDMIN < CUTINWINDSPEED.
OUTPUTS:
The output is a WindPowerCurve object, which contains the new estimated power curve, as well as additional methods
to estimate air density corrections (later).
'''
# General Imports
import numpy as np
from scipy.interpolate import CubicSpline
'''
The WindPowerCurve class contains methods to determine a wind power curve from data provided in the wtgDescriptor.xml
file.
INPUTS:
powerCurveDataPoints: list of tuples of floats, e.g., [(ws0, p0), ... , (wsN, pN)], where ws and p are wind speed (m/s) and
power (kW) respectively.
cutInWindSpeed: float, wind speed at which a stopped turbine begins to produce power again, m/s
cutOutWindSpeedMin: float, the wind speed at which the turbine does not produce power anymore due to lack of wind
power, units are m/s
cutOutWindSpeedMax: float, the wind speed beyond which the turbine is stopped for protection, units are m/s.
POutMaxPa: float, nameplate power of the turbine, units are kW.
OUTPUTS:
powerCurve: list of tuples of floats, with a defined range ws = 0 m/s to ws = cutOutWindSpeedMax and some fixed
points
powerCurve = [(0,0), (cutOutWindSpeedMin, 0), ..., (cutOutWindSpeedMax, PCutOutWindSpeedMax), (>cutOutWindSpeedMax, 0)]
Wind speeds are reported in increments of 0.1 m/s, power values are in kW.
powerCurveInt: list of tuples of integers derived from the float values in `powerCurve` by rounding to the nearest
integer and typecasting from float to int. Wind speed data, to preserve resolution, is multiplied by 10, e.g.,
3.6 m/s is now reported as 36, and power data is rounded to the next kW.
METHODS:
checkInputs: Internal method. Checks input data for basic consistency and ensures that there are no duplicate data points which could
interfere with some of the curve approximations that assume there is a unique power value for each wind speed.
cubicSplineCurveEstimator: calculates a cubic spline for the give input data set with the constraints given by the
power curve, cut-in and cut-out wind speeds, a condition that the boundary conditions be `clamped`, i.e., that the
first derivative at the end points be zero, and with the condition that the spline not extrapolate to points outside
the input interval.
'''
class WindPowerCurve:
# TODO: Testing with sparse data required. Additional checks to be implemented. Documentation
# Constructor
def __init__(self):
# ------Variable definitions-------
# ******Input variables************
# tuples of wind speed and power, list of tuples of floats, (m/s, kW)
self.powerCurveDataPoints = []
# Cut-in wind speed, float, m/s
self.cutInWindSpeed = 0
# Cut-out wind speed min, float, m/s
self.cutOutWindSpeedMin = 0
# Cut-out wind speed max, float, m/s
self.cutOutWindSpeedMax = 0
# Nameplate power, float, kW
self.POutMaxPa = 0
# ******Internal variables*********
# Set of coordinates (known points and constraints).
self.coords = []
# ******Output variables***********
# the power curve, list of tuples of floats, (m/s, kW)
self.powerCurve = []
# the power curve with all entries of type `int`. For this, the wind speeds are multiplied by a factor of 10.
self.powerCurveInt = []
def checkInputs(self):
'''
checkInputs makes sure the input data is self-consistent and setup such that curve estimation methods can be run.
This function should be called by all curve estimators.
'''
# Check input data is self-consistent
if not self.powerCurveDataPoints:
raise ValueError('PowerCurveDataPoints is empty list')
elif self.cutInWindSpeed == 0 or self.cutOutWindSpeedMin == 0 or self.cutOutWindSpeedMax == 0:
raise ValueError('Cut-in or cut-out wind speed not initialized with physical value.')
elif self.cutInWindSpeed < self.cutOutWindSpeedMin or self.cutOutWindSpeedMin > self.cutOutWindSpeedMax:
raise ValueError('Constraining wind speeds not ordered correctly.')
elif self.POutMaxPa == 0:
raise ValueError('Nameplate capacity not initialized.')
# Setting up the extended data set including constraining tuples
inptDataPoints = self.powerCurveDataPoints
inptDataPoints.append((self.cutOutWindSpeedMin, 0))
inptDataPoints.append((self.cutOutWindSpeedMax, self.POutMaxPa))
inptDataPoints.append((0, 0))
# Sort the new list of tuples based on wind speeds.
inptDataPoints.sort()
# Check for duplicates and clean up if possible.
prevVal = (-999, -999)
for inptDataPoint in inptDataPoints:
# remove duplicates
if inptDataPoint[0] == prevVal[0] and inptDataPoint[1] == prevVal[1]:
inptDataPoints.remove(inptDataPoint)
# if there's multiple power values for the same wind speed raise an exception
elif inptDataPoint[0] == prevVal[0] and inptDataPoint[1] != prevVal[1]:
raise ValueError('Power curve data points ill-defined, multiple power values for single wind speed.')
# Copy current to previous value and proceed.
prevVal = inptDataPoint
self.coords = inptDataPoints
def cubicSplineCurveEstimator(self):
'''
cubicSplineCurveEstimator calculates a cubic spline for the given data points in powerCurveDataPoints using
constraints defined by cut-in an cut-out wind speeds.
:return:
'''
self.checkInputs()
# Setup x and y coordinates as numpy arrays
x, y = zip(*self.coords)
xCoords = np.array(x)
yCoords = np.array(y)
# Calculate the cubic spline using the scipy function for this. Boundary conditions are that the derivative at
# end points has to be zero ('clamped'). We do not allow interpolation beyond existing data points. Wind speeds
# beyond the wind speeds with power data are set to NaN (and subsequently to zero further down).
cs = CubicSpline(xCoords, yCoords, bc_type='clamped', extrapolate=False)
# To pull an array of values from the cubic spline, we need to setup a wind speed array. This sensibly starts at
# 0 m/s and in this case we carry it out to twice the maximum cut-out wind speed, with a step size depending on
# the range of wind speeds observed.
# TODO: this should be pulled from defaults
self.wsScale = 1000
windSpeeds = np.arange(0, self.cutOutWindSpeedMax*2, 1/self.wsScale)
# With wind speed vector setup, we can extract values from the cubic spline of each wind speed point. We
# immediately backfill the NaNs created by requesting data outside of the initial data range with zeros.
# NOTE that this assumes that we know that this is true because we have defined values all the way to the
# cut-out wind speed.
power = np.nan_to_num(cs(windSpeeds))
# check if any values from the cubic spline went out of bounds. Correct if did.
for index, item in enumerate(power):
if item > self.POutMaxPa:
power[index] = self.POutMaxPa
elif item < 0:
power[index] = 0
# The results are packaged into the power curve list, which is the key output of this class.
self.powerCurve = list(zip(windSpeeds, power))
# For computational and memory efficiency we also provide a rounded and integer only power curve. For this the
# wind speeds are multiplied by 10 and then type cast to integers. The power values are rounded to the nearest
# kW and type cast to int.
# NOTE that this variable is significantly more efficient in memory usage.
self.powerCurveInt = zip(np.rint(self.wsScale*windSpeeds).astype(int), np.rint(power).astype(int))
| [
"scipy.interpolate.CubicSpline",
"numpy.rint",
"numpy.array",
"numpy.arange"
] | [((7657, 7668), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (7665, 7668), True, 'import numpy as np\n'), ((7687, 7698), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7695, 7698), True, 'import numpy as np\n'), ((8057, 8124), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['xCoords', 'yCoords'], {'bc_type': '"""clamped"""', 'extrapolate': '(False)'}), "(xCoords, yCoords, bc_type='clamped', extrapolate=False)\n", (8068, 8124), False, 'from scipy.interpolate import CubicSpline\n'), ((8515, 8574), 'numpy.arange', 'np.arange', (['(0)', '(self.cutOutWindSpeedMax * 2)', '(1 / self.wsScale)'], {}), '(0, self.cutOutWindSpeedMax * 2, 1 / self.wsScale)\n', (8524, 8574), True, 'import numpy as np\n'), ((9814, 9848), 'numpy.rint', 'np.rint', (['(self.wsScale * windSpeeds)'], {}), '(self.wsScale * windSpeeds)\n', (9821, 9848), True, 'import numpy as np\n'), ((9860, 9874), 'numpy.rint', 'np.rint', (['power'], {}), '(power)\n', (9867, 9874), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library developed by
# Visual Analytics and Imaging System Group of the Science Technology
# Facilities Council, STFC
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from ccpi.framework import DataProcessor, AcquisitionData,\
AcquisitionGeometry, ImageGeometry, ImageData
from ccpi.reconstruction.parallelbeam import alg as pbalg
import numpy
def setupCCPiGeometries(voxel_num_x, voxel_num_y, voxel_num_z, angles, counter):
vg = ImageGeometry(voxel_num_x=voxel_num_x,voxel_num_y=voxel_num_y, voxel_num_z=voxel_num_z)
Phantom_ccpi = ImageData(geometry=vg,
dimension_labels=['horizontal_x','horizontal_y','vertical'])
#.subset(['horizontal_x','horizontal_y','vertical'])
# ask the ccpi code what dimensions it would like
voxel_per_pixel = 1
geoms = pbalg.pb_setup_geometry_from_image(Phantom_ccpi.as_array(),
angles,
voxel_per_pixel )
pg = AcquisitionGeometry('parallel',
'3D',
angles,
geoms['n_h'], 1.0,
geoms['n_v'], 1.0 #2D in 3D is a slice 1 pixel thick
)
center_of_rotation = Phantom_ccpi.get_dimension_size('horizontal_x') / 2
ad = AcquisitionData(geometry=pg,dimension_labels=['angle','vertical','horizontal'])
geoms_i = pbalg.pb_setup_geometry_from_acquisition(ad.as_array(),
angles,
center_of_rotation,
voxel_per_pixel )
counter+=1
if counter < 4:
if (not ( geoms_i == geoms )):
print ("not equal and {0}".format(counter))
X = max(geoms['output_volume_x'], geoms_i['output_volume_x'])
Y = max(geoms['output_volume_y'], geoms_i['output_volume_y'])
Z = max(geoms['output_volume_z'], geoms_i['output_volume_z'])
return setupCCPiGeometries(X,Y,Z,angles, counter)
else:
return geoms
else:
return geoms_i
class CCPiForwardProjector(DataProcessor):
'''Normalization based on flat and dark
This processor read in a AcquisitionData and normalises it based on
the instrument reading with and without incident photons or neutrons.
Input: AcquisitionData
Parameter: 2D projection with flat field (or stack)
2D projection with dark field (or stack)
Output: AcquisitionDataSetn
'''
def __init__(self,
image_geometry = None,
acquisition_geometry = None,
output_axes_order = None):
if output_axes_order is None:
# default ccpi projector image storing order
output_axes_order = ['angle','vertical','horizontal']
kwargs = {
'image_geometry' : image_geometry,
'acquisition_geometry' : acquisition_geometry,
'output_axes_order' : output_axes_order,
'default_image_axes_order' : ['horizontal_x','horizontal_y','vertical'],
'default_acquisition_axes_order' : ['angle','vertical','horizontal']
}
super(CCPiForwardProjector, self).__init__(**kwargs)
def check_input(self, dataset):
if dataset.number_of_dimensions == 3 or dataset.number_of_dimensions == 2:
# sort in the order that this projector needs it
return True
else:
raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
.format(dataset.number_of_dimensions))
def process(self):
volume = self.get_input()
volume_axes = volume.get_data_axes_order(new_order=self.default_image_axes_order)
if not volume_axes == [0,1,2]:
volume.array = numpy.transpose(volume.array, volume_axes)
pixel_per_voxel = 1 # should be estimated from image_geometry and
# acquisition_geometry
if self.acquisition_geometry.geom_type == 'parallel':
pixels = pbalg.pb_forward_project(volume.as_array(),
self.acquisition_geometry.angles,
pixel_per_voxel)
out = AcquisitionData(geometry=self.acquisition_geometry,
label_dimensions=self.default_acquisition_axes_order)
out.fill(pixels)
out_axes = out.get_data_axes_order(new_order=self.output_axes_order)
if not out_axes == [0,1,2]:
out.array = numpy.transpose(out.array, out_axes)
return out
else:
raise ValueError('Cannot process cone beam')
class CCPiBackwardProjector(DataProcessor):
'''Backward projector
This processor reads in a AcquisitionData and performs a backward projection,
i.e. project to reconstruction space.
Notice that it assumes that the center of rotation is in the middle
of the horizontal axis: in case when that's not the case it can be chained
with the AcquisitionDataPadder.
Input: AcquisitionData
Parameter: 2D projection with flat field (or stack)
2D projection with dark field (or stack)
Output: AcquisitionDataSetn
'''
def __init__(self,
image_geometry = None,
acquisition_geometry = None,
output_axes_order=None):
if output_axes_order is None:
# default ccpi projector image storing order
output_axes_order = ['horizontal_x','horizontal_y','vertical']
kwargs = {
'image_geometry' : image_geometry,
'acquisition_geometry' : acquisition_geometry,
'output_axes_order' : output_axes_order,
'default_image_axes_order' : ['horizontal_x','horizontal_y','vertical'],
'default_acquisition_axes_order' : ['angle','vertical','horizontal']
}
super(CCPiBackwardProjector, self).__init__(**kwargs)
def check_input(self, dataset):
if dataset.number_of_dimensions == 3 or dataset.number_of_dimensions == 2:
return True
else:
raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
.format(dataset.number_of_dimensions))
def process(self):
projections = self.get_input()
projections_axes = projections.get_data_axes_order(new_order=self.default_acquisition_axes_order)
if not projections_axes == [0,1,2]:
projections.array = numpy.transpose(projections.array, projections_axes)
pixel_per_voxel = 1 # should be estimated from image_geometry and acquisition_geometry
image_geometry = ImageGeometry(voxel_num_x = self.acquisition_geometry.pixel_num_h,
voxel_num_y = self.acquisition_geometry.pixel_num_h,
voxel_num_z = self.acquisition_geometry.pixel_num_v)
# input centered/padded acquisitiondata
center_of_rotation = projections.get_dimension_size('horizontal') / 2
if self.acquisition_geometry.geom_type == 'parallel':
back = pbalg.pb_backward_project(
projections.as_array(),
self.acquisition_geometry.angles,
center_of_rotation,
pixel_per_voxel
)
out = ImageData(geometry=self.image_geometry,
dimension_labels=self.default_image_axes_order)
out_axes = out.get_data_axes_order(new_order=self.output_axes_order)
if not out_axes == [0,1,2]:
back = numpy.transpose(back, out_axes)
out.fill(back)
return out
else:
raise ValueError('Cannot process cone beam')
class AcquisitionDataPadder(DataProcessor):
'''Normalization based on flat and dark
This processor read in a AcquisitionData and normalises it based on
the instrument reading with and without incident photons or neutrons.
Input: AcquisitionData
Parameter: 2D projection with flat field (or stack)
2D projection with dark field (or stack)
Output: AcquisitionDataSetn
'''
def __init__(self,
center_of_rotation = None,
acquisition_geometry = None,
pad_value = 1e-5):
kwargs = {
'acquisition_geometry' : acquisition_geometry,
'center_of_rotation' : center_of_rotation,
'pad_value' : pad_value
}
super(AcquisitionDataPadder, self).__init__(**kwargs)
def check_input(self, dataset):
if self.acquisition_geometry is None:
self.acquisition_geometry = dataset.geometry
if dataset.number_of_dimensions == 3:
return True
else:
raise ValueError("Expected input dimensions is 2 or 3, got {0}"\
.format(dataset.number_of_dimensions))
def process(self):
projections = self.get_input()
w = projections.get_dimension_size('horizontal')
delta = w - 2 * self.center_of_rotation
padded_width = int (
numpy.ceil(abs(delta)) + w
)
delta_pix = padded_width - w
voxel_per_pixel = 1
geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(),
self.acquisition_geometry.angles,
self.center_of_rotation,
voxel_per_pixel )
padded_geometry = self.acquisition_geometry.clone()
padded_geometry.pixel_num_h = geom['n_h']
padded_geometry.pixel_num_v = geom['n_v']
delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h
delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v
if delta_pix_h == 0:
delta_pix_h = delta_pix
padded_geometry.pixel_num_h = padded_width
#initialize a new AcquisitionData with values close to 0
out = AcquisitionData(geometry=padded_geometry)
out = out + self.pad_value
#pad in the horizontal-vertical plane -> slice on angles
if delta > 0:
#pad left of middle
command = "out.array["
for i in range(out.number_of_dimensions):
if out.dimension_labels[i] == 'horizontal':
value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w)
command = command + str(value)
else:
if out.dimension_labels[i] == 'vertical' :
value = '{0}:'.format(delta_pix_v)
command = command + str(value)
else:
command = command + ":"
if i < out.number_of_dimensions -1:
command = command + ','
command = command + '] = projections.array'
#print (command)
else:
#pad right of middle
command = "out.array["
for i in range(out.number_of_dimensions):
if out.dimension_labels[i] == 'horizontal':
value = '{0}:{1}'.format(0, w)
command = command + str(value)
else:
if out.dimension_labels[i] == 'vertical' :
value = '{0}:'.format(delta_pix_v)
command = command + str(value)
else:
command = command + ":"
if i < out.number_of_dimensions -1:
command = command + ','
command = command + '] = projections.array'
#print (command)
#cleaned = eval(command)
exec(command)
return out | [
"ccpi.framework.ImageGeometry",
"numpy.transpose",
"ccpi.framework.ImageData",
"ccpi.framework.AcquisitionData",
"ccpi.framework.AcquisitionGeometry"
] | [((1080, 1173), 'ccpi.framework.ImageGeometry', 'ImageGeometry', ([], {'voxel_num_x': 'voxel_num_x', 'voxel_num_y': 'voxel_num_y', 'voxel_num_z': 'voxel_num_z'}), '(voxel_num_x=voxel_num_x, voxel_num_y=voxel_num_y, voxel_num_z\n =voxel_num_z)\n', (1093, 1173), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData\n'), ((1188, 1277), 'ccpi.framework.ImageData', 'ImageData', ([], {'geometry': 'vg', 'dimension_labels': "['horizontal_x', 'horizontal_y', 'vertical']"}), "(geometry=vg, dimension_labels=['horizontal_x', 'horizontal_y',\n 'vertical'])\n", (1197, 1277), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData\n'), ((1658, 1746), 'ccpi.framework.AcquisitionGeometry', 'AcquisitionGeometry', (['"""parallel"""', '"""3D"""', 'angles', "geoms['n_h']", '(1.0)', "geoms['n_v']", '(1.0)'], {}), "('parallel', '3D', angles, geoms['n_h'], 1.0, geoms[\n 'n_v'], 1.0)\n", (1677, 1746), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData\n'), ((2027, 2113), 'ccpi.framework.AcquisitionData', 'AcquisitionData', ([], {'geometry': 'pg', 'dimension_labels': "['angle', 'vertical', 'horizontal']"}), "(geometry=pg, dimension_labels=['angle', 'vertical',\n 'horizontal'])\n", (2042, 2113), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData\n'), ((7871, 8046), 'ccpi.framework.ImageGeometry', 'ImageGeometry', ([], {'voxel_num_x': 'self.acquisition_geometry.pixel_num_h', 'voxel_num_y': 'self.acquisition_geometry.pixel_num_h', 'voxel_num_z': 'self.acquisition_geometry.pixel_num_v'}), '(voxel_num_x=self.acquisition_geometry.pixel_num_h,\n voxel_num_y=self.acquisition_geometry.pixel_num_h, voxel_num_z=self.\n acquisition_geometry.pixel_num_v)\n', (7884, 8046), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, 
ImageGeometry, ImageData\n'), ((11636, 11677), 'ccpi.framework.AcquisitionData', 'AcquisitionData', ([], {'geometry': 'padded_geometry'}), '(geometry=padded_geometry)\n', (11651, 11677), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData\n'), ((4760, 4802), 'numpy.transpose', 'numpy.transpose', (['volume.array', 'volume_axes'], {}), '(volume.array, volume_axes)\n', (4775, 4802), False, 'import numpy\n'), ((5236, 5346), 'ccpi.framework.AcquisitionData', 'AcquisitionData', ([], {'geometry': 'self.acquisition_geometry', 'label_dimensions': 'self.default_acquisition_axes_order'}), '(geometry=self.acquisition_geometry, label_dimensions=self.\n default_acquisition_axes_order)\n', (5251, 5346), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData\n'), ((7686, 7738), 'numpy.transpose', 'numpy.transpose', (['projections.array', 'projections_axes'], {}), '(projections.array, projections_axes)\n', (7701, 7738), False, 'import numpy\n'), ((8620, 8712), 'ccpi.framework.ImageData', 'ImageData', ([], {'geometry': 'self.image_geometry', 'dimension_labels': 'self.default_image_axes_order'}), '(geometry=self.image_geometry, dimension_labels=self.\n default_image_axes_order)\n', (8629, 8712), False, 'from ccpi.framework import DataProcessor, AcquisitionData, AcquisitionGeometry, ImageGeometry, ImageData\n'), ((5560, 5596), 'numpy.transpose', 'numpy.transpose', (['out.array', 'out_axes'], {}), '(out.array, out_axes)\n', (5575, 5596), False, 'import numpy\n'), ((8899, 8930), 'numpy.transpose', 'numpy.transpose', (['back', 'out_axes'], {}), '(back, out_axes)\n', (8914, 8930), False, 'import numpy\n')] |
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0,1,2,3"
import tensorflow as tf
import gzip
import numpy as np
import os
import six.moves.cPickle as pickle
import time
def load_mnist_data(dataset):
""" Load the dataset
Code adapted from http://deeplearning.net/tutorial/code/logistic_sgd.py
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
"""
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_path = os.path.join(
os.path.split(__file__)[0],
dataset
)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
from six.moves import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print('Downloading data from %s' % origin)
urllib.request.urlretrieve(origin, dataset)
print('Loading data...')
# Load the dataset
with gzip.open(dataset, 'rb') as f:
try:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
except:
train_set, valid_set, test_set = pickle.load(f)
return train_set, valid_set, test_set
def convert_to_one_hot(vals, max_val = 0):
"""Helper method to convert label array to one-hot array."""
if max_val == 0:
max_val = vals.max() + 1
one_hot_vals = np.zeros((vals.size, max_val))
one_hot_vals[np.arange(vals.size), vals] = 1
return one_hot_vals
# if __name__ == "__main__":
# global_batch_size = 4000
# mirrored_strategy = tf.distribute.MirroredStrategy()
# # 在mirrored_strategy空间下
# # network
# with mirrored_strategy.scope():
# rand = np.random.RandomState(seed=123)
# W1_val = rand.normal(scale=0.1, size=(784, 256))
# W2_val = rand.normal(scale=0.1, size=(256, 256))
# W3_val = rand.normal(scale=0.1, size=(256, 10))
# b1_val = rand.normal(scale=0.1, size=(256))
# b2_val = rand.normal(scale=0.1, size=(256))
# b3_val = rand.normal(scale=0.1, size=(10))
# W1 = tf.Variable(W1_val, dtype = tf.float32)
# W2 = tf.Variable(W2_val, dtype = tf.float32)
# W3 = tf.Variable(W3_val, dtype = tf.float32)
# b1 = tf.Variable(b1_val, dtype = tf.float32)
# b2 = tf.Variable(b2_val, dtype = tf.float32)
# b3 = tf.Variable(b3_val, dtype = tf.float32)
# # relu(X W1 + b1)
# z1 = tf.matmul(x, W1) + b1
# z2 = tf.nn.relu(z1)
# # relu(z2 W2 + b2)
# z3 = tf.matmul(z2,W2) + b2
# z4 = tf.nn.relu(z3)
# # relu(z4 W3 + b3)
# z5 = tf.matmul(z4,W3) + b3
# y = tf.nn.softmax(z5)
# # loss
# # cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_real*tf.log(y),reduction_indices=[1]))
# cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = z5 , labels = y_real))
# optimizer = tf.train.GradientDescentOptimizer(0.1)
# # 在mirrored_strategy空间下
# # dataset
# with mirrored_strategy.scope():
# datasets = load_mnist_data("mnist.pkl.gz")
# train_set_x, train_set_y = datasets[0]
# valid_set_x, valid_set_y = datasets[1]
# test_set_x, test_set_y = datasets[2]
# n_train_batches = train_set_x.shape[0] // global_batch_size
# n_valid_batches = valid_set_x.shape[0] // global_batch_size
# dataset = tf.data.Dataset.from_tensors((train_set_x, convert_to_one_hot(train_set_y, max_val=10))).batch(global_batch_size)
# # print(dataset)
# # 这里要分发一下数据
# dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
# # print(dist_dataset.__dict__['_cloned_datasets'])
# def train_step(dist_inputs):
# def step_fn(inputs):
# features, labels = inputs
# logits = model(features)
# cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
# logits=logits, labels=labels)
# loss = tf.reduce_sum(cross_entropy) * (1.0 / global_batch_size)
# train_op = optimizer.minimize(loss)
# with tf.control_dependencies([train_op]):
# return tf.identity(loss)
# # 返回所有gpu的loss
# per_replica_losses = mirrored_strategy.experimental_run_v2(step_fn, args=(dist_inputs,))
# # reduce loss并返回
# mean_loss = mirrored_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
# return mean_loss
# with mirrored_strategy.scope():
# input_iterator = dist_dataset.make_initializable_iterator()
# iterator_init = input_iterator.initialize()
# var_init = tf.global_variables_initializer()
# loss = train_step(input_iterator.get_next())
# with tf.Session() as sess:
# sess.run([var_init, iterator_init])
# for _ in range(100):
# print(sess.run(loss))
if __name__ == "__main__":
start = time.time()
global_batch_size = 4000
mirrored_strategy = tf.distribute.MirroredStrategy(devices=["/gpu:0", "/gpu:1", "/gpu:2", "/gpu:3"])
# 在mirrored_strategy空间下
with mirrored_strategy.scope():
# model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
model = tf.keras.Sequential([tf.keras.layers.Dense(256, input_shape=(784,), activation = 'relu'),
tf.keras.layers.Dense(256, activation = 'relu'),
tf.keras.layers.Dense(10, activation = 'relu')])
optimizer = tf.train.GradientDescentOptimizer(0.001)
# 在mirrored_strategy空间下
with mirrored_strategy.scope():
datasets = load_mnist_data("mnist.pkl.gz")
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
n_train_batches = train_set_x.shape[0] // global_batch_size
n_valid_batches = valid_set_x.shape[0] // global_batch_size
# dataset = tf.data.Dataset.from_tensor_slices((tf.cast(train_set_x, tf.float32),
# tf.cast(train_set_y, tf.int32))).batch(global_batch_size)
dataset = tf.data.Dataset.from_tensor_slices((tf.cast(train_set_x, tf.float32),
tf.cast(convert_to_one_hot(train_set_y, max_val=10), tf.int32))).repeat(100).batch(global_batch_size)
# print("========>", dataset)
# 这里要分发一下数据
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
# print(dist_dataset.__dict__['_cloned_datasets'])
def train_step(dist_inputs):
def step_fn(inputs):
features, labels = inputs
# print(features)
# print(labels)
logits = model(features)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=labels)
loss = tf.reduce_sum(cross_entropy) * (1.0 / global_batch_size)
train_op = optimizer.minimize(loss)
with tf.control_dependencies([train_op]):
return tf.identity(loss)
# 返回所有gpu的loss
per_replica_losses = mirrored_strategy.experimental_run_v2(step_fn, args=(dist_inputs,))
# reduce loss并返回
mean_loss = mirrored_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
with mirrored_strategy.scope():
input_iterator = dist_dataset.make_initializable_iterator()
iterator_init = input_iterator.initialize()
var_init = tf.global_variables_initializer()
loss = train_step(input_iterator.get_next())
with tf.Session() as sess:
# start = time.time() 1
sess.run([var_init, iterator_init])
# start = time.time()
for _ in range(1000):
# print("here")
sess.run(loss)
# print(sess.run(loss))
end = time.time()
print("running time is %g s"%(end - start))
'''
if __name__ == "__main__":
global_batch_size = 16
mirrored_strategy = tf.distribute.MirroredStrategy()
# 在mirrored_strategy空间下
with mirrored_strategy.scope():
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
optimizer = tf.train.GradientDescentOptimizer(0.1)
# 在mirrored_strategy空间下
with mirrored_strategy.scope():
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(1000).batch(global_batch_size)
print(dataset)
# 这里要分发一下数据
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
print(dist_dataset.__dict__['_cloned_datasets'])
def train_step(dist_inputs):
def step_fn(inputs):
features, labels = inputs
logits = model(features)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=labels)
loss = tf.reduce_sum(cross_entropy) * (1.0 / global_batch_size)
train_op = optimizer.minimize(loss)
with tf.control_dependencies([train_op]):
return tf.identity(loss)
# 返回所有gpu的loss
per_replica_losses = mirrored_strategy.experimental_run_v2(step_fn, args=(dist_inputs,))
# reduce loss并返回
mean_loss = mirrored_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
return mean_loss
with mirrored_strategy.scope():
input_iterator = dist_dataset.make_initializable_iterator()
iterator_init = input_iterator.initialize()
var_init = tf.global_variables_initializer()
loss = train_step(input_iterator.get_next())
with tf.Session() as sess:
sess.run([var_init, iterator_init])
for _ in range(100):
print(sess.run(loss))
''' | [
"tensorflow.reduce_sum",
"gzip.open",
"tensorflow.control_dependencies",
"tensorflow.keras.layers.Dense",
"tensorflow.global_variables_initializer",
"tensorflow.identity",
"tensorflow.distribute.MirroredStrategy",
"numpy.zeros",
"tensorflow.Session",
"time.time",
"tensorflow.nn.softmax_cross_ent... | [((470, 492), 'os.path.split', 'os.path.split', (['dataset'], {}), '(dataset)\n', (483, 492), False, 'import os\n'), ((1604, 1634), 'numpy.zeros', 'np.zeros', (['(vals.size, max_val)'], {}), '((vals.size, max_val))\n', (1612, 1634), True, 'import numpy as np\n'), ((5214, 5225), 'time.time', 'time.time', ([], {}), '()\n', (5223, 5225), False, 'import time\n'), ((5279, 5364), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {'devices': "['/gpu:0', '/gpu:1', '/gpu:2', '/gpu:3']"}), "(devices=['/gpu:0', '/gpu:1', '/gpu:2', '/gpu:3']\n )\n", (5309, 5364), True, 'import tensorflow as tf\n'), ((1076, 1119), 'six.moves.urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['origin', 'dataset'], {}), '(origin, dataset)\n', (1102, 1119), False, 'from six.moves import urllib\n'), ((1183, 1207), 'gzip.open', 'gzip.open', (['dataset', '"""rb"""'], {}), "(dataset, 'rb')\n", (1192, 1207), False, 'import gzip\n'), ((5806, 5846), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.001)'], {}), '(0.001)\n', (5839, 5846), True, 'import tensorflow as tf\n'), ((7857, 7890), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7888, 7890), True, 'import tensorflow as tf\n'), ((8249, 8260), 'time.time', 'time.time', ([], {}), '()\n', (8258, 8260), False, 'import time\n'), ((523, 546), 'os.path.isfile', 'os.path.isfile', (['dataset'], {}), '(dataset)\n', (537, 546), False, 'import os\n'), ((715, 739), 'os.path.isfile', 'os.path.isfile', (['new_path'], {}), '(new_path)\n', (729, 739), False, 'import os\n'), ((816, 839), 'os.path.isfile', 'os.path.isfile', (['dataset'], {}), '(dataset)\n', (830, 839), False, 'import os\n'), ((1272, 1305), 'six.moves.cPickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (1283, 1305), True, 'import six.moves.cPickle as pickle\n'), ((1652, 1672), 'numpy.arange', 
'np.arange', (['vals.size'], {}), '(vals.size)\n', (1661, 1672), True, 'import numpy as np\n'), ((7111, 7183), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logits', 'labels': 'labels'}), '(logits=logits, labels=labels)\n', (7153, 7183), True, 'import tensorflow as tf\n'), ((7957, 7969), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7967, 7969), True, 'import tensorflow as tf\n'), ((646, 669), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (659, 669), False, 'import os\n'), ((1367, 1381), 'six.moves.cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1378, 1381), True, 'import six.moves.cPickle as pickle\n'), ((5545, 5610), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'input_shape': '(784,)', 'activation': '"""relu"""'}), "(256, input_shape=(784,), activation='relu')\n", (5566, 5610), True, 'import tensorflow as tf\n'), ((5651, 5696), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (5672, 5696), True, 'import tensorflow as tf\n'), ((5737, 5781), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (5758, 5781), True, 'import tensorflow as tf\n'), ((7216, 7244), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['cross_entropy'], {}), '(cross_entropy)\n', (7229, 7244), True, 'import tensorflow as tf\n'), ((7338, 7373), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[train_op]'], {}), '([train_op])\n', (7361, 7373), True, 'import tensorflow as tf\n'), ((7398, 7415), 'tensorflow.identity', 'tf.identity', (['loss'], {}), '(loss)\n', (7409, 7415), True, 'import tensorflow as tf\n'), ((6502, 6534), 'tensorflow.cast', 'tf.cast', (['train_set_x', 'tf.float32'], {}), '(train_set_x, tf.float32)\n', (6509, 6534), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# coding=utf-8
"""QC Config objects
Module defining the configuration objects used to drive the different QC modules in ioos_qc
Attributes:
tw (namedtuple): The TimeWindow namedtuple definition
"""
import io
import logging
import warnings
from pathlib import Path
from copy import deepcopy
from inspect import signature
from functools import partial
from typing import Union, List
from importlib import import_module
from dataclasses import dataclass, field
from collections import namedtuple, OrderedDict as odict
import numpy as np
from shapely.geometry import shape, GeometryCollection
from ioos_qc.results import CallResult, collect_results
from ioos_qc.utils import load_config_as_dict, dict_depth
# Module-level logger; the short name "L" is intentional (noqa silences the linter).
L = logging.getLogger(__name__)  # noqa
# Every type accepted as a QC configuration source (mapping, filepath or raw text buffer).
ConfigTypes = Union[dict, odict, str, Path, io.StringIO]
# TimeWindow: (starting, ending) bounds for a Context; each side defaults to
# None, meaning "no bound" on that side.
tw = namedtuple('TimeWindow', ('starting', 'ending'), defaults=[None, None])
@dataclass(frozen=True)
class Context:
    """A grouping key for QC calls: a time window plus an optional region.

    ``attrs`` carries free-form metadata but does not participate in
    equality or hashing, so calls sharing the same window/region compare
    (and group) as equal contexts.
    """
    window: tw = field(default_factory=tw)
    region: GeometryCollection = field(default=None)
    attrs: dict = field(default_factory=dict)

    def __eq__(self, other):
        # Only window and region matter; attrs are deliberately excluded.
        if not isinstance(other, Context):
            return False
        return (self.window, self.region) == (other.window, other.region)

    def __key__(self):
        # Hash on the region's WKB bytes (hashable) rather than the
        # geometry object itself; None when no region is set.
        region_key = getattr(self.region, 'wkb', None)
        return (self.window, region_key)

    def __hash__(self):
        return hash(self.__key__())

    def __repr__(self):
        return f'<Context window={self.window} region={self.region}>'
@dataclass(frozen=True)
class Call:
    """A single, fully-configured QC check bound to one stream.

    Wraps a ``functools.partial`` of the test function (``call``) together
    with the stream it applies to (``stream_id``), the spatial/temporal
    ``context`` it runs under, and free-form ``attrs``.
    """
    # Identifier of the stream (variable) this check applies to
    stream_id: str
    # functools.partial wrapping the test function and its keyword arguments
    call: partial
    # Spatial/temporal grouping context; defaults to an unbounded Context
    context: Context = field(default_factory=Context)
    # Free-form metadata carried along with the call
    attrs: dict = field(default_factory=dict)
    @property
    def window(self):
        """Time window of this call's context."""
        return self.context.window
    @property
    def region(self):
        """Geographic region of this call's context (may be None)."""
        return self.context.region
    @property
    def func(self) -> callable:
        """The underlying test function wrapped by the partial."""
        return self.call.func
    @property
    def module(self) -> str:
        """Module name of the test with the 'ioos_qc.' prefix stripped."""
        return self.func.__module__.replace('ioos_qc.', '')
    @property
    def method(self) -> str:
        """Name of the test function."""
        return self.func.__name__
    @property
    def method_path(self) -> str:
        """Dotted 'module.method' path, e.g. 'qartod.location_test'."""
        return f'{self.module}.{self.method}'
    @property
    def args(self) -> tuple:
        """Positional arguments captured in the partial."""
        return self.call.args
    @property
    def kwargs(self) -> dict:
        """Keyword arguments captured in the partial."""
        return self.call.keywords
    def config(self) -> dict:
        """Return a minimal config dict representation of this call."""
        return {
            self.module: {
                self.method: self.kwargs
            }
        }
    @property
    def is_aggregate(self) -> bool:
        """True when the test function is flagged as an aggregate test."""
        return hasattr(self.func, 'aggregate') and self.func.aggregate is True
    def __key__(self):
        # Identity tuple used for hashing/equality: stream + context +
        # test identity + captured arguments.
        return (
            self.stream_id,
            self.context.__hash__(),
            self.module,
            self.method,
            self.args,
            tuple(self.kwargs.items())
        )
    def __hash__(self):
        return hash(self.__key__())
    def __eq__(self, other):
        if isinstance(other, Call):
            return self.__key__() == other.__key__()
        return NotImplemented
    def __repr__(self):
        ret = f'<Call stream_id={self.stream_id}'
        if self.context.window.starting:
            ret += f' starting={self.window.starting}'
        if self.context.window.ending:
            ret += f' ending={self.window.ending}'
        if self.context.region is not None:
            ret += ' region=True'
        ret += f' {self.module}.{self.method}({self.args}, {self.kwargs})>'
        return ret
    def run(self, **passedkwargs):
        """Execute the wrapped test function and return a list of CallResult.

        Keyword arguments passed here override the configured kwargs; any
        keyword the test function does not accept is silently dropped.
        Exceptions from the test are logged, not raised, so the returned
        list may be empty on failure.
        """
        results = []
        # Get our own copy of the kwargs object so we can change it
        testkwargs = deepcopy(passedkwargs)
        # Merges dicts
        testkwargs = odict({ **self.kwargs, **testkwargs })
        # Get the arguments that the test functions support
        sig = signature(self.func)
        valid_keywords = [
            p.name for p in sig.parameters.values()
            if p.kind == p.POSITIONAL_OR_KEYWORD
        ]
        testkwargs = {
            k: v for k, v in testkwargs.items()
            if k in valid_keywords
        }
        try:
            results.append(
                CallResult(
                    package=self.module,
                    test=self.method,
                    function=self.func,
                    results=self.func(**testkwargs)
                )
            )
        except Exception as e:
            # NOTE(review): the f-string below is missing the closing '"'
            # after {self.method} — cosmetic only, but worth fixing.
            L.error(f'Could not run "{self.module}.{self.method}: {e}')
        return results
def extract_calls(source) -> List[Call]:
    """
    Extracts Call objects from a source object

    Args:
        source ([any]): The source of Call objects, this can be a:
            * Call object
            * list of Call objects
            * list of objects with the 'calls' attribute
            * Config object
            * Object with the 'calls' attribute

    Returns:
        List[Call]: List of extracted Call objects
    """
    if isinstance(source, Call):
        return [source]

    if isinstance(source, (tuple, list)):
        # First gather direct Call members, in order...
        calls = [c for c in source if isinstance(c, Call)]
        # ...then pull Call objects out of any member exposing a 'calls'
        # attribute. A plain loop replaces the original side-effect-only
        # list comprehension (which built and discarded a list of Nones).
        for member in source:
            if hasattr(member, 'calls'):
                calls.extend(x for x in member.calls if isinstance(x, Call))
        return calls

    if isinstance(source, Config):
        # Config object: its calls list is exactly what we want
        return source.calls

    if hasattr(source, 'calls'):
        # Duck-typed: any object carrying a 'calls' attribute
        return source.calls

    return []
class Config:
    """ A class to load any ioos_qc configuration setup into a list of callable objects
    that will run quality checks. The resulting list of quality checks parsed from a config
    file can be appended and edited until they are ready to be run. On run the checks are
    consolidated into an efficient structure for indexing the dataset (stream) it is run against
    so things like subsetting by time and space only happen once for each test in the same Context.

    How the individual checks are collected is up to each individual Stream implementation, this
    class only pares various formats and versions of a config into a list of Call objects.
    """
    def __init__(self, source, version=None, default_stream_key='_stream'):
        """
        Args:
            source: The QC configuration representation in one of the following formats:
                python dict or odict
                JSON/YAML filepath (str or Path object)
                JSON/YAML str
                JSON/YAML StringIO
                netCDF4/xarray filepath
                netCDF4/xarray Dataset
                list of Call objects
        """
        # A fully encapsulated Call objects that are configured
        # There are later grouped by window/region to more efficiently process
        # groups of indexes and variables
        self._calls = []
        # If we are passed an object we can extract calls from do so
        # Else, process as a Config object
        extracted = extract_calls(source)
        if extracted:
            self._calls = extracted
        else:
            # Parse config based on version
            # Massage and return the correct type of config object depending on the input
            self.config = load_config_as_dict(source)
            if 'contexts' in self.config:
                # Return a list of ContextConfig
                for c in self.config['contexts']:
                    self._calls.extend(list(ContextConfig(c).calls))
            elif 'streams' in self.config:
                # Return a list with just one ContextConfig
                self._calls += list(ContextConfig(self.config).calls)
            elif dict_depth(self.config) >= 4:
                # This is a StreamConfig
                self._calls += list(ContextConfig(odict(streams=self.config)).calls)
            else:
                # This is a QcConfig
                self._calls += list(ContextConfig(odict(streams={default_stream_key: self.config})).calls)

    @property
    def contexts(self):
        """
        Group the calls into context groups and return them
        """
        contexts = {}
        for c in self._calls:
            if c.context in contexts:
                contexts[c.context].append(c)
            else:
                contexts[c.context] = [c]
        return contexts

    @property
    def stream_ids(self):
        """
        Return a list of unique stream_ids for the Config, preserving
        first-seen order.
        """
        streams = []
        stream_map = {}
        for c in self._calls:
            if c.stream_id not in stream_map:
                stream_map[c.stream_id] = True
                streams.append(c.stream_id)
        return streams

    @property
    def calls(self):
        """All configured Call objects."""
        return self._calls

    @property
    def aggregate_calls(self):
        """Only the Call objects whose test function is an aggregate."""
        return [
            c for c in self._calls
            if hasattr(c.func, 'aggregate') and c.func.aggregate is True
        ]

    def has(self, stream_id : str, method: Union[callable, str]):
        """Return the first Call matching stream_id and method, or False.

        Args:
            stream_id: The stream identifier the Call must belong to.
            method: Either a dotted 'module.method' string (e.g.
                'qartod.location_test') or the test function itself.
        """
        if isinstance(method, str):
            for c in self._calls:
                if c.stream_id == stream_id and c.method_path == method:
                    return c
        elif callable(method):
            # BUG FIX: the original `isinstance(method, callable)` raised
            # TypeError because `callable` is a builtin function, not a
            # class. It also compared `c.method` (the function name) against
            # `method.__module__`; compare module-to-module and name-to-name.
            for c in self._calls:
                if (c.stream_id == stream_id and
                    c.func.__module__ == method.__module__ and
                    c.method == method.__name__
                   ):
                    return c
        return False

    def calls_by_stream_id(self, stream_id) -> List[Call]:
        """Return every Call configured for the given stream_id."""
        calls = []
        for c in self._calls:
            if c.stream_id == stream_id:
                calls.append(c)
        return calls

    def add(self, source) -> None:
        """
        Adds a source of calls to this Config. See extract_calls for information on the
        types of objects accepted as the source parameter. The changes the internal .calls
        attribute and returns None.

        Args:
            source ([any]): The source of Call objects, this can be a:
                * Call object
                * list of Call objects
                * list of objects with the 'calls' attribute
                * Config object
                * Object with the 'calls' attribute
        """
        extracted = extract_calls(source)
        self._calls += extracted
class ContextConfig:
    """A collection of a Region, a TimeWindow and a list of StreamConfig objects
    Defines a set of quality checks to run against multiple input streams.
    This can include a region and a time window to subset any DataStreams by before running checks.
    region: None
    window:
        starting: 2020-01-01T00:00:00Z
        ending: 2020-04-01T00:00:00Z
    streams:
        variable1: # stream_id
            qartod: # StreamConfig
                location_test:
                    bbox: [-80, 40, -70, 60]
        variable2: # stream_id
            qartod: # StreamConfig
                gross_range_test:
                    suspect_span: [1, 11]
                    fail_span: [0, 12]
    Helper methods exist to run this check against a different inputs:
        * pandas.DataFrame, dask.DataFrame, netCDF4.Dataset, xarray.Dataset, ERDDAP URL
    Attributes:
        config (odict): dict representation of the parsed ContextConfig source
        region (GeometryCollection): A `shapely` object representing the valid geographic region
        window (namedtuple): A TimeWindow object representing the valid time period
        streams (odict): dict representation of the parsed StreamConfig objects
    """
    def __init__(self, source: ConfigTypes):
        # Parse the raw source (dict/path/str/StringIO) into a plain dict
        self.config = load_config_as_dict(source)
        self._calls = []
        self.attrs = self.config.get('attrs', {})
        # Region
        self.region = None
        if 'region' in self.config:
            # Convert region to a GeometryCollection Shapely object.
            if isinstance(self.config['region'], GeometryCollection):
                self.region = self.config['region']
            elif self.config['region'] and 'features' in self.config['region']:
                # Feature based GeoJSON
                self.region = GeometryCollection([
                    shape(feature['geometry']) for feature in self.config['region']['features']
                ])
            elif self.config['region'] and 'geometry' in self.config['region']:
                # Geometry based GeoJSON
                self.region = GeometryCollection([
                    shape(self.config['region']['geometry'])
                ])
            else:
                # Unparseable regions are ignored rather than raising
                L.warning('Ignoring region because it could not be parsed, is it valid GeoJSON?')
        # Window: accept a ready-made TimeWindow, a mapping of its fields,
        # or default to an unbounded window.
        if 'window' in self.config and isinstance(self.config['window'], tw):
            self.window = self.config['window']
        elif 'window' in self.config:
            self.window = tw(**self.config['window'])
        else:
            self.window = tw()
        self.context = Context(
            window=self.window,
            region=self.region,
            attrs=self.attrs
        )
        # Extract each Call from the nested JSON
        """
        Calls
        This parses through available checks and selects the actual test functions
        to run, but doesn't actually run anything. It just sets up the object to be
        run later by iterating over the configs.
        """
        for stream_id, sc in self.config['streams'].items():
            for package, modules in sc.items():
                try:
                    # e.g. package 'qartod' -> module 'ioos_qc.qartod'
                    testpackage = import_module('ioos_qc.{}'.format(package))
                except ImportError:
                    L.warning(f'No ioos_qc package "{package}" was found, skipping.')
                    continue
                for testname, kwargs in modules.items():
                    # A bare test name with no kwargs parses as None
                    kwargs = kwargs or {}
                    if not hasattr(testpackage, testname):
                        L.warning(f'No ioos_qc method "{package}.{testname}" was found, skipping')
                        continue
                    else:
                        runfunc = getattr(testpackage, testname)
                        self._calls.append(
                            Call(
                                stream_id=stream_id,
                                context=self.context,
                                # NOTE(review): getattr(sc, 'attrs', {}) on a
                                # plain dict always yields {} — confirm whether
                                # sc['attrs'] was intended here.
                                call=partial(runfunc, (), **kwargs),
                                attrs=getattr(sc, 'attrs', {})
                            )
                        )
    @property
    def calls(self):
        """Call objects extracted from this context's streams."""
        return self._calls
    def add(self, source) -> None:
        """
        Adds a source of calls to this ContextConfig. See extract_calls for information on the
        types of objects accepted as the source parameter. The changes the internal .calls
        attribute and returns None.
        Args:
            source ([any]): The source of Call objects, this can be a:
                * Call object
                * list of Call objects
                * list of objects with the 'calls' attribute
                * Config object
                * Object with the 'calls' attribute
        """
        # Only calls that belong to this exact context are accepted
        extracted = extract_calls(source)
        self._calls.extend([ e for e in extracted if e.context == self.context ])
    def __str__(self):
        # sc = list(self.streams.keys())
        return (
            f"<ContextConfig "
            f"calls={len(self._calls)} "
            f"region={self.region is not None} "
            f"window={self.window.starting is not None or self.window.ending is not None}"
            ">"
        )
    def __repr__(self):
        return self.__str__()
class QcConfig(Config):
    """Deprecated single-stream wrapper around Config (see __init__)."""
    def __init__(self, source, default_stream_key='_stream'):
        """
        A Config objects with no concept of a Stream ID. Typically used when running QC on a single
        stream. This just sets up a stream with the name passed in as the "default_stream_key"
        parameter.
        Args:
            source: The QC configuration representation in one of the following formats:
                python dict or odict
                JSON/YAML filepath (str or Path object)
                JSON/YAML str
                JSON/YAML StringIO
                netCDF4/xarray filepath
                netCDF4/xarray Dataset
                list of Call objects
            default_stream_key: The internal name of the stream, defaults to "_stream"
        """
        warnings.warn(
            "The QcConfig object is deprecated, please use Config directly",
            DeprecationWarning
        )
        self._default_stream_key = default_stream_key
        super().__init__(source, default_stream_key=default_stream_key)
    def run(self, **passedkwargs):
        """Run all configured checks against the passed arrays and return
        the results dict for the single internal stream."""
        from ioos_qc.streams import NumpyStream
        # Cleanup kwarg names
        passedkwargs['time'] = passedkwargs.pop('tinp', None)
        passedkwargs['z'] = passedkwargs.pop('zinp', None)
        # Convert input to numpy arrays which is required for NumpySteam
        for k in ['inp', 'time', 'z', 'lat', 'lon', 'geom']:
            if k not in passedkwargs or passedkwargs[k] is None:
                continue
            if not isinstance(passedkwargs[k], np.ndarray):
                passedkwargs[k] = np.array(passedkwargs[k])
        # Run the checks
        np_stream = NumpyStream(**passedkwargs)
        # Collect the results
        results = collect_results(np_stream.run(self), how='dict')
        # Strip out the default_stream_key
        return results[self._default_stream_key]
class NcQcConfig(Config):
    """Removed API kept only so old imports fail with a helpful message."""
    def __init__(self, *args, **kwargs):
        message = (
            'The NcQcConfig object has been replaced by ioos_qc.config.Config '
            'and ioos_qc.streams.XarrayStream'
        )
        raise NotImplementedError(message)
| [
"functools.partial",
"copy.deepcopy",
"ioos_qc.streams.NumpyStream",
"ioos_qc.utils.load_config_as_dict",
"ioos_qc.utils.dict_depth",
"logging.getLogger",
"dataclasses.field",
"inspect.signature",
"collections.namedtuple",
"numpy.array",
"collections.OrderedDict",
"warnings.warn",
"shapely.g... | [((698, 725), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (715, 725), False, 'import logging\n'), ((797, 868), 'collections.namedtuple', 'namedtuple', (['"""TimeWindow"""', "('starting', 'ending')"], {'defaults': '[None, None]'}), "('TimeWindow', ('starting', 'ending'), defaults=[None, None])\n", (807, 868), False, 'from collections import namedtuple, OrderedDict as odict\n'), ((872, 894), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (881, 894), False, 'from dataclasses import dataclass, field\n'), ((1502, 1524), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1511, 1524), False, 'from dataclasses import dataclass, field\n'), ((927, 952), 'dataclasses.field', 'field', ([], {'default_factory': 'tw'}), '(default_factory=tw)\n', (932, 952), False, 'from dataclasses import dataclass, field\n'), ((986, 1005), 'dataclasses.field', 'field', ([], {'default': 'None'}), '(default=None)\n', (991, 1005), False, 'from dataclasses import dataclass, field\n'), ((1024, 1051), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1029, 1051), False, 'from dataclasses import dataclass, field\n'), ((1597, 1627), 'dataclasses.field', 'field', ([], {'default_factory': 'Context'}), '(default_factory=Context)\n', (1602, 1627), False, 'from dataclasses import dataclass, field\n'), ((1646, 1673), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1651, 1673), False, 'from dataclasses import dataclass, field\n'), ((3609, 3631), 'copy.deepcopy', 'deepcopy', (['passedkwargs'], {}), '(passedkwargs)\n', (3617, 3631), False, 'from copy import deepcopy\n'), ((3676, 3712), 'collections.OrderedDict', 'odict', (['{**self.kwargs, **testkwargs}'], {}), '({**self.kwargs, **testkwargs})\n', (3681, 3712), True, 'from collections import namedtuple, OrderedDict as odict\n'), ((3790, 3810), 
'inspect.signature', 'signature', (['self.func'], {}), '(self.func)\n', (3799, 3810), False, 'from inspect import signature\n'), ((12012, 12039), 'ioos_qc.utils.load_config_as_dict', 'load_config_as_dict', (['source'], {}), '(source)\n', (12031, 12039), False, 'from ioos_qc.utils import load_config_as_dict, dict_depth\n'), ((16790, 16892), 'warnings.warn', 'warnings.warn', (['"""The QcConfig object is deprecated, please use Config directly"""', 'DeprecationWarning'], {}), "('The QcConfig object is deprecated, please use Config directly',\n DeprecationWarning)\n", (16803, 16892), False, 'import warnings\n'), ((17673, 17700), 'ioos_qc.streams.NumpyStream', 'NumpyStream', ([], {}), '(**passedkwargs)\n', (17684, 17700), False, 'from ioos_qc.streams import NumpyStream\n'), ((7314, 7341), 'ioos_qc.utils.load_config_as_dict', 'load_config_as_dict', (['source'], {}), '(source)\n', (7333, 7341), False, 'from ioos_qc.utils import load_config_as_dict, dict_depth\n'), ((17602, 17627), 'numpy.array', 'np.array', (['passedkwargs[k]'], {}), '(passedkwargs[k])\n', (17610, 17627), True, 'import numpy as np\n'), ((7742, 7765), 'ioos_qc.utils.dict_depth', 'dict_depth', (['self.config'], {}), '(self.config)\n', (7752, 7765), False, 'from ioos_qc.utils import load_config_as_dict, dict_depth\n'), ((12579, 12605), 'shapely.geometry.shape', 'shape', (["feature['geometry']"], {}), "(feature['geometry'])\n", (12584, 12605), False, 'from shapely.geometry import shape, GeometryCollection\n'), ((12866, 12906), 'shapely.geometry.shape', 'shape', (["self.config['region']['geometry']"], {}), "(self.config['region']['geometry'])\n", (12871, 12906), False, 'from shapely.geometry import shape, GeometryCollection\n'), ((14708, 14738), 'functools.partial', 'partial', (['runfunc', '()'], {}), '(runfunc, (), **kwargs)\n', (14715, 14738), False, 'from functools import partial\n'), ((7863, 7889), 'collections.OrderedDict', 'odict', ([], {'streams': 'self.config'}), '(streams=self.config)\n', (7868, 7889), 
True, 'from collections import namedtuple, OrderedDict as odict\n'), ((8003, 8051), 'collections.OrderedDict', 'odict', ([], {'streams': '{default_stream_key: self.config}'}), '(streams={default_stream_key: self.config})\n', (8008, 8051), True, 'from collections import namedtuple, OrderedDict as odict\n')] |
import torch
import re
from PIL import Image
from pathlib import Path
from tabulate import tabulate
from torch.utils.data import Dataset
from torchvision import transforms
from collections import defaultdict
from torch.utils.data.sampler import Sampler
from copy import deepcopy
import numpy as np
import random
class Market1501(Dataset):
    """Market-1501 person re-identification dataset.

    Expects the standard layout with 'bounding_box_train', 'query' and
    'bounding_box_test' folders of JPEGs named '<pid>_c<camid>...'.
    Yields (image, person_id, camera_id) tuples; for the train split the
    person id is remapped to a contiguous class index.
    """
    def __init__(self, root: str, split: str = 'train', transform=None) -> None:
        super().__init__()
        self.root = Path(root)
        self.split = split
        self.transform = transform
        # Map the split name to its folder; anything other than
        # 'train'/'query' falls back to the gallery folder.
        if self.split == 'train':
            data_path = self.root / 'bounding_box_train'
        elif self.split == 'query':
            data_path = self.root / 'query'
        else:
            data_path = self.root / 'bounding_box_test'
        self.data, self.pids, self.camids = self.get_data(data_path)
        self.print_dataset_info()
        self.num_classes = len(self.pids)
        # pid -> contiguous class index, used only for the train split
        self.pidsdict = {pid: i for i, pid in enumerate(self.pids)}
        # NOTE(review): these mean/std values differ from the usual ImageNet
        # stats (0.485, 0.456, 0.406)/(0.229, 0.224, 0.225) — the digits look
        # transposed; confirm they are intentional.
        self.img_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.496, 0.456), (0.229, 0.256, 0.224))
        ])
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        """Return (image_tensor_or_PIL, pid, camid) for the given index."""
        img_path, pid, camid = self.data[index]
        img = Image.open(img_path).convert('RGB')
        if self.split == 'train': pid = self.pidsdict[pid]
        # Caller-supplied transform wins; otherwise apply the default one
        if self.transform is not None:
            img = self.transform(img)
        else:
            img = self.img_transform(img)
        return img, pid, camid
    def get_data(self, dir_path: Path):
        """Scan dir_path for JPEGs and return (data, pids, camids).

        data is a list of (path, pid, camid) tuples; pids/camids are the
        sets of ids encountered. Camera ids are shifted to start at 0.
        """
        img_paths = dir_path.glob('*.jpg')
        # Filenames look like '0002_c1s1_000451_03.jpg': pid then camera id
        pattern = re.compile(r'([-\d]+)_c(\d)')
        data = []
        pids = set()
        camids = set()
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(str(img_path)).groups())
            if pid == -1: continue # junk images are just ignored
            assert 0 <= pid <= 1501 # pid==0 means background
            assert 1 <= camid <= 6
            camid -= 1 # index starts from 0
            pids.add(pid)
            camids.add(camid)
            data.append((str(img_path), pid, camid))
        return data, pids, camids
    def print_dataset_info(self):
        """Print a one-row summary table for this split."""
        table = [[self.split, len(self.data), len(self.pids), len(self.camids)]]
        print(tabulate(table, headers=['Subset', 'Images', 'Person IDs', 'Cameras'], numalign='right'))
        print()
class RandomIdentitySampler(Sampler):
    """Sampler that yields batches balanced over person identities.

    Each batch contains ``batch_size // num_instances`` distinct person ids
    with ``num_instances`` samples each — the layout needed for triplet-style
    re-identification losses.
    """
    def __init__(self, data_source, batch_size, num_instances) -> None:
        super().__init__(data_source)
        self.data_source = data_source
        self.batch_size = batch_size
        self.num_instances = num_instances
        self.num_pids_per_batch = self.batch_size // self.num_instances
        # pid -> list of dataset indices holding that identity
        self.index_dict = defaultdict(list)
        for index, (_, pid, _) in enumerate(self.data_source):
            self.index_dict[pid].append(index)
        self.pids = list(self.index_dict.keys())
        # Estimated epoch length: per pid, the count rounded down to a
        # multiple of num_instances (ids with fewer samples are padded up
        # to num_instances via sampling with replacement in __iter__).
        self.length = 0
        for pid in self.pids:
            idxs = self.index_dict[pid]
            num = len(idxs)
            if num < self.num_instances:
                num = self.num_instances
            self.length += num - num % self.num_instances
    def __iter__(self):
        # Pre-slice each identity's (shuffled) indices into chunks of
        # num_instances; leftovers smaller than a chunk are dropped.
        batch_idxs_dict = defaultdict(list)
        for pid in self.pids:
            idxs = deepcopy(self.index_dict[pid])
            if len(idxs) < self.num_instances:
                # Too few samples for this id: resample with replacement
                idxs = np.random.choice(idxs, self.num_instances, replace=True)
            random.shuffle(idxs)
            batch_idxs = []
            for idx in idxs:
                batch_idxs.append(idx)
                if len(batch_idxs) == self.num_instances:
                    batch_idxs_dict[pid].append(batch_idxs)
                    batch_idxs = []
        # Draw num_pids_per_batch random ids per batch until too few ids
        # still have chunks left to fill a whole batch.
        avai_pids = deepcopy(self.pids)
        final_idxs = []
        while len(avai_pids) >= self.num_pids_per_batch:
            selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
            for pid in selected_pids:
                batch_idxs = batch_idxs_dict[pid].pop(0)
                final_idxs.extend(batch_idxs)
                if len(batch_idxs_dict[pid]) == 0:
                    avai_pids.remove(pid)
        return iter(final_idxs)
    def __len__(self):
        return self.length
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    # Smoke test: load the gallery split and draw identity-balanced batches.
    # NOTE(review): hard-coded local Windows path — adjust before running.
    market = Market1501('C:/Users/sithu/Documents/Datasets/Market-1501-v15.09.15', split='gallery', transform=None)
    dataloader = DataLoader(market, batch_size=8, num_workers=4, sampler=RandomIdentitySampler(market.data, 8, 2))
    # img, pid, camid = next(iter(dataloader))
    # print(img.shape)
    # print(pid, camid)
    for img, pid, camid in dataloader:
        print(pid)
break | [
"copy.deepcopy",
"random.sample",
"random.shuffle",
"torchvision.transforms.ToTensor",
"collections.defaultdict",
"PIL.Image.open",
"pathlib.Path",
"tabulate.tabulate",
"numpy.random.choice",
"torchvision.transforms.Normalize",
"re.compile"
] | [((469, 479), 'pathlib.Path', 'Path', (['root'], {}), '(root)\n', (473, 479), False, 'from pathlib import Path\n'), ((1705, 1735), 're.compile', 're.compile', (['"""([-\\\\d]+)_c(\\\\d)"""'], {}), "('([-\\\\d]+)_c(\\\\d)')\n", (1715, 1735), False, 'import re\n'), ((2896, 2913), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2907, 2913), False, 'from collections import defaultdict\n'), ((3389, 3406), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3400, 3406), False, 'from collections import defaultdict\n'), ((3928, 3947), 'copy.deepcopy', 'deepcopy', (['self.pids'], {}), '(self.pids)\n', (3936, 3947), False, 'from copy import deepcopy\n'), ((2423, 2515), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "['Subset', 'Images', 'Person IDs', 'Cameras']", 'numalign': '"""right"""'}), "(table, headers=['Subset', 'Images', 'Person IDs', 'Cameras'],\n numalign='right')\n", (2431, 2515), False, 'from tabulate import tabulate\n'), ((3457, 3487), 'copy.deepcopy', 'deepcopy', (['self.index_dict[pid]'], {}), '(self.index_dict[pid])\n', (3465, 3487), False, 'from copy import deepcopy\n'), ((3627, 3647), 'random.shuffle', 'random.shuffle', (['idxs'], {}), '(idxs)\n', (3641, 3647), False, 'import random\n'), ((4058, 4107), 'random.sample', 'random.sample', (['avai_pids', 'self.num_pids_per_batch'], {}), '(avai_pids, self.num_pids_per_batch)\n', (4071, 4107), False, 'import random\n'), ((1061, 1082), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1080, 1082), False, 'from torchvision import transforms\n'), ((1096, 1162), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.496, 0.456)', '(0.229, 0.256, 0.224)'], {}), '((0.485, 0.496, 0.456), (0.229, 0.256, 0.224))\n', (1116, 1162), False, 'from torchvision import transforms\n'), ((1334, 1354), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1344, 1354), False, 'from PIL import Image\n'), ((3558, 3614), 
'numpy.random.choice', 'np.random.choice', (['idxs', 'self.num_instances'], {'replace': '(True)'}), '(idxs, self.num_instances, replace=True)\n', (3574, 3614), True, 'import numpy as np\n')] |
import os, torch, numpy as np
import pickle
from core.data_utils import graph_algorithms
from core.data_utils.graph_generation import GraphType, generate_graph
from inspect import signature
from torch_geometric.data import InMemoryDataset
from torch_geometric.data.data import Data
from torch_geometric.utils import dense_to_sparse
class GraphPropertyDataset(InMemoryDataset):
    """Synthetic graph-property dataset (node- and graph-level targets).

    On first use, `download` generates random graphs with
    `genereate_dataset` and `process` converts them into cached
    torch_geometric tensors, one file per split.
    """
    # parameters for generating the dataset
    seed=1234
    graph_type='RANDOM'
    extrapolation=False
    nodes_labels=["eccentricity", "graph_laplacian_features", "sssp"]
    graph_labels = ["is_connected", "diameter", "spectral_radius"]
    def __init__(self, root, split, transform=None, pre_transform=None, pre_filter=None):
        super().__init__(root, transform, pre_transform, pre_filter)
        # Load the pre-processed tensors for the requested split
        path = os.path.join(self.processed_dir, f'{split}.pt')
        self.data, self.slices = torch.load(path)
    @property
    def raw_file_names(self):
        return ["generated_data.pkl"]
    @property
    def processed_file_names(self):
        return ['train.pt', 'val.pt', 'test.pt']
    def download(self):
        # generate dataset
        print("Generating dataset...")
        genereate_dataset(root=self.raw_dir, seed=self.seed, graph_type=self.graph_type,
                          extrapolation=self.extrapolation,
                          nodes_labels=self.nodes_labels,
                          graph_labels=self.graph_labels)
    def process(self):
        """Normalize labels by the training-split maxima and cache each
        split as a collated torch_geometric file."""
        with open(self.raw_paths[0], 'rb') as f:
            (adj, features, node_labels, graph_labels) = pickle.load(f)
        # normalize labels
        max_node_labels = torch.cat([nls.max(0)[0].max(0)[0].unsqueeze(0) for nls in node_labels['train']]).max(0)[0]
        max_graph_labels = torch.cat([gls.max(0)[0].unsqueeze(0) for gls in graph_labels['train']]).max(0)[0]
        for dset in node_labels.keys():
            node_labels[dset] = [nls / max_node_labels for nls in node_labels[dset]]
            graph_labels[dset] = [gls / max_graph_labels for gls in graph_labels[dset]]
        graphs = to_torch_geom(adj, features, node_labels, graph_labels)
        for key, data_list in graphs.items():
            if self.pre_filter is not None:
                data_list = [data for data in data_list if self.pre_filter(data)]
            if self.pre_transform is not None:
                data_list = [self.pre_transform(data) for data in data_list]
            data, slices = self.collate(data_list)
            torch.save((data, slices), os.path.join(self.processed_dir, f'{key}.pt'))
def to_torch_geom(adj, features, node_labels, graph_labels):
    """Convert batched dense tensors into flat lists of torch_geometric Data.

    Each split key ('train', 'val', 'test', ...) maps to one list; batches
    of equally sized graphs are flattened in their original order.
    """
    graphs = {}
    for split, split_adj in adj.items():
        split_graphs = []
        for batch_idx, batch_adj in enumerate(split_adj):
            for graph_idx in range(batch_adj.shape[0]):
                dense = batch_adj[graph_idx]
                # dense_to_sparse returns (edge_index, edge_attr); only the
                # connectivity is needed here.
                edge_index, _ = dense_to_sparse(dense)
                split_graphs.append(Data(
                    x=features[split][batch_idx][graph_idx],
                    edge_index=edge_index,
                    y=graph_labels[split][batch_idx][graph_idx].unsqueeze(0),
                    pos=node_labels[split][batch_idx][graph_idx],
                ))
        graphs[split] = split_graphs
    return graphs
def genereate_dataset(root='data', seed=1234, graph_type='RANDOM', extrapolation=False,
                      nodes_labels=["eccentricity", "graph_laplacian_features", "sssp"],
                      graph_labels = ["is_connected", "diameter", "spectral_radius"]):
    """Generate the raw multitask graph dataset pickle under ``root``.

    Args:
        root: output directory for 'generated_data.pkl'.
        seed: base RNG seed for graph generation.
        graph_type: name of a ``GraphType`` member.
        extrapolation: when True, add larger-graph test splits.
        nodes_labels: node-level target names; the special entry 'sssp'
            enables single-source shortest paths with a random source node.
        graph_labels: graph-level target names from ``graph_algorithms``.
    """
    if not os.path.exists(root):
        os.makedirs(root)
    # BUG FIX: work on a copy — the original removed 'sssp' from the caller's
    # list (and from the shared mutable default!) in place, so a second call
    # silently lost the sssp task.
    nodes_labels = list(nodes_labels)
    if 'sssp' in nodes_labels:
        sssp = True
        nodes_labels.remove('sssp')
    else:
        sssp = False
    # Resolve label names to the graph_algorithms functions computing them
    nodes_labels_algs = list(map(lambda s: getattr(graph_algorithms, s), nodes_labels))
    graph_labels_algs = list(map(lambda s: getattr(graph_algorithms, s), graph_labels))
    def get_nodes_labels(A, F, initial=None):
        # Stack per-node targets; an optional precomputed column (e.g. sssp)
        # is prepended. Result is (nodes, targets).
        labels = [] if initial is None else [initial]
        for f in nodes_labels_algs:
            params = signature(f).parameters
            labels.append(f(A, F) if 'F' in params else f(A))
        return np.swapaxes(np.stack(labels), 0, 1)
    def get_graph_labels(A, F):
        # Flat vector of graph-level targets
        labels = []
        for f in graph_labels_algs:
            params = signature(f).parameters
            labels.append(f(A, F) if 'F' in params else f(A))
        return np.asarray(labels).flatten()
    # BUG FIX: pass the computed `sssp` flag instead of the hard-coded True,
    # so omitting 'sssp' from nodes_labels actually disables it downstream.
    GenerateGraphPropertyDataset(n_graphs={'train': [512] * 10, 'val': [128] * 5, 'default': [256] * 5},
                                 N={**{'train': range(15, 25), 'val': range(15, 25)}, **(
                                     {'test-(20,25)': range(20, 25), 'test-(25,30)': range(25, 30),
                                      'test-(30,35)': range(30, 35), 'test-(35,40)': range(35, 40),
                                      'test-(40,45)': range(40, 45), 'test-(45,50)': range(45, 50),
                                      'test-(60,65)': range(60, 65), 'test-(75,80)': range(75, 80),
                                      'test-(95,100)': range(95, 100)} if extrapolation else
                                     {'test': range(15, 25)})},
                                 seed=seed, graph_type=getattr(GraphType, graph_type),
                                 get_nodes_labels=get_nodes_labels, get_graph_labels=get_graph_labels,
                                 sssp=sssp, filename=f"{root}/generated_data.pkl")
class GenerateGraphPropertyDataset:
    """Generates random graphs per split, computes node/graph labels, and
    pickles the resulting tensors to ``filename``.

    Results are stored on the instance as dicts keyed by split name, each a
    list (one entry per size batch) of float tensors.
    """
    def __init__(self, n_graphs, N, seed, graph_type, get_nodes_labels, get_graph_labels, print_every=20, sssp=True, filename="./data/multitask_dataset.pkl"):
        self.adj = {}
        self.features = {}
        self.nodes_labels = {}
        self.graph_labels = {}
        def progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█', printEnd=""):
            # Simple in-place console progress bar
            percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
            filledLength = int(length * iteration // total)
            bar = fill * filledLength + '-' * (length - filledLength)
            print('\r{} |{}| {}% {}'.format(prefix, bar, percent, suffix), end=printEnd)
        def to_categorical(x, N):
            # One-hot vector of length N with position x set
            v = np.zeros(N)
            v[x] = 1
            return v
        for dset in N.keys():
            # Splits without an explicit batch layout fall back to 'default'
            if dset not in n_graphs:
                n_graphs[dset] = n_graphs['default']
            total_n_graphs = sum(n_graphs[dset])
            # One sub-list per size batch for this split
            set_adj = [[] for _ in n_graphs[dset]]
            set_features = [[] for _ in n_graphs[dset]]
            set_nodes_labels = [[] for _ in n_graphs[dset]]
            set_graph_labels = [[] for _ in n_graphs[dset]]
            generated = 0
            progress_bar(0, total_n_graphs, prefix='Generating {:20}\t\t'.format(dset),
                         suffix='({} of {})'.format(0, total_n_graphs))
            for batch, batch_size in enumerate(n_graphs[dset]):
                for i in range(batch_size):
                    # generate a random graph of type graph_type and size N
                    # NOTE(review): `type` shadows the builtin here.
                    seed += 1
                    adj, features, type = generate_graph(N[dset][batch], graph_type, seed=seed)
                    while np.min(np.max(adj, 0)) == 0.0:
                        # remove graph with singleton nodes
                        seed += 1
                        adj, features, _ = generate_graph(N[dset][batch], type, seed=seed)
                    generated += 1
                    if generated % print_every == 0:
                        progress_bar(generated, total_n_graphs, prefix='Generating {:20}\t\t'.format(dset),
                                     suffix='({} of {})'.format(generated, total_n_graphs))
                    # make sure there are no self connection
                    assert np.all(
                        np.multiply(adj, np.eye(N[dset][batch])) == np.zeros((N[dset][batch], N[dset][batch])))
                    if sssp:
                        # define the source node
                        source_node = np.random.randint(0, N[dset][batch])
                    # compute the labels with graph_algorithms; if sssp add the sssp
                    node_labels = get_nodes_labels(adj, features,
                                                     graph_algorithms.all_pairs_shortest_paths(adj, 0)[source_node]
                                                     if sssp else None)
                    graph_labels = get_graph_labels(adj, features)
                    if sssp:
                        # add the 1-hot feature determining the starting node
                        features = np.stack([to_categorical(source_node, N[dset][batch]), features], axis=1)
                    set_adj[batch].append(adj)
                    set_features[batch].append(features)
                    set_nodes_labels[batch].append(node_labels)
                    set_graph_labels[batch].append(graph_labels)
            # Convert every size batch into a float tensor
            self.adj[dset] = [torch.from_numpy(np.asarray(adjs)).float() for adjs in set_adj]
            self.features[dset] = [torch.from_numpy(np.asarray(fs)).float() for fs in set_features]
            self.nodes_labels[dset] = [torch.from_numpy(np.asarray(nls)).float() for nls in set_nodes_labels]
            self.graph_labels[dset] = [torch.from_numpy(np.asarray(gls)).float() for gls in set_graph_labels]
            progress_bar(total_n_graphs, total_n_graphs, prefix='Generating {:20}\t\t'.format(dset),
                         suffix='({} of {})'.format(total_n_graphs, total_n_graphs), printEnd='\n')
        self.save_as_pickle(filename)
    def save_as_pickle(self, filename):
        """Saves the data into a pickle file at filename."""
        directory = os.path.dirname(filename)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(filename, 'wb') as f:
            pickle.dump((self.adj, self.features, self.nodes_labels, self.graph_labels), f)
if __name__ == '__main__':
    # Script entry point: build (or load from cache) the 'train' split of the
    # synthetic graph dataset rooted at data/pna-simulation.
    dataset = GraphPropertyDataset(root='data/pna-simulation', split='train')
| [
"numpy.stack",
"core.data_utils.graph_generation.generate_graph",
"pickle.dump",
"os.makedirs",
"torch.load",
"os.path.dirname",
"os.path.exists",
"numpy.zeros",
"numpy.asarray",
"numpy.max",
"pickle.load",
"inspect.signature",
"numpy.random.randint",
"core.data_utils.graph_algorithms.all_... | [((797, 844), 'os.path.join', 'os.path.join', (['self.processed_dir', 'f"""{split}.pt"""'], {}), "(self.processed_dir, f'{split}.pt')\n", (809, 844), False, 'import os, torch, numpy as np\n'), ((878, 894), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (888, 894), False, 'import os, torch, numpy as np\n'), ((3560, 3580), 'os.path.exists', 'os.path.exists', (['root'], {}), '(root)\n', (3574, 3580), False, 'import os, torch, numpy as np\n'), ((3590, 3607), 'os.makedirs', 'os.makedirs', (['root'], {}), '(root)\n', (3601, 3607), False, 'import os, torch, numpy as np\n'), ((9734, 9759), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (9749, 9759), False, 'import os, torch, numpy as np\n'), ((1566, 1580), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1577, 1580), False, 'import pickle\n'), ((4176, 4192), 'numpy.stack', 'np.stack', (['labels'], {}), '(labels)\n', (4184, 4192), True, 'import os, torch, numpy as np\n'), ((6256, 6267), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (6264, 6267), True, 'import os, torch, numpy as np\n'), ((9775, 9800), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (9789, 9800), False, 'import os, torch, numpy as np\n'), ((9814, 9836), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (9825, 9836), False, 'import os, torch, numpy as np\n'), ((9890, 9969), 'pickle.dump', 'pickle.dump', (['(self.adj, self.features, self.nodes_labels, self.graph_labels)', 'f'], {}), '((self.adj, self.features, self.nodes_labels, self.graph_labels), f)\n', (9901, 9969), False, 'import pickle\n'), ((2512, 2557), 'os.path.join', 'os.path.join', (['self.processed_dir', 'f"""{key}.pt"""'], {}), "(self.processed_dir, f'{key}.pt')\n", (2524, 2557), False, 'import os, torch, numpy as np\n'), ((4063, 4075), 'inspect.signature', 'signature', (['f'], {}), '(f)\n', (4072, 4075), False, 'from inspect import signature\n'), ((4310, 4322), 
'inspect.signature', 'signature', (['f'], {}), '(f)\n', (4319, 4322), False, 'from inspect import signature\n'), ((4411, 4429), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (4421, 4429), True, 'import os, torch, numpy as np\n'), ((7153, 7206), 'core.data_utils.graph_generation.generate_graph', 'generate_graph', (['N[dset][batch]', 'graph_type'], {'seed': 'seed'}), '(N[dset][batch], graph_type, seed=seed)\n', (7167, 7206), False, 'from core.data_utils.graph_generation import GraphType, generate_graph\n'), ((7402, 7449), 'core.data_utils.graph_generation.generate_graph', 'generate_graph', (['N[dset][batch]', 'type'], {'seed': 'seed'}), '(N[dset][batch], type, seed=seed)\n', (7416, 7449), False, 'from core.data_utils.graph_generation import GraphType, generate_graph\n'), ((8065, 8101), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N[dset][batch]'], {}), '(0, N[dset][batch])\n', (8082, 8101), True, 'import os, torch, numpy as np\n'), ((3028, 3054), 'torch_geometric.utils.dense_to_sparse', 'dense_to_sparse', (['graph_adj'], {}), '(graph_adj)\n', (3043, 3054), False, 'from torch_geometric.utils import dense_to_sparse\n'), ((7241, 7255), 'numpy.max', 'np.max', (['adj', '(0)'], {}), '(adj, 0)\n', (7247, 7255), True, 'import os, torch, numpy as np\n'), ((7904, 7946), 'numpy.zeros', 'np.zeros', (['(N[dset][batch], N[dset][batch])'], {}), '((N[dset][batch], N[dset][batch]))\n', (7912, 7946), True, 'import os, torch, numpy as np\n'), ((9003, 9019), 'numpy.asarray', 'np.asarray', (['adjs'], {}), '(adjs)\n', (9013, 9019), True, 'import os, torch, numpy as np\n'), ((9102, 9116), 'numpy.asarray', 'np.asarray', (['fs'], {}), '(fs)\n', (9112, 9116), True, 'import os, torch, numpy as np\n'), ((9206, 9221), 'numpy.asarray', 'np.asarray', (['nls'], {}), '(nls)\n', (9216, 9221), True, 'import os, torch, numpy as np\n'), ((9316, 9331), 'numpy.asarray', 'np.asarray', (['gls'], {}), '(gls)\n', (9326, 9331), True, 'import os, torch, numpy as np\n'), ((7877, 7899), 
'numpy.eye', 'np.eye', (['N[dset][batch]'], {}), '(N[dset][batch])\n', (7883, 7899), True, 'import os, torch, numpy as np\n'), ((8305, 8354), 'core.data_utils.graph_algorithms.all_pairs_shortest_paths', 'graph_algorithms.all_pairs_shortest_paths', (['adj', '(0)'], {}), '(adj, 0)\n', (8346, 8354), False, 'from core.data_utils import graph_algorithms\n')] |
import torch
from envs import shipping_assignment_state
from envs.shipping_assignment_state import ShippingAssignmentState
from network.physical_network import Node
from shipping_allocation import PhysicalNetwork
import numpy as np
from experiment_utils.Order import Order
def test_state_to_demand_per_warehouse_commodity():
    """state_to_demand_per_warehouse_commodity should flatten fixed-order
    demand into one vector of (warehouse x commodity) entries, zero-filled
    for warehouses with no orders."""
    # Given: a 3-commodity network and one order routed to each of two DCs.
    network = PhysicalNetwork(3, 5, 2, 100, 50, num_commodities=3)
    customer = Node(4, 100, 0, 0, "dc")
    warehouse_0 = Node(0, 100, 0, 0, "dc")
    warehouse_1 = Node(1, 100, 0, 0, "dc")
    orders = [
        # A total 105,60,60 to DC 0
        Order(np.array([50.0, 30.0, 30.0]), warehouse_0, customer, 0, "someord"),
        Order(np.array([55.0, 30.0, 30.0]), warehouse_1, customer, 0, "someord"),
    ]
    state = ShippingAssignmentState(
        0,
        network,
        fixed=orders,
        open=[],
        inventory=[],
        state_vector=None,
        big_m_counter_per_commodity=0,
        optimization_cost=0,
        big_m_units_per_commodity=0,
    )
    # When
    actual = shipping_assignment_state.state_to_demand_per_warehouse_commodity(state)
    # Then: demand appears per warehouse, third warehouse stays at zero.
    expected = np.array([50.0, 30.0, 30.0, 55.0, 30.0, 30.0, 0.0, 0.0, 0.0])
    assert (actual == expected).all()
    # Todo test that one commodity still works.
| [
"envs.shipping_assignment_state.state_to_demand_per_warehouse_commodity",
"numpy.array",
"envs.shipping_assignment_state.ShippingAssignmentState",
"network.physical_network.Node",
"shipping_allocation.PhysicalNetwork"
] | [((362, 414), 'shipping_allocation.PhysicalNetwork', 'PhysicalNetwork', (['(3)', '(5)', '(2)', '(100)', '(50)'], {'num_commodities': '(3)'}), '(3, 5, 2, 100, 50, num_commodities=3)\n', (377, 414), False, 'from shipping_allocation import PhysicalNetwork\n'), ((436, 460), 'network.physical_network.Node', 'Node', (['(4)', '(100)', '(0)', '(0)', '"""dc"""'], {}), "(4, 100, 0, 0, 'dc')\n", (440, 460), False, 'from network.physical_network import Node\n'), ((472, 496), 'network.physical_network.Node', 'Node', (['(0)', '(100)', '(0)', '(0)', '"""dc"""'], {}), "(0, 100, 0, 0, 'dc')\n", (476, 496), False, 'from network.physical_network import Node\n'), ((508, 532), 'network.physical_network.Node', 'Node', (['(1)', '(100)', '(0)', '(0)', '"""dc"""'], {}), "(1, 100, 0, 0, 'dc')\n", (512, 532), False, 'from network.physical_network import Node\n'), ((913, 1108), 'envs.shipping_assignment_state.ShippingAssignmentState', 'ShippingAssignmentState', (['(0)', 'physical_network'], {'fixed': 'fixed_orders', 'open': '[]', 'inventory': '[]', 'state_vector': 'None', 'big_m_counter_per_commodity': '(0)', 'optimization_cost': '(0)', 'big_m_units_per_commodity': '(0)'}), '(0, physical_network, fixed=fixed_orders, open=[],\n inventory=[], state_vector=None, big_m_counter_per_commodity=0,\n optimization_cost=0, big_m_units_per_commodity=0)\n', (936, 1108), False, 'from envs.shipping_assignment_state import ShippingAssignmentState\n'), ((1238, 1310), 'envs.shipping_assignment_state.state_to_demand_per_warehouse_commodity', 'shipping_assignment_state.state_to_demand_per_warehouse_commodity', (['state'], {}), '(state)\n', (1303, 1310), False, 'from envs import shipping_assignment_state\n'), ((617, 645), 'numpy.array', 'np.array', (['[50.0, 30.0, 30.0]'], {}), '([50.0, 30.0, 30.0])\n', (625, 645), True, 'import numpy as np\n'), ((769, 797), 'numpy.array', 'np.array', (['[55.0, 30.0, 30.0]'], {}), '([55.0, 30.0, 30.0])\n', (777, 797), True, 'import numpy as np\n'), ((1392, 1453), 
'numpy.array', 'np.array', (['[50.0, 30.0, 30.0, 55.0, 30.0, 30.0, 0.0, 0.0, 0.0]'], {}), '([50.0, 30.0, 30.0, 55.0, 30.0, 30.0, 0.0, 0.0, 0.0])\n', (1400, 1453), True, 'import numpy as np\n')] |
__all__ = ['PDataset']
import os
from typing import Union, List
import warnings
import numpy as np
from .rt_fastarray import FastArray
from .rt_enum import (
TypeRegister,
DisplayJustification,
)
from .rt_numpy import (
unique,
empty,
cumsum,
searchsorted,
max,
)
from .rt_dataset import Dataset
from .rt_sds import load_sds
from .rt_itemcontainer import ItemContainer
from .rt_groupby import GroupBy
class PDataset(Dataset):
    '''
    The PDataset class inherits from Dataset. It holds multiple datasets (previously stacked together) in contiguous slices.
    Each partition has a name and a contiguous slice that can be used to extract it from the larger Dataset.
    Extracting a partition is zero-copy. Partitions can be extracted using partition(), or bracket [] indexing.

    A PDataset is often returned when:
        Multiple Datasets are hstacked, i.e. hstack([ds1, ds2, ds3])
        Calling load_sds with stack=True, i.e. load_sds([file1, file2, file3], stack=True)

    Properties: prows, pdict, pnames, pcount, pgb, pgbu, pgroupby, pslices, piter, pcutoffs
    Methods: partition(), pslice(), showpartitions()

    pds['20190204'] or pds[20190204] will return a dataset for the given partition name

    Construction:
    -------------
    inputval : -list of files to load and stack
               -list of datasets to stack
               -regular dataset inputval (will only have one partition)

    PDataset([path1, path2, path3], (pnames))
        -call load_sds(stack=True)
        -paths become filenames
        -if pnames specified, use those, otherwise look for dates
        -if no dates, auto generate pnames

    PDataset([ds1, ds2, ds3], (filenames, pnames))
    PDataset(ds, (filenames, pnames))
        -call Dataset.hstack()
        -if pnames specified, use those
        -if filenames, look for dates
        -if no dates, auto generate pnames

    PDataset(arraydict, cutoffs, (filenames, pnames))
        -constructor from load_sds()
        -if pnames specified, use those
        -if filenames, look for dates
        -if no dates, auto generate pnames
    '''
# ------------------------------------------------------------
    def __init__(
        self,
        inputval: Union[list, dict, 'Dataset', 'ItemContainer'] = None,
        cutoffs=None,
        filenames: List[str] = None,
        pnames=None,
        showpartitions=True,
        **kwargs,
    ):
        """
        Build a PDataset from a list of Datasets/file paths, a dict of
        columns, an ItemContainer, or a single Dataset (one partition).

        Parameters
        ----------
        inputval : list, dict, Dataset or ItemContainer, optional
            Source data; a list is routed through _init_from_list.
        cutoffs : dict, optional
            Column name -> cumulative row cutoffs for the partitions.
        filenames : list of str, optional
            One source file per partition (may be empty).
        pnames : list or dict, optional
            Partition names; derived from filenames when omitted.
        showpartitions : bool
            Whether display shows partition labels on row numbers.
        """
        if inputval is None:
            inputval = dict()
        if filenames is None:
            filenames = list()
        # a single Dataset becomes a one-partition list
        if type(inputval) == TypeRegister.Dataset:
            inputval = [inputval]
        # stack datasets or load from list of files
        if isinstance(inputval, list):
            inputval, cutoffs, filenames, pnames = self._init_from_list(
                inputval, filenames, pnames
            )
        self._pre_init()
        # fast track for itemcontainer
        if isinstance(inputval, ItemContainer):
            self._init_from_itemcontainer(inputval)
        # load items from object that can be turned into dictionary
        else:
            inputval = self._init_columns_as_dict(inputval)
            self._init_from_dict(inputval)
        self._post_init(
            cutoffs=cutoffs,
            filenames=filenames,
            pnames=pnames,
            showpartitions=showpartitions,
        )
# ------------------------------------------------------------
    def _pre_init(self):
        '''
        Keep this in for chaining pre-inits in parent classes.
        '''
        # delegate shared pre-construction setup to Dataset
        super()._pre_init()
# ------------------------------------------------------------
    def _post_init(self, cutoffs, filenames, pnames, showpartitions):
        '''
        Final initializer for variables specific to PDataset.
        Also initializes variables from parent class.

        Parameters
        ----------
        cutoffs : dict or None
            Column name -> cumulative row cutoffs; every column carries the
            same cutoffs, so only the first value is kept.
        filenames : list of str
            Source file per partition (may be empty).
        pnames : list or dict or None
            Partition names; when None they are derived from filenames or
            auto-generated.
        showpartitions : bool
            Whether partition labels are shown in display.
        '''
        super()._post_init()
        self._showpartitions = showpartitions
        # cutoffs will be the same for dataset columns
        if cutoffs is not None:
            self._pcutoffs = list(cutoffs.values())[0]
        else:
            # assume one row, init from dataset
            self._pcutoffs = FastArray([self._nrows])
        # number of rows in each partition
        self._prows = self._pcutoffs.copy()
        if len(self._prows) > 1:
            # calculate row length: difference of consecutive cutoffs
            self._prows[1:] -= self._pcutoffs[:-1]
        # look for dates in filenames or autogenerate names
        if pnames is None:
            pnames, filenames = self._init_pnames_filenames(
                len(self._prows), pnames, filenames
            )
            self._pfilenames = filenames
            # name -> partition index
            self._pnames = {p: i for i, p in enumerate(pnames)}
        # use provided pnames
        else:
            self._pfilenames = filenames
            if isinstance(pnames, list):
                pnames = {p: i for i, p in enumerate(pnames)}
            self._pnames = pnames
        # the partition categorical is built lazily by the pcat property
        self._pcat = None
# ------------------------------------------------------------
    @classmethod
    def _filenames_to_pnames(cls, filenames):
        '''
        At least two filenames must be present to compare
        Algo will reverse the string on the assumption that pathnames can vary in the front of the string
        It also assumes that the filenames end similarly, such as ".SDS"
        It will search for the difference and look for digits, then try to extract the digits
        '''
        # reverse all the filenames
        if len(filenames) > 0:
            rfilenames = [f[::-1] for f in filenames]
            str_arr = TypeRegister.FastArray(rfilenames)
            str_numba = str_arr.numbastring
            if len(filenames) > 1:
                # mask of character positions where filenames differ
                match_mask = str_numba[0] != str_numba[1]
                str_len = len(match_mask)
                for i in range(len(filenames) - 2):
                    # inplace OR loop so that the TRUE propagates
                    match_mask += str_numba[0] != str_numba[i + 2]
                # first differing position
                for i in range(str_len):
                    if match_mask[i]:
                        break
                start = i
                # first matching position after the differing run
                for i in range(start + 1, str_len):
                    if not match_mask[i]:
                        break
                end = i
                # expand start if possible
                while start > 0:
                    char = str_numba[0][start - 1]
                    # as long as a numeric digit, keep expanding
                    # NOTE(review): 48..58 includes ':' (58); digits are
                    # 48..57 — suspected off-by-one, confirm intent
                    if char >= 48 and char <= 58:
                        start = start - 1
                    else:
                        break
                # expand end if possible
                while end < str_len:
                    char = str_numba[0][end]
                    if char >= 48 and char <= 58:
                        end = end + 1
                    else:
                        break
                # check to see if we captured a number
                firstchar = str_numba[0][start]
                lastchar = str_numba[0][end - 1]
                if (
                    start < end
                    and firstchar >= 48
                    and firstchar <= 58
                    and lastchar >= 48
                    and lastchar <= 58
                ):
                    pnames = []
                    # reinterpret the captured bytes as a fixed-width string
                    viewtype = 'S' + str(end - start)
                    for i in range(len(filenames)):
                        newstring = str_numba[i][start:end].view(viewtype)
                        newstring = newstring[0].astype('U')
                        # append the reverse
                        pnames.append(newstring[::-1])
                    u = unique(pnames)
                    if len(u) == len(filenames):
                        return pnames
                    # removed, prints during every column index/copy
                    # print(f"Failed to find unique numbers in filenames {pnames}")
            else:
                # only one file
                filename = str(rfilenames[0])
                start = -1
                stop = -1
                # search for first number
                for i in range(len(filename)):
                    if filename[i].isdigit():
                        if start == -1:
                            start = i
                    elif start != -1:
                        stop = i
                        break
                if start != -1:
                    if stop == -1:
                        stop = start + 1
                    # extract just the number
                    filename = filename[start:stop]
                    return [filename[::-1]]
        # failed to find unique strings in filenames
        # default to p0, p1, p2
        pnames = cls._auto_pnames(len(filenames))
        return pnames
# ------------------------------------------------------------
@classmethod
def _init_from_list(cls, dlist, filenames, pnames):
'''
Construct a PDataset from multiple datasets, or by loading multiple files.
'''
# make sure only one type
listtype = {type(i) for i in dlist}
if len(listtype) == 1:
listtype = list(listtype)[0]
else:
raise TypeError(f'Found multiple types in constructor list {listtype}')
# hstack datasets
if listtype == Dataset:
start = 0
cutoffs = cumsum([ds.shape[0] for ds in dlist])
cutoffs = {'cutoffs': cutoffs}
ds = TypeRegister.Dataset.concat_rows(dlist)
# extract itemcontainer
ds = ds._all_items
pnames, filenames = cls._init_pnames_filenames(
len(dlist), pnames, filenames
)
# perform a .sds load from multiple files
elif issubclass(listtype, (str, bytes, os.PathLike)):
ds = load_sds(dlist, stack=True)
cutoffs = {'cutoffs': ds._pcutoffs}
filenames = ds._pfilenames
if pnames is None:
pnames = ds._pnames # dict
# extract itemcontainer
ds = ds._all_items
else:
raise TypeError(f'Cannot construct from list of type {listtype}')
return ds, cutoffs, filenames, pnames
# ------------------------------------------------------------
@classmethod
def _auto_pnames(cls, pcount):
'''
Auto generate partition names if none provided and no date found in filenames.
'''
return ['p' + str(i) for i in range(pcount)]
# ------------------------------------------------------------
    def _autocomplete(self) -> str:
        # short identifier used by the autocompletion/display machinery
        return f'PDataset{self.shape}'
# ------------------------------------------------------------
@classmethod
def _init_pnames_filenames(cls, pcount, pnames, filenames):
'''
Initialize filenames, pnames based on what was provided to the constructor.
If no pnames provided, try to derive a date from filenames
If no date found, or no filenames provided, use default names [p0, p1, p2 ...]
Parameters
----------
pcount : int
number of partitions, in case names need to be auto generated
pnames : list of str, optional
list of partition names or None
filenames : sequence of str, optional
list of file paths (possibly empty)
'''
if pnames is None:
if filenames is None or len(filenames) == 0:
filenames = []
pnames = cls._auto_pnames(pcount)
else:
pnames = cls._filenames_to_pnames(filenames)
return pnames, filenames
# ------------------------------------------------------------
    def _copy(self, deep=False, rows=None, cols=None, base_index=0, cls=None):
        ''' returns a PDataset if no row selection, otherwise Dataset'''
        if rows is None:
            # column-only selection leaves partition boundaries valid
            newcols = self._as_itemcontainer(
                deep=deep, rows=rows, cols=cols, base_index=base_index
            )
            # create a new PDataset, carrying over cutoffs and names
            pds = type(self)(
                newcols,
                cutoffs={'cutoffs': self.pcutoffs},
                filenames=self._pfilenames,
                pnames=self._pnames,
                base_index=base_index,
            )
            pds = self._copy_attributes(pds, deep=deep)
        else:
            # row slicing will break partitions, return a regular Dataset
            cls = TypeRegister.Dataset
            pds = super()._copy(
                deep=deep, rows=rows, cols=cols, base_index=base_index, cls=cls
            )
        return pds
# ------------------------------------------------------------
    def _ipython_key_completions_(self):
        # For tab autocomplete with __getitem__
        # NOTE: %config IPCompleter.greedy=True might have to be set
        # autocompleter will sort the keys
        # partitions are offered alongside column names
        return self.keys() + self.pnames
# ------------------------------------------------------------
    @property
    def pcutoffs(self):
        '''
        Returns
        -------
        Cutoffs for partition. For slicing, maintain contiguous arrays.
        Cumulative end-row (exclusive) of each partition.

        Examples
        --------
        >>> pds.pcutoffs
        FastArray([1447138, 3046565, 5344567], dtype=int64)
        '''
        return self._pcutoffs
# ------------------------------------------------------------
    @property
    def prows(self):
        '''
        Returns
        -------
        An array with the number of rows in each partition.

        Examples
        --------
        Example below assumes 3 filenames date encoded with datasets

        >>> pds = load_sds([file1, file2, file3], stack=True)
        >>> pds.prows
        FastArray([1447138, 2599427, 1909895], dtype=int64)
        '''
        # computed once in _post_init as the difference of consecutive cutoffs
        return self._prows
# ------------------------------------------------------------
    @property
    def pcount(self):
        '''
        Returns
        -------
        Number of partitions

        Examples
        --------
        Example below assumes 3 filenames date encoded with datasets

        >>> pds = load_sds([file1, file2, file3], stack=True)
        >>> pds.pcount
        3
        '''
        return len(self._prows)
# ------------------------------------------------------------
    @property
    def pnames(self):
        '''
        Returns
        -------
        A list with the names of the partitions

        Example
        --------
        Example below assumes 3 filenames date encoded with datasets

        >>> pds = load_sds([file1, file2, file3], stack=True)
        >>> pds.pnames
        ['20190205', '20190206', '20190207']
        '''
        # _pnames is a name -> index dict; insertion order is partition order
        return [*self._pnames.keys()]
def set_pnames(self, pnames):
'''
Input
-----
A list of strings
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pnames
['20190205', '20190206', '20190207']
>>> pds.set_pnames(['Jane', 'John', 'Jill'])
['Jane', 'John', 'Jill']
'''
if isinstance(pnames, list):
if len(pnames) == len(self._pnames):
newpnames = {}
for i in range(len(pnames)):
newpnames[pnames[i]] = i
if len(newpnames) == len(self._pnames):
self._pnames = newpnames
else:
raise ValueError(f'The new pnames must be unique names: {pnames}')
else:
raise ValueError(
f'The length of the new pnames must match the length of the old pnames: {len(self._pnames)}'
)
else:
raise ValueError(f'A list of string must be passed in')
return [*self._pnames.keys()]
# ------------------------------------------------------------
@property
def pdict(self):
'''
Returns
--------
A dictionary with the partition names and the partition slices.
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1, file2, file3], stack=True)
>>> pds.pdict
{'20190204': slice(0, 1447138, None),
'20190205': slice(1447138, 3046565, None),
'20190206': slice(3046565, 4509322, None)}
'''
pdict = {name: self.pslice(i) for i, name in enumerate(self.pnames)}
return pdict
# ------------------------------------------------------------
# -------------------------------------------------------
    def pgb(self, by, **kwargs):
        """Equivalent to :meth:`~rt.rt_dataset.Dataset.pgroupby`"""
        # forces sorted group keys, then delegates
        kwargs['sort'] = True
        return self.pgroupby(by, **kwargs)
# -------------------------------------------------------
    def pgroupby(self, by, **kwargs):
        """GroupBy on `by`; the partition cutoffs are forwarded, presumably so
        reductions respect partition boundaries — see igroupby."""
        return GroupBy(self, by, cutoffs=self._pcutoffs, **kwargs)
    def igroupby(self):
        '''
        Lazily generate a categorical binned by each partition.
        Data will be attached to categorical, so operations can be called without specifying data.
        This allows reduce functions to be applied per partition.

        NOTE: side effect — inserts a 'Partition' column (from pcat) at the
        front of the dataset on first use.

        Examples
        --------
        Example below assumes 3 filenames date encoded with datasets

        >>> pds = load_sds([file1,file2, file2], stack=True)
        >>> pds.pgroupby['AskSize'].sum()
        *Partition   TradeSize
        ----------   ---------
        20190204     1.561e+07
        20190205     1.950e+07
        20190206     1.532e+07

        See Also: Dataset.groupby, Dataset.gb, Dataset.gbu
        '''
        reserved_name = 'Partition'
        if reserved_name not in self:
            self[reserved_name] = self.pcat
            self.col_move_to_front(reserved_name)
        return self.gb(reserved_name)
    @property
    def pcat(self):
        '''
        Lazy generates a categorical for row labels callback or pgroupby
        '''
        if self._pcat is None:
            # codes start at 1: partition i gets code i + 1
            # (presumably 0 is the Categorical invalid bin — confirm)
            idx = empty((self.shape[0],), dtype=np.int32)
            for i in range(self.pcount):
                idx[self.pslice(i)] = i + 1
            label = self.pnames
            self._pcat = TypeRegister.Categorical(idx, label)
        return self._pcat
# ------------------------------------------------------------
    def prow_labeler(self, rownumbers, style):
        '''
        Display calls this routine back to replace row numbers.

        rownumbers : fancy index of row numbers being displayed
        style : ColumnStyle object - default from DisplayTable, can be changed

        Returns: label header, label array, style
        '''
        if self._showpartitions:
            style.align = DisplayJustification.Right
            # use the cutoffs to generate which partition index
            pindex = searchsorted(self._pcutoffs, rownumbers, side='right')
            plabels = TypeRegister.FastArray(self.pnames)[pindex]
            # find the maximum string width for the rownumber
            if len(rownumbers) > 0: maxnum = max(rownumbers)
            else: maxnum = 0
            width = len(str(maxnum))
            # right justify numbers
            rownumbers = rownumbers.astype('S')
            rownumbers = np.chararray.rjust(rownumbers, width)
            # column header
            header = 'partition + #'
            # e.g. b'20190204   12'
            rownumbers = plabels + ' ' + rownumbers
            # set the style width to override the string trim
            style.width = rownumbers.itemsize
            return header, rownumbers, style
        else:
            return '#', rownumbers, style
# ------------------------------------------------------------
    @property
    def _row_numbers(self):
        # display will check for the existence of this method
        # return a callback to change the row numbers
        return self.prow_labeler
# ------------------------------------------------------------
def showpartitions(self, show=True):
''' toggle whether partitions are shown on the left '''
if show:
self._showpartitions = True
else:
self._showpartitions = False
# ------------------------------------------------------------
@property
def piter(self):
'''
Iterate over dictionary of arrays for each partition.
Yields key (load source) -> value (dataset as dictionary)
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1,file2, file2], stack=True)
>>> for name, ds in pds.iter: print(name)
20190204
20190205
20190206
'''
label = self.pnames
start = 0
for i in range(self.pcount):
yield label[i], self.partition(i)
# -------------------------------------------------------
@property
def pslices(self):
'''
Return the slice (start,end) associated with the partition number
See Also
--------
pslices, pdict
Examples
--------
Example below assumes 3 filenames date encoded with datasets
>>> pds = load_sds([file1,file2, file2], stack=True)
>>> pds.pslices
[slice(0, 1447138, None),
slice(1447138, 3046565, None),
slice(3046565, 4509322, None)]
'''
pslices = [self.pslice(i) for i in range(self.pcount)]
return pslices
# -------------------------------------------------------
def pslice(self, index):
'''
Return the slice (start,end) associated with the partition number
See Also
--------
pslices, pdict
Examples
--------
>>> pds.pslice(0)
slice(0, 1447138, None)
'''
if isinstance(index, (int, np.integer)):
if index == 0:
return slice(0, self.pcutoffs[index])
else:
return slice(self.pcutoffs[index - 1], self.pcutoffs[index])
raise IndexError(
f'Cannot slice a partition with type {type(index)!r}. Use an integer instead.'
)
# -------------------------------------------------------
def partition(self, index):
'''
Return the Dataset associated with the partition number
Examples
--------
Example below assumes 3 filenames with datasets
>>> pds = load_sds([file1, file2, file2], stack=True)
>>> pds.partition(0)
'''
if isinstance(index, (int, np.integer)):
# this will route to the dataset
return self._copy(rows=self.pslice(index))
if isinstance(index, str):
# this will loop back if the string is a partition name
return self[index]
raise IndexError(
f'Cannot index a parition with type {type(index)!r}. Use an integer instead.'
)
# -------------------------------------------------------
def __getitem__(self, index):
"""
:param index: (rowspec, colspec) or colspec
:return: the indexed row(s), cols(s), sub-dataset or single value
:raise IndexError:
:raise TypeError:
:raise KeyError:
"""
try:
return super().__getitem__(index)
except:
# if it fails, maybe it was a partition selection
if isinstance(index, (int, np.integer)):
# convert int to string to lookup
index = str(index)
# the string was not a column name, now check for partition name
if isinstance(index, str):
if index in self._pnames:
# return the dataset for that partition
return self.partition(self._pnames[index])
else:
raise KeyError(
f'the key {index!r} was not found as column name or parition name'
)
else:
raise KeyError(f'could not index PDataset with {type(index)}')
# --------------------------------------------------------------------------
def save(
self,
path='',
share=None,
compress=True,
overwrite=True,
name=None,
onefile: bool = False,
bandsize=None,
append=None,
complevel=None,
):
warnings.warn(
f"To be implemented. PDataset will currently be saved / loaded as a Dataset."
)
super().save(
path=path,
share=share,
compress=compress,
overwrite=overwrite,
name=name,
onefile=onefile,
bandsize=bandsize,
append=append,
complevel=complevel,
)
# --------------------------------------------------------------------------
    @classmethod
    def hstack(cls, pds_list):
        '''
        Stacks columns from multiple datasets.
        see: Dataset.concat_rows

        Raises
        ------
        NotImplementedError
            Always — PDataset stacking is not supported yet.
        '''
        raise NotImplementedError("PDataset does not stack yet")
# ------------------------------------------------------------
@classmethod
def pload(cls, path, start, end, include=None, threads=None, folders=None):
'''
Returns a PDataset of stacked files from multiple days.
Will load all files found within the date range provided.
Parameters:
-----------
path : format string for filepath, {} in place of YYYYMMDD. {} may appear multiple times.
start : integer or string start date in format YYYYMMDD
end : integer or string end date in format YYYYMMDD
'''
# insert date string at each of these
fmtcount = path.count('{}')
# final loader will check if dates exist, kill warnings?
pnames = TypeRegister.Date.range(str(start), str(end)).yyyymmdd.astype('U')
try:
import sotpath
files = [sotpath.path2platform(path.format(*[d] * fmtcount)) for d in pnames]
except:
files = [path.format(*[d] * fmtcount) for d in pnames]
pds = load_sds(
files, include=include, stack=True, threads=threads, folders=folders
)
return pds
# ------------------------------------------------------------
def psave(self):
'''
Does not work yet. Would save backout all the partitions.
'''
raise NotImplementedError(f'not implemented yet')
TypeRegister.PDataset = PDataset
| [
"warnings.warn",
"numpy.chararray.rjust"
] | [((25284, 25386), 'warnings.warn', 'warnings.warn', (['f"""To be implemented. PDataset will currently be saved / loaded as a Dataset."""'], {}), "(\n f'To be implemented. PDataset will currently be saved / loaded as a Dataset.'\n )\n", (25297, 25386), False, 'import warnings\n'), ((19939, 19976), 'numpy.chararray.rjust', 'np.chararray.rjust', (['rownumbers', 'width'], {}), '(rownumbers, width)\n', (19957, 19976), True, 'import numpy as np\n')] |
"""
pore_distribution module handles cumulative and frequency pore distributions
from MIP and data.
Created on 29/10/2019
MIT License - Copyright (c) 2019 <NAME>
@author: PedroMat8
@email: <EMAIL>
Please cite the following DOI: 10.5281/zenodo.3524929
GitHub repository: https://github.com/PedroMat8/micropy
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
from numba import jit
class DataElaboration():
    """Stores the frequency (psd) and cumulative (cpd) pore distributions of the data set"""
    def __init__(self, inputs):
        """Wrap raw inputs and allocate psd/cpd containers.

        Parameters
        ----------
        inputs : object
            Raw inputs forwarded to the nested ``Inputs`` class; the wrapped
            result must expose an ``intervals`` attribute.
        """
        self.inputs = self.Inputs(inputs)
        # PSD/CPD containers sized by the requested number of intervals
        # (nested classes defined elsewhere in this module)
        self.psd = self.PSD(self.inputs.intervals)
        self.cpd = self.CPD(self.inputs.intervals)
    def plot_mip(self, inputs_gtec):
        """Plot cpd and psd against expected void ratio"""
        # interactive: ask whether the expected void ratio is e = w*Gs
        saturated = (input('Is e=wGs? [Y or N]: '))
        if saturated in ('YES', 'yes', 'Yes', 'Y', 'y'):
            gtec = self.InputsGtec(inputs_gtec)
            e = gtec.w*gtec.Gs
        else:
            e = [float(input('Input void ratio: '))]
        cpd_d = self.cpd.d
        cpd_e = self.cpd.e
        psd_d = self.psd.d
        psd_e = self.psd.e
        # TODO: This part below partially replicates method plot_data"
        fig, axs = plt.subplots(2)
        fig.suptitle('Void ratio comparison')
        axs[0].semilogx(cpd_d, cpd_e)
        # horizontal reference line at the expected void ratio
        axs[0].semilogx(cpd_d, [e] * len(cpd_d))
        axs[0].set(xlabel='diameters [um]', ylabel='void ratio [e]')
        axs[1].semilogx(psd_d, psd_e)
        axs[1].set(xlabel='diameters [um]', ylabel='frequency de/d(logd)')
    @staticmethod
    def plot_data(cpd_d, cpd_e, psd_d, psd_e):
        """Plot cpd and psd

        Two stacked panels: frequency (psd) on top, cumulative (cpd) below.
        Returns the matplotlib (fig, axs) pair for further customization.
        """
        fig, axs = plt.subplots(2)
        fig.suptitle('Distributions')
        axs[0].semilogx(psd_d, psd_e)
        axs[0].set(xlabel='diameters [um]', ylabel='frequency de/d(logd)')
        axs[1].semilogx(cpd_d, cpd_e)
        axs[1].set(xlabel='diameters [um]', ylabel='void ratio [e]')
        return fig, axs
    def save_output(self, name='output'):
        """Save outputs

        Writes a tab-separated text file '<name>.txt' with the cpd and psd
        arrays side by side.
        """
        # NOTE(review): psd arrays appear to be one element shorter than the
        # cpd arrays, so they are zero-padded to build a rectangular table —
        # confirm the length relationship
        matrix = np.column_stack((self.cpd.d, self.cpd.e,
                                 np.append(self.psd.d,0),
                                 np.append(self.psd.e,0)))
        header=('diameters_cpd\tvoid_ratio_cpd\tdiameters_psd\tvoid_ratio_psd')
        namefile = name + '.txt'
        np.savetxt(namefile, matrix,
                   header=header, delimiter='\t', fmt='%s')
@staticmethod
def interpolate_e(d_min, d_max, d_starting, e_starting, intervals):
delta_log = ((np.log10(d_max)-np.log10(d_min)) / (intervals-1))
d_starting_log = np.log10(d_starting)
e_new = np.empty(intervals)
n = np.arange(0, intervals)
ndlog = np.multiply(n ,delta_log)
d_new_log = np.add(np.log10(d_min), ndlog)
d_new = 10**d_new_log
e_new = np.interp(d_new_log, d_starting_log, e_starting)
return d_new, e_new
    def get_cpd_from_array(self, d, e):
        """Get cpd from array"""
        # sort ascending (flipping a descending void-ratio curve), then
        # resample onto the log-spaced grid defined by the inputs
        [d, e] = self.sort_cpd(d, e)
        [self.cpd.d, self.cpd.e] = DataElaboration.interpolate_e(
            self.inputs.dmin, self.inputs.dmax, d, e, self.inputs.intervals)
    def get_cpd_from_mip(self, input_file, inputs_gtec):
        """Get cpd from MIP

        Reads a two-column mercury-intrusion (MIP) text file
        (pressure, intruded volume), converts pressures to entrance
        diameters, computes the void ratio from the intruded volume and
        stores the resampled cumulative distribution in ``self.cpd``.
        May shrink ``self.inputs.intervals`` and (interactively) adjust
        ``dmin``/``dmax`` when the data does not cover the requested range.
        """
        gtec = self.InputsGtec(inputs_gtec)
        alf = np.loadtxt(input_file, usecols=(0, 1), skiprows=0)
        # 0.00689475908677536 is the psi->MPa factor; assumes the input
        # pressures are in psi -- TODO confirm input units
        p = alf[:, 0]*0.00689475908677536  # [MPa]
        v = alf[:, 1]*1000  # [mm3]
        # volume of solids; presumably Ms in g and Gs in g/cm3 -- TODO confirm
        Vs = gtec.Ms/gtec.Gs*1000
        e = v/Vs
        # 2 for parallel plates and 4 for axial symmetry
        # NOTE(review): surface tension (0.48) and contact angle (147) are
        # hardcoded although InputsGtec carries surf_tension and teta;
        # consider using gtec.surf_tension / gtec.teta instead.
        dd = -2*0.48*np.cos(np.radians(147))/p
        if len(dd) < self.inputs.intervals:
            print('Too many intervals. Reduced to ', len(dd))
            self.inputs.intervals = len(dd)
        print('Max available diameter [um]: ', np.max(dd))
        print('Min available diameter [um]: ', np.min(dd))
        if self.inputs.dmax > np.max(dd):
            new_dmax = input('dmax is too large, input new diameter < ' + str(
                round(np.max(dd))) + ': ')
            self.inputs.dmax = round(float(new_dmax))
        if self.inputs.dmin < np.min(dd):
            new_dmin = input('dmin is too small, input new diameter > ' + str(
                round(np.min(dd), 4)) + ': ')
            self.inputs.dmin = round(float(new_dmin), 4)
        self.get_cpd_from_array(dd, e)
    def get_cpd_from_file(self, input_file):
        """Get cpd from txt file

        Reads a two-column text file of (diameter, void ratio) pairs and
        stores the resampled cumulative distribution in ``self.cpd``.
        May shrink ``self.inputs.intervals`` and (interactively) adjust
        ``dmin``/``dmax`` when the data does not cover the requested range.
        """
        alf = np.loadtxt(input_file, usecols=(0, 1), skiprows=0)
        d = alf[:, 0]
        e = alf[:, 1]
        print('Max available diameter [um]: ', np.max(d))
        print('Min available diameter [um]: ', np.min(d))
        if len(d) < self.inputs.intervals:
            print('Too many intervals. Reduced to ', len(d))
            self.inputs.intervals = len(d)
        if self.inputs.dmax > np.max(d):
            new_dmax = input('dmax is too large, input new diameter < ' + str(
                round(np.max(d))) + ': ')
            self.inputs.dmax = round(float(new_dmax))
        if self.inputs.dmin < np.min(d):
            new_dmin = input('dmin is too small, input new diameter > ' + str(
                round(np.min(d), 4)) + ': ')
            self.inputs.dmin = round(float(new_dmin), 4)
        self.get_cpd_from_array(d, e)
        return
@staticmethod
def sort_cpd(d, e):
"""Sort cpd"""
if d[0] >= d[-1]:
if e[0] >= e[-1]:
return np.sort(d), np.sort(e)
else:
return np.sort(d), np.sort(max(e)-e)
else:
if e[0] <= e[-1]:
return np.sort(d), np.sort(e)
else:
return np.sort(d), np.sort(max(e)-e)
    class Inputs:
        """Resampling settings: number of intervals and diameter bounds."""
        def __init__(self, inputs):
            # `inputs` is a mapping with keys 'intervals', 'dmax', 'dmin'
            self.intervals = inputs['intervals']
            self.dmax = inputs['dmax']
            self.dmin = inputs['dmin']
    class InputsGtec:
        """Geotechnical sample properties read from a mapping.

        Attribute meanings inferred from usage elsewhere in this class
        (e = w*Gs in plot_mip, Vs = Ms/Gs in get_cpd_from_mip):
        Gs presumably the specific gravity of solids, Ms the mass of
        solids, w the water content; teta and surf_tension look like the
        mercury contact angle and surface tension but are currently
        unused -- TODO confirm.
        """
        def __init__(self, inputs):
            self.Gs = inputs['Gs']
            self.Ms = inputs['Ms']
            self.w = inputs['w']
            self.teta = inputs['teta']
            self.surf_tension = inputs['surf_tension']
    class CPD():
        """Cumulative distribution buffers: diameters d and void ratios e."""
        def __init__(self, intervals):
            # np.empty: arrays are allocated uninitialised and filled later
            dim = intervals
            self.d = np.empty(dim)
            self.e = np.empty(dim)
class PSD(CPD):
def __init__(self, intervals):
dim = intervals
self.d = np.empty(dim)
self.e = np.empty(dim)
# self.norm = np.empty(dim)
@staticmethod
def get_psd_from_cpd(cpd_d, cpd_e):
"""Create psd from cpd.
Before calculating psd, cpd is normalized."""
[cpd_d, cpd_e] = DataElaboration.sort_cpd(cpd_d, cpd_e)
cpd_e /= cpd_e.max()
alf = np.size(cpd_d, 0)
psd_d = np.empty(alf-1)
psd_e = np.empty(alf-1)
for i in range(alf-1):
foo = 10**((np.log10(cpd_d[i]) + np.log10(cpd_d[i+1])) / 2)
foo1 = (cpd_e[i+1]-cpd_e[i])/(np.log10(cpd_d[i+1]/cpd_d[i]))
psd_d[i] = foo
psd_e[i] = foo1
if foo1 < 0:
sys.exit('negative psd')
return psd_d, psd_e
| [
"numpy.radians",
"numpy.size",
"numpy.multiply",
"numpy.empty",
"numpy.savetxt",
"numpy.append",
"numpy.max",
"numpy.min",
"numpy.arange",
"numpy.loadtxt",
"numpy.sort",
"numpy.interp",
"numpy.log10",
"matplotlib.pyplot.subplots",
"sys.exit"
] | [((1193, 1208), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1205, 1208), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1656), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1653, 1656), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2376), 'numpy.savetxt', 'np.savetxt', (['namefile', 'matrix'], {'header': 'header', 'delimiter': '"""\t"""', 'fmt': '"""%s"""'}), "(namefile, matrix, header=header, delimiter='\\t', fmt='%s')\n", (2317, 2376), True, 'import numpy as np\n'), ((2585, 2605), 'numpy.log10', 'np.log10', (['d_starting'], {}), '(d_starting)\n', (2593, 2605), True, 'import numpy as np\n'), ((2622, 2641), 'numpy.empty', 'np.empty', (['intervals'], {}), '(intervals)\n', (2630, 2641), True, 'import numpy as np\n'), ((2655, 2678), 'numpy.arange', 'np.arange', (['(0)', 'intervals'], {}), '(0, intervals)\n', (2664, 2678), True, 'import numpy as np\n'), ((2695, 2720), 'numpy.multiply', 'np.multiply', (['n', 'delta_log'], {}), '(n, delta_log)\n', (2706, 2720), True, 'import numpy as np\n'), ((2818, 2866), 'numpy.interp', 'np.interp', (['d_new_log', 'd_starting_log', 'e_starting'], {}), '(d_new_log, d_starting_log, e_starting)\n', (2827, 2866), True, 'import numpy as np\n'), ((3297, 3347), 'numpy.loadtxt', 'np.loadtxt', (['input_file'], {'usecols': '(0, 1)', 'skiprows': '(0)'}), '(input_file, usecols=(0, 1), skiprows=0)\n', (3307, 3347), True, 'import numpy as np\n'), ((4448, 4498), 'numpy.loadtxt', 'np.loadtxt', (['input_file'], {'usecols': '(0, 1)', 'skiprows': '(0)'}), '(input_file, usecols=(0, 1), skiprows=0)\n', (4458, 4498), True, 'import numpy as np\n'), ((2748, 2763), 'numpy.log10', 'np.log10', (['d_min'], {}), '(d_min)\n', (2756, 2763), True, 'import numpy as np\n'), ((3789, 3799), 'numpy.max', 'np.max', (['dd'], {}), '(dd)\n', (3795, 3799), True, 'import numpy as np\n'), ((3848, 3858), 'numpy.min', 'np.min', (['dd'], {}), '(dd)\n', (3854, 3858), True, 'import numpy as np\n'), ((3891, 3901), 
'numpy.max', 'np.max', (['dd'], {}), '(dd)\n', (3897, 3901), True, 'import numpy as np\n'), ((4114, 4124), 'numpy.min', 'np.min', (['dd'], {}), '(dd)\n', (4120, 4124), True, 'import numpy as np\n'), ((4591, 4600), 'numpy.max', 'np.max', (['d'], {}), '(d)\n', (4597, 4600), True, 'import numpy as np\n'), ((4649, 4658), 'numpy.min', 'np.min', (['d'], {}), '(d)\n', (4655, 4658), True, 'import numpy as np\n'), ((4838, 4847), 'numpy.max', 'np.max', (['d'], {}), '(d)\n', (4844, 4847), True, 'import numpy as np\n'), ((5059, 5068), 'numpy.min', 'np.min', (['d'], {}), '(d)\n', (5065, 5068), True, 'import numpy as np\n'), ((6253, 6266), 'numpy.empty', 'np.empty', (['dim'], {}), '(dim)\n', (6261, 6266), True, 'import numpy as np\n'), ((6288, 6301), 'numpy.empty', 'np.empty', (['dim'], {}), '(dim)\n', (6296, 6301), True, 'import numpy as np\n'), ((6411, 6424), 'numpy.empty', 'np.empty', (['dim'], {}), '(dim)\n', (6419, 6424), True, 'import numpy as np\n'), ((6446, 6459), 'numpy.empty', 'np.empty', (['dim'], {}), '(dim)\n', (6454, 6459), True, 'import numpy as np\n'), ((6780, 6797), 'numpy.size', 'np.size', (['cpd_d', '(0)'], {}), '(cpd_d, 0)\n', (6787, 6797), True, 'import numpy as np\n'), ((6818, 6835), 'numpy.empty', 'np.empty', (['(alf - 1)'], {}), '(alf - 1)\n', (6826, 6835), True, 'import numpy as np\n'), ((6854, 6871), 'numpy.empty', 'np.empty', (['(alf - 1)'], {}), '(alf - 1)\n', (6862, 6871), True, 'import numpy as np\n'), ((2101, 2125), 'numpy.append', 'np.append', (['self.psd.d', '(0)'], {}), '(self.psd.d, 0)\n', (2110, 2125), True, 'import numpy as np\n'), ((2160, 2184), 'numpy.append', 'np.append', (['self.psd.e', '(0)'], {}), '(self.psd.e, 0)\n', (2169, 2184), True, 'import numpy as np\n'), ((2510, 2525), 'numpy.log10', 'np.log10', (['d_max'], {}), '(d_max)\n', (2518, 2525), True, 'import numpy as np\n'), ((2526, 2541), 'numpy.log10', 'np.log10', (['d_min'], {}), '(d_min)\n', (2534, 2541), True, 'import numpy as np\n'), ((3572, 3587), 'numpy.radians', 'np.radians', 
(['(147)'], {}), '(147)\n', (3582, 3587), True, 'import numpy as np\n'), ((5454, 5464), 'numpy.sort', 'np.sort', (['d'], {}), '(d)\n', (5461, 5464), True, 'import numpy as np\n'), ((5466, 5476), 'numpy.sort', 'np.sort', (['e'], {}), '(e)\n', (5473, 5476), True, 'import numpy as np\n'), ((5518, 5528), 'numpy.sort', 'np.sort', (['d'], {}), '(d)\n', (5525, 5528), True, 'import numpy as np\n'), ((5615, 5625), 'numpy.sort', 'np.sort', (['d'], {}), '(d)\n', (5622, 5625), True, 'import numpy as np\n'), ((5627, 5637), 'numpy.sort', 'np.sort', (['e'], {}), '(e)\n', (5634, 5637), True, 'import numpy as np\n'), ((5679, 5689), 'numpy.sort', 'np.sort', (['d'], {}), '(d)\n', (5686, 5689), True, 'import numpy as np\n'), ((7028, 7061), 'numpy.log10', 'np.log10', (['(cpd_d[i + 1] / cpd_d[i])'], {}), '(cpd_d[i + 1] / cpd_d[i])\n', (7036, 7061), True, 'import numpy as np\n'), ((7172, 7196), 'sys.exit', 'sys.exit', (['"""negative psd"""'], {}), "('negative psd')\n", (7180, 7196), False, 'import sys\n'), ((6934, 6952), 'numpy.log10', 'np.log10', (['cpd_d[i]'], {}), '(cpd_d[i])\n', (6942, 6952), True, 'import numpy as np\n'), ((6955, 6977), 'numpy.log10', 'np.log10', (['cpd_d[i + 1]'], {}), '(cpd_d[i + 1])\n', (6963, 6977), True, 'import numpy as np\n'), ((4008, 4018), 'numpy.max', 'np.max', (['dd'], {}), '(dd)\n', (4014, 4018), True, 'import numpy as np\n'), ((4231, 4241), 'numpy.min', 'np.min', (['dd'], {}), '(dd)\n', (4237, 4241), True, 'import numpy as np\n'), ((4954, 4963), 'numpy.max', 'np.max', (['d'], {}), '(d)\n', (4960, 4963), True, 'import numpy as np\n'), ((5175, 5184), 'numpy.min', 'np.min', (['d'], {}), '(d)\n', (5181, 5184), True, 'import numpy as np\n')] |
from param import Param
from grid import Grid
from fluid2d import Fluid2d
import numpy as np
""" Reentrant channel with prescribed transport through the
channel. The transport is controlled via psi0, the streamfunction at
the North Wall -- psi=0 on the South Wall. An island with psi=psi0/2
is also set in the middle of the channel. Even though there is no
explicit forcing, this setup creates an island wake. Control parameters
include:
- Rd/L, relative size of the deformation radius beta Rd^2 L / psi0,
- relative importance of the jet speed vs Rossby wave speed """
param = Param('default.xml')
param.modelname = 'quasigeostrophic'
param.expname = 'channel'
# domain and resolution
param.nx = 64*4
param.ny = param.nx/4
param.npy = 1
param.Lx = 4.
param.Ly = param.Lx/4
param.geometry = 'xchannel'
# time
param.tend = 5000.
param.cfl = 1.2
param.adaptable_dt = True
param.dt = 1.
param.dtmax = 100.
# discretization
param.order = 5
# output
param.var_to_save = ['pv', 'u', 'v', 'psi', 'pvanom']
param.list_diag = 'all'
param.freq_his = 20
param.freq_diag = 5.
# plot
param.plot_var = 'pv'
param.freq_plot = 10
a = 0.5
param.cax = [-a, a]
param.plot_interactive = True
param.colorscheme = 'imposed'
param.generate_mp4 = False
# physics
param.beta = 1.
param.Rd = .1
param.forcing = False
param.forcing_module = 'forcing' # not yet implemented
param.noslip = False
param.diffusion = False
param.isisland = True
psi0 = -5e-4 # this sets psi on the Northern wall (psi=0 on the Southern wall)
grid = Grid(param)
nh = grid.nh
def disc(param, grid, x0, y0, sigma):
    """Indices (not a mask) of the grid points lying inside a circular
    island of radius sigma centred at (x0*Lx, y0*Ly)."""
    dx = grid.xr - param.Lx*x0
    dy = grid.yr - param.Ly*y0
    return np.where(np.sqrt(dx**2 + dy**2) <= sigma)
# circular island at x = Lx/8, on the channel centreline
sigma = 0.08
idx = disc(param, grid, 0.125*param.Lx, 0.5, sigma)
grid.msk[idx] = 0
grid.msknoslip[idx] = 0
grid.island.add(idx, 0.)

# pin psi on the southern/northern halo rows to -psi0/2 and +psi0/2,
# giving a total streamfunction drop of psi0 across the channel
if grid.j0 == 0:
    msk = grid.msk.copy()*0
    msk[:nh, :] = 1
    idx = np.where(msk == 1)
    grid.island.add(idx, -psi0*.5)
if grid.j0 == param.npy-1:
    msk = grid.msk.copy()*0
    # NOTE(review): `-nh:-1` excludes the very last row, asymmetric with
    # the southern `[:nh, :]` above -- confirm this is intended
    msk[-nh:-1, :] = 1
    idx = np.where(msk == 1)
    grid.island.add(idx, psi0*.5)

# tell the code to deactivate the no-slip along the outer walls
if grid.j0 == 0:
    grid.msknoslip[:nh, :] = 1
if grid.j0 == param.npy-1:
    grid.msknoslip[-nh:, :] = 1

param.Kdiff = 0.5e-4*grid.dx

f2d = Fluid2d(param, grid)
model = f2d.model

xr, yr = grid.xr, grid.yr
yr0 = grid.yr0
pv = model.var.get('pv')

# first, let's put some noise on the pv
np.random.seed(1)  # to ensure reproducibility of the results
y = 1e-4*np.random.normal(size=np.shape(pv))*grid.msk
model.ope.fill_halo(y)
pv[:] = y

# to ensure that psi(y) varies linearly between the South and the North wall
# the pv anomaly needs to be set to the correct value
# since pvanom = d^2psi/dy^2 - Rd^-2 psi
# we see that to have psi(y)=a*y we need to set
# pvanom = -Rd^-2 * psi
# this is what does the next line
pv -= param.Rd**(-2) * psi0*yr0/param.Ly*grid.msk

# now we add the background planetary pv 'beta*y'
model.add_backgroundpv()

model.set_psi_from_pv()

f2d.loop()
| [
"numpy.random.seed",
"grid.Grid",
"param.Param",
"numpy.shape",
"numpy.where",
"fluid2d.Fluid2d",
"numpy.sqrt"
] | [((581, 601), 'param.Param', 'Param', (['"""default.xml"""'], {}), "('default.xml')\n", (586, 601), False, 'from param import Param\n'), ((1513, 1524), 'grid.Grid', 'Grid', (['param'], {}), '(param)\n', (1517, 1524), False, 'from grid import Grid\n'), ((2424, 2444), 'fluid2d.Fluid2d', 'Fluid2d', (['param', 'grid'], {}), '(param, grid)\n', (2431, 2444), False, 'from fluid2d import Fluid2d\n'), ((2572, 2589), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2586, 2589), True, 'import numpy as np\n'), ((1728, 1790), 'numpy.sqrt', 'np.sqrt', (['((xr - param.Lx * x0) ** 2 + (yr - param.Ly * y0) ** 2)'], {}), '((xr - param.Lx * x0) ** 2 + (yr - param.Ly * y0) ** 2)\n', (1735, 1790), True, 'import numpy as np\n'), ((1788, 1808), 'numpy.where', 'np.where', (['(r <= sigma)'], {}), '(r <= sigma)\n', (1796, 1808), True, 'import numpy as np\n'), ((2019, 2037), 'numpy.where', 'np.where', (['(msk == 1)'], {}), '(msk == 1)\n', (2027, 2037), True, 'import numpy as np\n'), ((2162, 2180), 'numpy.where', 'np.where', (['(msk == 1)'], {}), '(msk == 1)\n', (2170, 2180), True, 'import numpy as np\n'), ((2665, 2677), 'numpy.shape', 'np.shape', (['pv'], {}), '(pv)\n', (2673, 2677), True, 'import numpy as np\n')] |
import unittest
from CTL.tensor.tensor import Tensor
import numpy as np
from CTL.tests.packedTest import PackedTest
import CTL.funcs.funcs as funcs
from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS
from CTL.tensor.contract.link import makeLink
from CTL.tensor.tensorFunc import isIsometry
from CTL.tensor.contract.optimalContract import contractAndCostWithSequence
class TestMPS(PackedTest):
    """Tests for free-boundary MPS: canonical forms, contraction, merging,
    creation from dense tensors, and MPS-based tensor-network contraction.

    Fixes in this revision:
    - makeMPSContractionTest no longer overwrites its `eps` parameter with
      a local `eps = 1e-8`, so callers can actually set the tolerance;
    - `assertTrue(x, 7)` calls (which always pass -- the second argument
      is the failure message) replaced with `assertEqual`, consistent
      with the assertions in test_MPS;
    - commented-out duplicate construction code and debug prints removed.
    """

    def __init__(self, methodName = 'runTest'):
        super().__init__(methodName = methodName, name = 'MPS')

    def test_MPS(self):
        """Canonicalization of a 5-site MPS and isometry checks after moves."""
        mps = self.createMPSA()
        self.assertEqual(mps.chi, 16)
        self.assertEqual(mps.n, 5)
        self.assertTrue(mps.checkMPSProperty(mps._tensors))
        # canonicalizing twice at the same index must be idempotent
        mps.canonicalize(idx = 0)
        mps.canonicalize(idx = 0)
        self.assertTrue(mps.checkCanonical(excepIdx = 0))
        self.assertFalse(mps.checkCanonical(excepIdx = 2))
        mps.canonicalize(idx = 2)
        self.assertTrue(mps.checkCanonical(excepIdx = 2))
        self.assertFalse(mps.checkCanonical(excepIdx = 0))
        self.assertTrue(mps.checkCanonical())
        self.assertEqual(mps.activeIdx, 2)
        mps.moveTensor(2, 4)
        # boundary tensors must be isometries once the active site moved away
        self.assertTrue(isIsometry(mps.getTensor(0), labels = ['o']))
        self.assertTrue(isIsometry(mps.getTensor(1), labels = ['o', 'l']))

    def test_singleTensorMPS(self):
        """A 1-site MPS is trivially canonical and its single leg is 'o'."""
        tensor = Tensor(data = np.random.random_sample(3), labels = ['oo'])
        mps = FreeBoundaryMPS(tensorList = [tensor], chi = 16)
        self.assertEqual(mps.n, 1)
        mps.canonicalize(0)
        self.assertTrue(mps.checkCanonical(excepIdx = 0))
        self.assertEqual(mps.getTensor(0).legs[0].name, 'o')

    def createMPSA(self, tensorLikeFlag = False):
        """Build a fixed 5-site test MPS with chi = 16."""
        tensor1L = Tensor(shape = (3, 3), labels = ['o', 'internal'], tensorLikeFlag = tensorLikeFlag)
        tensor11 = Tensor(shape = (3, 5, 4), labels = ['itl', 'oo', 'itr'], tensorLikeFlag = tensorLikeFlag)
        tensor12 = Tensor(shape = (4, 2, 4), labels = ['itl', 'oo', 'itr'], tensorLikeFlag = tensorLikeFlag)
        tensor13 = Tensor(shape = (4, 3, 2), labels = ['itl', 'oo', 'itr'], tensorLikeFlag = tensorLikeFlag)
        tensor1R = Tensor(shape = (2, 5), labels = ['internal', 'o'], tensorLikeFlag = tensorLikeFlag)
        makeLink('internal', 'itl', tensor1L, tensor11)
        makeLink('itr', 'itl', tensor11, tensor12)
        makeLink('itr', 'itl', tensor12, tensor13)
        makeLink('itr', 'internal', tensor13, tensor1R)
        tensorsA = [tensor1L, tensor11, tensor12, tensor13, tensor1R]
        mpsA = FreeBoundaryMPS(tensorList = tensorsA, chi = 16)
        return mpsA

    def createMPSB(self, tensorLikeFlag = False):
        """Build a fixed 4-site test MPS with chi = 12."""
        tensor2L = Tensor(shape = (3, 3), labels = ['o', 'internal'], tensorLikeFlag = tensorLikeFlag)
        tensor21 = Tensor(shape = (3, 5, 4), labels = ['itl', 'oo', 'itr'], tensorLikeFlag = tensorLikeFlag)
        tensor22 = Tensor(shape = (4, 2, 4), labels = ['itl', 'oo', 'itr'], tensorLikeFlag = tensorLikeFlag)
        tensor2R = Tensor(shape = (4, 5), labels = ['internal', 'o'], tensorLikeFlag = tensorLikeFlag)
        makeLink('internal', 'itl', tensor2L, tensor21)
        makeLink('itr', 'itl', tensor21, tensor22)
        makeLink('itr', 'internal', tensor22, tensor2R)
        tensorsB = [tensor2L, tensor21, tensor22, tensor2R]
        mpsB = FreeBoundaryMPS(tensorList = tensorsB, chi = 12)
        return mpsB

    def createMPSFromDim(self, dims, itbRange = (3, 10), tensorLikeFlag = False, chi = 16):
        """Build an MPS with physical dimensions *dims*; internal bond
        dimensions are drawn at random from itbRange."""
        tensors = []
        n = len(dims)
        if (n == 1):
            tensors.append(Tensor(shape = (dims[0], ), labels = ['o'], tensorLikeFlag = tensorLikeFlag))
            return FreeBoundaryMPS(tensorList = tensors, chi = chi)
        itbLow, itbHigh = itbRange
        bondDim = np.random.randint(low = itbLow, high = itbHigh)
        tensor = Tensor(shape = (dims[0], bondDim), labels = ['o', 'r'], tensorLikeFlag = tensorLikeFlag)
        tensors.append(tensor)
        lastDim = bondDim
        for i in range(1, n - 1):
            bondDim = np.random.randint(low = itbLow, high = itbHigh)
            newTensor = Tensor(shape = (lastDim, dims[i], bondDim), labels = ['l', 'o', 'r'], tensorLikeFlag = tensorLikeFlag)
            tensors.append(newTensor)
            makeLink('r', 'l', tensor, newTensor)
            lastDim = bondDim
            tensor = newTensor
        newTensor = Tensor(shape = (lastDim, dims[-1]), labels = ['l', 'o'], tensorLikeFlag = tensorLikeFlag)
        tensors.append(newTensor)
        makeLink('r', 'l', tensor, newTensor)
        return FreeBoundaryMPS(tensorList = tensors, chi = chi)

    def test_MPSContraction(self):
        """Contract two MPSs sharing one physical bond, for real tensors
        and for shape-only (tensorLike) tensors."""
        mpsA = self.createMPSA(tensorLikeFlag = False)
        mpsB = self.createMPSB(tensorLikeFlag = False)
        tensorA2 = mpsA.getTensor(2)
        tensorB2 = mpsB.getTensor(2)
        makeLink('o', 'o', tensorA2, tensorB2)
        mps = contractMPS(mpsA, mpsB)
        mps.canonicalize(idx = 2)
        self.assertTrue(mps.checkCanonical())
        self.assertEqual(mps.n, 7) # 4 + 5 - 2
        self.assertEqual(mps.activeIdx, 2)

        mpsA = self.createMPSA(tensorLikeFlag = True)
        mpsB = self.createMPSB(tensorLikeFlag = True)
        tensorA2 = mpsA.getTensor(2)
        tensorB2 = mpsB.getTensor(2)
        makeLink('o', 'o', tensorA2, tensorB2)
        mps = contractMPS(mpsA, mpsB)
        mps.canonicalize(idx = 2)
        self.assertTrue(mps.checkCanonical())
        self.assertEqual(mps.n, 7) # 4 + 5 - 2
        self.assertEqual(mps.activeIdx, 2)

    def test_MPSMerge(self):
        """Merging doubly-linked MPS pairs, then contracting the remainder."""
        mpsA = self.createMPSA()
        mpsB = self.createMPSB()
        makeLink('o', 'o', mpsA.getTensor(1), mpsB.getTensor(1))
        makeLink('o', 'o', mpsA.getTensor(4), mpsB.getTensor(3))
        mergeMPS(mpsA, mpsB)
        self.assertTrue(mpsA.checkCanonical(excepIdx = mpsA.n - 1))
        self.assertTrue(mpsB.checkCanonical(excepIdx = mpsB.n - 1))
        self.assertEqual(mpsA.n, 4)
        self.assertEqual(mpsB.n, 3)
        mpsA.moveTensor(mpsA.n - 1, 1)
        mpsB.moveTensor(mpsB.n - 1, 0)
        mps = contractMPS(mpsB, mpsA)
        mps.canonicalize(idx = 2)
        self.assertTrue(mps.checkCanonical())
        self.assertEqual(mps.n, 5)

        mpsA = self.createMPSFromDim(dims = [3, 4, 5, 5, 2])
        mpsB = self.createMPSFromDim(dims = [2, 5, 3, 3, 4])
        mpsA.canonicalize(idx = 1)
        mpsB.canonicalize(idx = 2)
        makeLink('o', 'o', mpsA.getTensor(1), mpsB.getTensor(4))
        makeLink('o', 'o', mpsA.getTensor(0), mpsB.getTensor(2))
        makeLink('o', 'o', mpsB.getTensor(0), mpsA.getTensor(4))
        mergeMPS(mpsA, mpsB, beginFlag = True)
        self.assertEqual(mpsA.n, 3)
        self.assertEqual(mpsB.n, 3)
        mps = contractMPS(mpsA, mpsB)
        self.assertEqual(mps.n, 4)

    def test_createMPS(self):
        """createMPSFromTensor: site count, 1-D warning, 0-D assertion."""
        tensor = Tensor(shape = (3, 4, 5))
        mps = createMPSFromTensor(tensor = tensor, chi = 16)
        self.assertEqual(mps.n, 3)
        with self.assertWarns(RuntimeWarning) as cm:
            tensor = Tensor(shape = (3, ))
            mps = createMPSFromTensor(tensor = tensor, chi = 16)
            self.assertEqual(mps.n, 1)
        self.assertIn('MPS.py', cm.filename)
        message = cm.warning.__str__()
        self.assertIn('creating MPS for 1-D tensor', message)

        def zeroDimensionMPSFunc():
            tensor = Tensor(shape = ())
            _ = createMPSFromTensor(tensor)
        self.assertRaises(AssertionError, zeroDimensionMPSFunc)

        tensor = Tensor(shape = (3, 4, 5, 3, 3, 2))
        mps = createMPSFromTensor(tensor = tensor, chi = 16)
        self.assertEqual(mps.n, 6)

    def createCompleteGraph(self, n, dimRange = (2, 3)):
        """Complete-graph tensor network on n tensors; symmetric random
        bond dimensions drawn from dimRange."""
        low, high = dimRange
        dims = np.random.randint(low = low, high = high, size = (n, n))
        # symmetrize so both endpoints of an edge agree on its dimension
        for i in range(n):
            for j in range(i, n):
                dims[j][i] = dims[i][j]
        tensors = []
        for i in range(n):
            shape = tuple([dims[i][j] for j in range(n) if (j != i)])
            labels = [str(j) for j in range(n) if (j != i)]
            tensor = Tensor(shape = shape, labels = labels)
            tensors.append(tensor)
        for i in range(n):
            for j in range(i + 1, n):
                makeLink(str(j), str(i), tensors[i], tensors[j])
        return tensors

    def createSpecialTN(self):
        """Hand-built 4-tensor network with a double bond between c and d."""
        a = Tensor(shape = (3, 5, 7), labels = ['a3', 'a5', 'a7'])
        b = Tensor(shape = (2, 4, 5), labels = ['b2', 'b4', 'b5'])
        c = Tensor(shape = (2, 7, 7, 7), labels = ['c2', 'c71', 'c72', 'c73'])
        d = Tensor(shape = (7, 7, 3, 4), labels = ['d71', 'd72', 'd3', 'd4'])
        makeLink('a3', 'd3', a, d)
        makeLink('a5', 'b5', a, b)
        makeLink('a7', 'c72', a, c)
        makeLink('b2', 'c2', b, c)
        makeLink('b4', 'd4', b, d)
        makeLink('c71', 'd72', c, d)
        makeLink('c73', 'd71', c, d)
        return [a, b, d, c]

    def createSpecialTN2(self):
        """Hand-built 7-tensor network extending createSpecialTN."""
        a = Tensor(shape = (3, 5, 7), labels = ['a3', 'a5', 'a7'])
        b = Tensor(shape = (2, 4, 5), labels = ['b2', 'b4', 'b5'])
        c = Tensor(shape = (2, 7, 7, 7), labels = ['c2', 'c71', 'c72', 'c73'])
        d = Tensor(shape = (7, 7, 3, 4), labels = ['d71', 'd72', 'd3', 'd4'])
        e = Tensor(shape = (3, 3, 5), labels = ['e31', 'e32', 'e5'])
        f = Tensor(shape = (2, 2, 5), labels = ['f21', 'f22', 'f5'])
        g = Tensor(shape = (4, 4, 3, 3), labels = ['g41', 'g42', 'g31', 'g32'])
        makeLink('a3', 'e31', a, e)
        makeLink('a5', 'b5', a, b)
        makeLink('a7', 'c72', a, c)
        makeLink('b2', 'f21', b, f)
        makeLink('b4', 'g41', b, g)
        makeLink('c2', 'f22', c, f)
        makeLink('c71', 'd72', c, d)
        makeLink('c73', 'd71', c, d)
        makeLink('d3', 'g31', d, g)
        makeLink('d4', 'g42', d, g)
        makeLink('e5', 'f5', e, f)
        makeLink('e32', 'g32', e, g)
        return [a, b, d, c, g, f, e]

    def makeMPSContractionTest(self, tensors, eps = 1e-8):
        """Contract *tensors* exactly and via MPS; the scalar results must
        agree to within *eps* (previously the parameter was shadowed by a
        local reassignment and therefore ignored)."""
        res, cost = contractAndCostWithSequence(tensors)
        print('res = {}, cost = {}'.format(res.single(), cost))
        mpsRes = contractWithMPS(tensors, chi = 32)
        print('res from mps = {}'.format(mpsRes.single()))
        self.assertTrue(funcs.floatEqual(res.single(), mpsRes.single(), eps = eps))

    def test_MPSTNContraction(self):
        """MPS contraction agrees with exact contraction on three networks."""
        self.makeMPSContractionTest(self.createCompleteGraph(n = 6))
        self.makeMPSContractionTest(self.createSpecialTN())
        self.makeMPSContractionTest(self.createSpecialTN2())
| [
"CTL.examples.MPS.contractMPS",
"numpy.random.random_sample",
"CTL.tensor.contract.optimalContract.contractAndCostWithSequence",
"CTL.examples.MPS.mergeMPS",
"CTL.tensor.tensor.Tensor",
"numpy.random.randint",
"CTL.tensor.contract.link.makeLink",
"CTL.examples.MPS.createMPSFromTensor",
"CTL.examples... | [((2420, 2464), 'CTL.examples.MPS.FreeBoundaryMPS', 'FreeBoundaryMPS', ([], {'tensorList': '[tensor]', 'chi': '(16)'}), '(tensorList=[tensor], chi=16)\n', (2435, 2464), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((2730, 2807), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 3)', 'labels': "['o', 'internal']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(3, 3), labels=['o', 'internal'], tensorLikeFlag=tensorLikeFlag)\n", (2736, 2807), False, 'from CTL.tensor.tensor import Tensor\n'), ((2833, 2921), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 5, 4)', 'labels': "['itl', 'oo', 'itr']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(3, 5, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=\n tensorLikeFlag)\n", (2839, 2921), False, 'from CTL.tensor.tensor import Tensor\n'), ((2942, 3030), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(4, 2, 4)', 'labels': "['itl', 'oo', 'itr']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(4, 2, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=\n tensorLikeFlag)\n", (2948, 3030), False, 'from CTL.tensor.tensor import Tensor\n'), ((3051, 3139), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(4, 3, 2)', 'labels': "['itl', 'oo', 'itr']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(4, 3, 2), labels=['itl', 'oo', 'itr'], tensorLikeFlag=\n tensorLikeFlag)\n", (3057, 3139), False, 'from CTL.tensor.tensor import Tensor\n'), ((3160, 3237), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(2, 5)', 'labels': "['internal', 'o']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(2, 5), labels=['internal', 'o'], tensorLikeFlag=tensorLikeFlag)\n", (3166, 3237), False, 'from CTL.tensor.tensor import Tensor\n'), ((3253, 3300), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""internal"""', '"""itl"""', 'tensor1L', 'tensor11'], {}), "('internal', 'itl', tensor1L, tensor11)\n", (3261, 3300), False, 'from 
CTL.tensor.contract.link import makeLink\n'), ((3309, 3351), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""itr"""', '"""itl"""', 'tensor11', 'tensor12'], {}), "('itr', 'itl', tensor11, tensor12)\n", (3317, 3351), False, 'from CTL.tensor.contract.link import makeLink\n'), ((3360, 3402), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""itr"""', '"""itl"""', 'tensor12', 'tensor13'], {}), "('itr', 'itl', tensor12, tensor13)\n", (3368, 3402), False, 'from CTL.tensor.contract.link import makeLink\n'), ((3411, 3458), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""itr"""', '"""internal"""', 'tensor13', 'tensor1R'], {}), "('itr', 'internal', tensor13, tensor1R)\n", (3419, 3458), False, 'from CTL.tensor.contract.link import makeLink\n'), ((3546, 3590), 'CTL.examples.MPS.FreeBoundaryMPS', 'FreeBoundaryMPS', ([], {'tensorList': 'tensorsA', 'chi': '(16)'}), '(tensorList=tensorsA, chi=16)\n', (3561, 3590), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((3685, 3762), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 3)', 'labels': "['o', 'internal']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(3, 3), labels=['o', 'internal'], tensorLikeFlag=tensorLikeFlag)\n", (3691, 3762), False, 'from CTL.tensor.tensor import Tensor\n'), ((3788, 3876), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 5, 4)', 'labels': "['itl', 'oo', 'itr']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(3, 5, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=\n tensorLikeFlag)\n", (3794, 3876), False, 'from CTL.tensor.tensor import Tensor\n'), ((3897, 3985), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(4, 2, 4)', 'labels': "['itl', 'oo', 'itr']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(4, 2, 4), labels=['itl', 'oo', 'itr'], tensorLikeFlag=\n tensorLikeFlag)\n", (3903, 3985), False, 'from CTL.tensor.tensor import Tensor\n'), ((4006, 4083), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], 
{'shape': '(4, 5)', 'labels': "['internal', 'o']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(4, 5), labels=['internal', 'o'], tensorLikeFlag=tensorLikeFlag)\n", (4012, 4083), False, 'from CTL.tensor.tensor import Tensor\n'), ((4099, 4146), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""internal"""', '"""itl"""', 'tensor2L', 'tensor21'], {}), "('internal', 'itl', tensor2L, tensor21)\n", (4107, 4146), False, 'from CTL.tensor.contract.link import makeLink\n'), ((4155, 4197), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""itr"""', '"""itl"""', 'tensor21', 'tensor22'], {}), "('itr', 'itl', tensor21, tensor22)\n", (4163, 4197), False, 'from CTL.tensor.contract.link import makeLink\n'), ((4206, 4253), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""itr"""', '"""internal"""', 'tensor22', 'tensor2R'], {}), "('itr', 'internal', tensor22, tensor2R)\n", (4214, 4253), False, 'from CTL.tensor.contract.link import makeLink\n'), ((4330, 4374), 'CTL.examples.MPS.FreeBoundaryMPS', 'FreeBoundaryMPS', ([], {'tensorList': 'tensorsB', 'chi': '(12)'}), '(tensorList=tensorsB, chi=12)\n', (4345, 4374), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((4868, 4911), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'itbLow', 'high': 'itbHigh'}), '(low=itbLow, high=itbHigh)\n', (4885, 4911), True, 'import numpy as np\n'), ((4933, 5020), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(dims[0], bondDim)', 'labels': "['o', 'r']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(dims[0], bondDim), labels=['o', 'r'], tensorLikeFlag=\n tensorLikeFlag)\n", (4939, 5020), False, 'from CTL.tensor.tensor import Tensor\n'), ((5490, 5578), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(lastDim, dims[-1])', 'labels': "['l', 'o']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(lastDim, dims[-1]), labels=['l', 'o'], tensorLikeFlag=\n tensorLikeFlag)\n", (5496, 5578), False, 'from 
CTL.tensor.tensor import Tensor\n'), ((5622, 5659), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""r"""', '"""l"""', 'tensor', 'newTensor'], {}), "('r', 'l', tensor, newTensor)\n", (5630, 5659), False, 'from CTL.tensor.contract.link import makeLink\n'), ((5677, 5721), 'CTL.examples.MPS.FreeBoundaryMPS', 'FreeBoundaryMPS', ([], {'tensorList': 'tensors', 'chi': 'chi'}), '(tensorList=tensors, chi=chi)\n', (5692, 5721), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((5964, 6002), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""o"""', '"""o"""', 'tensorA2', 'tensorB2'], {}), "('o', 'o', tensorA2, tensorB2)\n", (5972, 6002), False, 'from CTL.tensor.contract.link import makeLink\n'), ((6045, 6068), 'CTL.examples.MPS.contractMPS', 'contractMPS', (['mpsA', 'mpsB'], {}), '(mpsA, mpsB)\n', (6056, 6068), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((6452, 6490), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""o"""', '"""o"""', 'tensorA2', 'tensorB2'], {}), "('o', 'o', tensorA2, tensorB2)\n", (6460, 6490), False, 'from CTL.tensor.contract.link import makeLink\n'), ((6533, 6556), 'CTL.examples.MPS.contractMPS', 'contractMPS', (['mpsA', 'mpsB'], {}), '(mpsA, mpsB)\n', (6544, 6556), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((7013, 7033), 'CTL.examples.MPS.mergeMPS', 'mergeMPS', (['mpsA', 'mpsB'], {}), '(mpsA, mpsB)\n', (7021, 7033), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((7364, 7387), 'CTL.examples.MPS.contractMPS', 'contractMPS', (['mpsB', 'mpsA'], {}), '(mpsB, mpsA)\n', (7375, 7387), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((7930, 7966), 'CTL.examples.MPS.mergeMPS', 'mergeMPS', 
(['mpsA', 'mpsB'], {'beginFlag': '(True)'}), '(mpsA, mpsB, beginFlag=True)\n', (7938, 7966), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((8085, 8108), 'CTL.examples.MPS.contractMPS', 'contractMPS', (['mpsA', 'mpsB'], {}), '(mpsA, mpsB)\n', (8096, 8108), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((8213, 8236), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 4, 5)'}), '(shape=(3, 4, 5))\n', (8219, 8236), False, 'from CTL.tensor.tensor import Tensor\n'), ((8253, 8295), 'CTL.examples.MPS.createMPSFromTensor', 'createMPSFromTensor', ([], {'tensor': 'tensor', 'chi': '(16)'}), '(tensor=tensor, chi=16)\n', (8272, 8295), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((8932, 8964), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 4, 5, 3, 3, 2)'}), '(shape=(3, 4, 5, 3, 3, 2))\n', (8938, 8964), False, 'from CTL.tensor.tensor import Tensor\n'), ((8981, 9023), 'CTL.examples.MPS.createMPSFromTensor', 'createMPSFromTensor', ([], {'tensor': 'tensor', 'chi': '(16)'}), '(tensor=tensor, chi=16)\n', (9000, 9023), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((9186, 9236), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'low', 'high': 'high', 'size': '(n, n)'}), '(low=low, high=high, size=(n, n))\n', (9203, 9236), True, 'import numpy as np\n'), ((9833, 9883), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 5, 7)', 'labels': "['a3', 'a5', 'a7']"}), "(shape=(3, 5, 7), labels=['a3', 'a5', 'a7'])\n", (9839, 9883), False, 'from CTL.tensor.tensor import Tensor\n'), ((9900, 9950), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(2, 4, 5)', 'labels': "['b2', 'b4', 'b5']"}), "(shape=(2, 4, 5), labels=['b2', 'b4', 'b5'])\n", (9906, 9950), False, 'from 
CTL.tensor.tensor import Tensor\n'), ((9967, 10029), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(2, 7, 7, 7)', 'labels': "['c2', 'c71', 'c72', 'c73']"}), "(shape=(2, 7, 7, 7), labels=['c2', 'c71', 'c72', 'c73'])\n", (9973, 10029), False, 'from CTL.tensor.tensor import Tensor\n'), ((10046, 10107), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(7, 7, 3, 4)', 'labels': "['d71', 'd72', 'd3', 'd4']"}), "(shape=(7, 7, 3, 4), labels=['d71', 'd72', 'd3', 'd4'])\n", (10052, 10107), False, 'from CTL.tensor.tensor import Tensor\n'), ((10120, 10146), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""a3"""', '"""d3"""', 'a', 'd'], {}), "('a3', 'd3', a, d)\n", (10128, 10146), False, 'from CTL.tensor.contract.link import makeLink\n'), ((10155, 10181), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""a5"""', '"""b5"""', 'a', 'b'], {}), "('a5', 'b5', a, b)\n", (10163, 10181), False, 'from CTL.tensor.contract.link import makeLink\n'), ((10190, 10217), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""a7"""', '"""c72"""', 'a', 'c'], {}), "('a7', 'c72', a, c)\n", (10198, 10217), False, 'from CTL.tensor.contract.link import makeLink\n'), ((10227, 10253), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""b2"""', '"""c2"""', 'b', 'c'], {}), "('b2', 'c2', b, c)\n", (10235, 10253), False, 'from CTL.tensor.contract.link import makeLink\n'), ((10262, 10288), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""b4"""', '"""d4"""', 'b', 'd'], {}), "('b4', 'd4', b, d)\n", (10270, 10288), False, 'from CTL.tensor.contract.link import makeLink\n'), ((10297, 10325), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""c71"""', '"""d72"""', 'c', 'd'], {}), "('c71', 'd72', c, d)\n", (10305, 10325), False, 'from CTL.tensor.contract.link import makeLink\n'), ((10334, 10362), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""c73"""', '"""d71"""', 'c', 'd'], {}), "('c73', 'd71', c, d)\n", (10342, 10362), False, 'from CTL.tensor.contract.link import 
makeLink\n'), ((10438, 10488), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 5, 7)', 'labels': "['a3', 'a5', 'a7']"}), "(shape=(3, 5, 7), labels=['a3', 'a5', 'a7'])\n", (10444, 10488), False, 'from CTL.tensor.tensor import Tensor\n'), ((10505, 10555), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(2, 4, 5)', 'labels': "['b2', 'b4', 'b5']"}), "(shape=(2, 4, 5), labels=['b2', 'b4', 'b5'])\n", (10511, 10555), False, 'from CTL.tensor.tensor import Tensor\n'), ((10572, 10634), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(2, 7, 7, 7)', 'labels': "['c2', 'c71', 'c72', 'c73']"}), "(shape=(2, 7, 7, 7), labels=['c2', 'c71', 'c72', 'c73'])\n", (10578, 10634), False, 'from CTL.tensor.tensor import Tensor\n'), ((10651, 10712), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(7, 7, 3, 4)', 'labels': "['d71', 'd72', 'd3', 'd4']"}), "(shape=(7, 7, 3, 4), labels=['d71', 'd72', 'd3', 'd4'])\n", (10657, 10712), False, 'from CTL.tensor.tensor import Tensor\n'), ((10729, 10781), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3, 3, 5)', 'labels': "['e31', 'e32', 'e5']"}), "(shape=(3, 3, 5), labels=['e31', 'e32', 'e5'])\n", (10735, 10781), False, 'from CTL.tensor.tensor import Tensor\n'), ((10798, 10850), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(2, 2, 5)', 'labels': "['f21', 'f22', 'f5']"}), "(shape=(2, 2, 5), labels=['f21', 'f22', 'f5'])\n", (10804, 10850), False, 'from CTL.tensor.tensor import Tensor\n'), ((10867, 10930), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(4, 4, 3, 3)', 'labels': "['g41', 'g42', 'g31', 'g32']"}), "(shape=(4, 4, 3, 3), labels=['g41', 'g42', 'g31', 'g32'])\n", (10873, 10930), False, 'from CTL.tensor.tensor import Tensor\n'), ((10943, 10970), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""a3"""', '"""e31"""', 'a', 'e'], {}), "('a3', 'e31', a, e)\n", (10951, 10970), False, 'from CTL.tensor.contract.link import makeLink\n'), ((10979, 11005), 'CTL.tensor.contract.link.makeLink', 'makeLink', 
(['"""a5"""', '"""b5"""', 'a', 'b'], {}), "('a5', 'b5', a, b)\n", (10987, 11005), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11014, 11041), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""a7"""', '"""c72"""', 'a', 'c'], {}), "('a7', 'c72', a, c)\n", (11022, 11041), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11052, 11079), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""b2"""', '"""f21"""', 'b', 'f'], {}), "('b2', 'f21', b, f)\n", (11060, 11079), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11088, 11115), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""b4"""', '"""g41"""', 'b', 'g'], {}), "('b4', 'g41', b, g)\n", (11096, 11115), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11133, 11160), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""c2"""', '"""f22"""', 'c', 'f'], {}), "('c2', 'f22', c, f)\n", (11141, 11160), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11169, 11197), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""c71"""', '"""d72"""', 'c', 'd'], {}), "('c71', 'd72', c, d)\n", (11177, 11197), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11206, 11234), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""c73"""', '"""d71"""', 'c', 'd'], {}), "('c73', 'd71', c, d)\n", (11214, 11234), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11245, 11272), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""d3"""', '"""g31"""', 'd', 'g'], {}), "('d3', 'g31', d, g)\n", (11253, 11272), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11281, 11308), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""d4"""', '"""g42"""', 'd', 'g'], {}), "('d4', 'g42', d, g)\n", (11289, 11308), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11326, 11352), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""e5"""', '"""f5"""', 'e', 'f'], {}), "('e5', 'f5', e, f)\n", (11334, 11352), False, 'from 
CTL.tensor.contract.link import makeLink\n'), ((11361, 11389), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""e32"""', '"""g32"""', 'e', 'g'], {}), "('e32', 'g32', e, g)\n", (11369, 11389), False, 'from CTL.tensor.contract.link import makeLink\n'), ((11508, 11544), 'CTL.tensor.contract.optimalContract.contractAndCostWithSequence', 'contractAndCostWithSequence', (['tensors'], {}), '(tensors)\n', (11535, 11544), False, 'from CTL.tensor.contract.optimalContract import contractAndCostWithSequence\n'), ((11627, 11659), 'CTL.examples.MPS.contractWithMPS', 'contractWithMPS', (['tensors'], {'chi': '(32)'}), '(tensors, chi=32)\n', (11642, 11659), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((4756, 4800), 'CTL.examples.MPS.FreeBoundaryMPS', 'FreeBoundaryMPS', ([], {'tensorList': 'tensors', 'chi': 'chi'}), '(tensorList=tensors, chi=chi)\n', (4771, 4800), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((5136, 5179), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'itbLow', 'high': 'itbHigh'}), '(low=itbLow, high=itbHigh)\n', (5153, 5179), True, 'import numpy as np\n'), ((5208, 5308), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(lastDim, dims[i], bondDim)', 'labels': "['l', 'o', 'r']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(lastDim, dims[i], bondDim), labels=['l', 'o', 'r'],\n tensorLikeFlag=tensorLikeFlag)\n", (5214, 5308), False, 'from CTL.tensor.tensor import Tensor\n'), ((5361, 5398), 'CTL.tensor.contract.link.makeLink', 'makeLink', (['"""r"""', '"""l"""', 'tensor', 'newTensor'], {}), "('r', 'l', tensor, newTensor)\n", (5369, 5398), False, 'from CTL.tensor.contract.link import makeLink\n'), ((8431, 8449), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (8437, 8449), False, 'from CTL.tensor.tensor import Tensor\n'), ((8471, 8513), 
'CTL.examples.MPS.createMPSFromTensor', 'createMPSFromTensor', ([], {'tensor': 'tensor', 'chi': '(16)'}), '(tensor=tensor, chi=16)\n', (8490, 8513), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((8787, 8803), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '()'}), '(shape=())\n', (8793, 8803), False, 'from CTL.tensor.tensor import Tensor\n'), ((8822, 8849), 'CTL.examples.MPS.createMPSFromTensor', 'createMPSFromTensor', (['tensor'], {}), '(tensor)\n', (8841, 8849), False, 'from CTL.examples.MPS import FreeBoundaryMPS, mergeMPS, contractMPS, createMPSFromTensor, contractWithMPS\n'), ((9552, 9586), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': 'shape', 'labels': 'labels'}), '(shape=shape, labels=labels)\n', (9558, 9586), False, 'from CTL.tensor.tensor import Tensor\n'), ((2361, 2387), 'numpy.random.random_sample', 'np.random.random_sample', (['(3)'], {}), '(3)\n', (2384, 2387), True, 'import numpy as np\n'), ((4659, 4728), 'CTL.tensor.tensor.Tensor', 'Tensor', ([], {'shape': '(dims[0],)', 'labels': "['o']", 'tensorLikeFlag': 'tensorLikeFlag'}), "(shape=(dims[0],), labels=['o'], tensorLikeFlag=tensorLikeFlag)\n", (4665, 4728), False, 'from CTL.tensor.tensor import Tensor\n')] |
# built-in
import os
# third-party
import numpy as np
from scipy.stats import skew, kurtosis
def signal_stats(signal=None, hist=True):
    """Compute statistical metrics describing the signal.

    Parameters
    ----------
    signal : array
        Input signal.
    hist : bool, optional
        Unused; kept for backward compatibility with older callers.

    Returns
    -------
    stats : np.ndarray or tuple
        Nine statistics, in order:
        mean, median, variance (population, ddof=0), standard deviation
        (population, ddof=0), total absolute deviation from the median,
        kurtosis (Fisher definition, biased estimator), skewness (biased
        estimator), interquartile range, and root mean square.
        An empty input yields a tuple of nine zeros (historical behaviour).

    References
    ----------
    TSFEL library: https://github.com/fraunhoferportugal/tsfel
    <NAME>. (2004). A large set of audio features for sound description
    (similarity and classification) in the CUIDADO project.
    """
    # ensure numpy
    signal = np.array(signal)
    if len(signal) == 0:
        # Preserve the historical empty-input behaviour: nine zeros.
        return 0., 0., 0., 0., 0., 0., 0., 0., 0.
    mean = np.mean(signal)
    median = np.median(signal)
    var = np.var(signal)
    std = np.std(signal)
    # Total (summed) absolute deviation from the median.
    abs_dev = np.sum(np.abs(signal - median))
    kurtosis_ = kurtosis(signal)
    skewness = skew(signal)
    iqr = np.percentile(signal, 75) - np.percentile(signal, 25)
    rms = np.sqrt(np.sum(signal ** 2) / len(signal))
    return np.hstack((mean, median, var, std, abs_dev, kurtosis_, skewness, iqr, rms))
| [
"numpy.abs",
"numpy.median",
"numpy.std",
"numpy.hstack",
"numpy.percentile",
"scipy.stats.skew",
"numpy.mean",
"numpy.array",
"scipy.stats.kurtosis",
"numpy.var"
] | [((1401, 1417), 'numpy.array', 'np.array', (['signal'], {}), '(signal)\n', (1409, 1417), True, 'import numpy as np\n'), ((1519, 1534), 'numpy.mean', 'np.mean', (['signal'], {}), '(signal)\n', (1526, 1534), True, 'import numpy as np\n'), ((1552, 1569), 'numpy.median', 'np.median', (['signal'], {}), '(signal)\n', (1561, 1569), True, 'import numpy as np\n'), ((1584, 1598), 'numpy.var', 'np.var', (['signal'], {}), '(signal)\n', (1590, 1598), True, 'import numpy as np\n'), ((1613, 1627), 'numpy.std', 'np.std', (['signal'], {}), '(signal)\n', (1619, 1627), True, 'import numpy as np\n'), ((1698, 1714), 'scipy.stats.kurtosis', 'kurtosis', (['signal'], {}), '(signal)\n', (1706, 1714), False, 'from scipy.stats import skew, kurtosis\n'), ((1734, 1746), 'scipy.stats.skew', 'skew', (['signal'], {}), '(signal)\n', (1738, 1746), False, 'from scipy.stats import skew, kurtosis\n'), ((1899, 1974), 'numpy.hstack', 'np.hstack', (['(mean, median, var, std, abs_dev, kurtosis_, skewness, iqr, rms)'], {}), '((mean, median, var, std, abs_dev, kurtosis_, skewness, iqr, rms))\n', (1908, 1974), True, 'import numpy as np\n'), ((1653, 1676), 'numpy.abs', 'np.abs', (['(signal - median)'], {}), '(signal - median)\n', (1659, 1676), True, 'import numpy as np\n'), ((1761, 1786), 'numpy.percentile', 'np.percentile', (['signal', '(75)'], {}), '(signal, 75)\n', (1774, 1786), True, 'import numpy as np\n'), ((1789, 1814), 'numpy.percentile', 'np.percentile', (['signal', '(25)'], {}), '(signal, 25)\n', (1802, 1814), True, 'import numpy as np\n'), ((1844, 1860), 'numpy.array', 'np.array', (['signal'], {}), '(signal)\n', (1852, 1860), True, 'import numpy as np\n')] |
"""
Bit array or vector representations of Paulis for 3Di codes.
Although qecsim already has such an implementation, some of these extra
routines are useful specifically for dealing with the 3D code.
:Author:
<NAME>
"""
from typing import Union, List
import numpy as np
from . import bsparse
from scipy.sparse import csr_matrix
def bcommute(a, b) -> np.ndarray:
    """Array of 0 for commutes and 1 for anticommutes bvectors.

    Parameters
    ----------
    a, b : list, np.ndarray or sparse matrix
        Binary symplectic vectors (1d) or stacks of vectors (2d rows),
        all of the same even length 2n.

    Returns
    -------
    np.ndarray
        Symplectic product modulo 2; flattened to 1d when exactly one of
        the inputs is a single vector.

    Raises
    ------
    ValueError
        If either input has odd length, or the lengths differ.
    """
    # If lists, convert to numpy.
    if isinstance(a, list):
        a = np.array(a, dtype='uint8')
    if isinstance(b, list):
        b = np.array(b, dtype='uint8')
    # Determine the output shape.
    # In particular, flatten array where needed.
    output_shape = None
    if len(a.shape) == 2 and len(b.shape) == 1:
        output_shape = a.shape[0]
    elif len(a.shape) == 1 and len(b.shape) == 2:
        output_shape = b.shape[0]
    # If only singles, then convert to 2D array.
    if len(a.shape) == 1:
        a = np.reshape(a, (1, a.shape[0]))
    if len(b.shape) == 1:
        b = np.reshape(b, (1, b.shape[0]))
    # Check the shapes are correct.
    if a.shape[1] % 2 != 0:
        raise ValueError(
            f'Length {a.shape[1]} binary vector not of even length.'
        )
    if b.shape[1] % 2 != 0:
        raise ValueError(
            f'Length {b.shape[1]} binary vector not of even length.'
        )
    if b.shape[1] != a.shape[1]:
        # BUG FIX: the second string was missing its f-prefix, so the
        # literal text '{b.shape[1]}' appeared in the error message.
        raise ValueError(
            f'Length {a.shape[1]} bvector cannot be '
            f'composed with length {b.shape[1]}'
        )
    if bsparse.is_sparse(a) or bsparse.is_sparse(b):
        commutes = _bcommute_sparse(a, b)
    else:
        # Number of qubits.
        n = int(a.shape[1]/2)
        # Compute commutator by binary symplectic form.
        a_X = a[:, :n]
        a_Z = a[:, n:]
        b_X = b[:, :n]
        b_Z = b[:, n:]
        commutes = (a_X.dot(b_Z.T) + a_Z.dot(b_X.T)) % 2
    if output_shape is not None:
        commutes = commutes.reshape(output_shape)
    return commutes
def _bcommute_sparse(a, b):
    """Sparse symplectic product: 0 where rows commute, 1 where they anticommute."""
    n_qubits = int(a.shape[1] / 2)
    # Promote dense inputs to the sparse representation.
    if not bsparse.is_sparse(a):
        a = bsparse.from_array(a)
    if not bsparse.is_sparse(b):
        b = bsparse.from_array(b)
    # Binary symplectic form: X-part against Z-part, plus Z-part against X-part.
    product = (
        a[:, :n_qubits].dot(b[:, n_qubits:].T)
        + a[:, n_qubits:].dot(b[:, :n_qubits].T)
    )
    product.data %= 2
    dense = product.toarray()
    # Flatten degenerate single-row / single-column results.
    if dense.shape[0] == 1:
        return dense[0, :]
    if dense.shape[1] == 1:
        return dense[:, 0]
    return dense
def pauli_to_bsf(error_pauli):
    """Convert a Pauli string (e.g. 'XIZY') to binary symplectic form (uint8)."""
    symbols = np.array(list(error_pauli))
    # Y contributes to both the X and the Z half.
    x_half = np.logical_or(symbols == 'X', symbols == 'Y')
    z_half = np.logical_or(symbols == 'Z', symbols == 'Y')
    return np.hstack((x_half, z_half)).astype('uint8')
def pauli_string_to_bvector(pauli_string: str) -> np.ndarray:
    """Convert a Pauli string such as 'XIZY' to its binary symplectic vector.

    Parameters
    ----------
    pauli_string : str
        String over the alphabet 'IXYZ'.

    Returns
    -------
    np.ndarray
        Length-2n uint vector: X block followed by Z block.

    Raises
    ------
    ValueError
        On characters outside 'IXYZ'. (Previously such characters were
        silently skipped, yielding a vector of the wrong length.)
    """
    encoding = {'I': (0, 0), 'X': (1, 0), 'Y': (1, 1), 'Z': (0, 1)}
    X_block = []
    Z_block = []
    for character in pauli_string:
        try:
            x_bit, z_bit = encoding[character]
        except KeyError:
            raise ValueError(
                f'Invalid Pauli character {character!r}'
            ) from None
        X_block.append(x_bit)
        Z_block.append(z_bit)
    bvector = np.concatenate([X_block, Z_block]).astype(np.uint)
    return bvector
def bvector_to_pauli_string(bvector: np.ndarray) -> str:
    """Convert a binary symplectic vector back to a Pauli string."""
    lookup = {(0, 0): 'I', (1, 0): 'X', (1, 1): 'Y', (0, 1): 'Z'}
    n = int(bvector.shape[0] / 2)
    # Pair the i-th X bit with the i-th Z bit to pick each symbol.
    symbols = [lookup[(bvector[i], bvector[i + n])] for i in range(n)]
    return ''.join(symbols)
def get_effective_error(
    total_error,
    logicals_x,
    logicals_z,
) -> np.ndarray:
    """Effective Pauli error on logical qubits after decoding.

    Parameters
    ----------
    total_error : np.ndarray
        Binary symplectic vector of the residual physical error, or a 2d
        stack of such vectors (one per row).
    logicals_x : np.ndarray
        Logical X operators in binary symplectic form: 1d for a single
        logical pair, otherwise one operator per row.
    logicals_z : np.ndarray
        Logical Z operators, same shape convention as ``logicals_x``.

    Returns
    -------
    np.ndarray
        For each total error, the logical-X components followed by the
        logical-Z components; flattened to 1d when a single total error
        is given.
    """
    if logicals_x.shape != logicals_z.shape:
        raise ValueError('Logical Xs and Zs must be of same shape.')
    # Number of pairs of logical operators.
    if len(logicals_x.shape) == 1:
        n_logical = 1
    else:
        n_logical = int(logicals_x.shape[0])
    # Get the number of total errors given.
    num_total_errors = 1
    if len(total_error.shape) > 1:
        num_total_errors = total_error.shape[0]
    # The shape of the array to be returned.
    final_shape: tuple = (num_total_errors, 2*n_logical)
    if num_total_errors == 1:
        final_shape = (2*n_logical, )
    # Anticommutation with a logical X operator reveals an effective
    # logical Z component, and vice versa.
    effective_Z = bcommute(logicals_x, total_error)
    effective_X = bcommute(logicals_z, total_error)
    if num_total_errors == 1:
        effective = np.concatenate([effective_X, effective_Z])
    elif n_logical == 1:
        effective = np.array([effective_X, effective_Z]).T
    else:
        effective = np.array([
            np.concatenate([effective_X[:, i], effective_Z[:, i]])
            for i in range(num_total_errors)
        ])
    # Flatten the array if only one total error is given.
    effective = effective.reshape(final_shape)
    return effective
def bvector_to_int(bvector: np.ndarray) -> int:
    """Pack a binary vector into one integer (most significant bit first)."""
    bits = ''.join(str(bit) for bit in bvector)
    return int(bits, 2)
def int_to_bvector(int_rep: int, n: int) -> np.ndarray:
    """Unpack an integer into a zero-padded 2n-bit binary symplectic vector."""
    bits = format(int_rep, '0{}b'.format(2 * n))
    return np.array(tuple(bits), dtype=np.uint)
def bvectors_to_ints(bvector_list: list) -> list:
    """Pack each bvector in the list into an integer for efficient storage."""
    return [
        int(''.join(map(str, bvector)), 2)
        for bvector in bvector_list
    ]
def ints_to_bvectors(int_list: list, n: int) -> list:
    """Convert a list of integers back to 2n-bit binary symplectic vectors."""
    width = 2 * n
    bvectors = []
    for value in int_list:
        bits = format(value, '0{}b'.format(width))
        bvectors.append(np.array(tuple(bits), dtype=np.uint))
    return bvectors
def gf2_rank(rows):
    """Find rank of a matrix over GF2 given as list of binary ints.

    Note: the input list is consumed (emptied) in the process.
    From https://stackoverflow.com/questions/56856378
    """
    rank = 0
    while rows:
        pivot = rows.pop()
        if not pivot:
            continue
        rank += 1
        lowest_bit = pivot & -pivot
        # Eliminate the pivot's lowest set bit from all remaining rows.
        for idx in range(len(rows)):
            if rows[idx] & lowest_bit:
                rows[idx] ^= pivot
    return rank
def brank(matrix):
    """Rank of a binary matrix over GF(2)."""
    dense = bsparse.to_array(matrix)
    # Encode each row as one integer so gf2_rank can eliminate with XOR.
    encoded_rows = [
        int(''.join(str(entry) for entry in row), 2)
        for row in dense.astype(int)
    ]
    return gf2_rank(encoded_rows)
def apply_deformation(
    deformation_indices: Union[List[bool], np.ndarray], bsf: np.ndarray
) -> np.ndarray:
    """Return Hadamard-deformed bsf: X and Z entries swapped at flagged qubits."""
    n = len(deformation_indices)
    # Validate the symplectic length against the number of deformation flags.
    if len(bsf.shape) == 1:
        if bsf.shape[0] != 2*n:
            raise ValueError(
                f'Deformation index length {n} does not match '
                f'bsf shape {bsf.shape}, which should be {(2*n,)}'
            )
    else:
        if bsf.shape[1] != 2*n:
            raise ValueError(
                f'Deformation index length {n} does not match '
                f'bsf shape {bsf.shape}, which should be '
                f'{(bsf.shape[0], 2*n)}.'
            )
    # Build a column permutation that swaps entry i with entry i + n
    # wherever the deformation flag is set.
    permutation = np.arange(2 * n)
    for i, deform in enumerate(deformation_indices):
        if deform:
            permutation[i], permutation[i + n] = i + n, i
    # Fancy indexing on the last axis handles both the 1d and 2d cases
    # and returns a fresh array, as the original implementation did.
    return bsf[..., permutation]
def bsf_wt(bsf):
    """
    Return weight of given binary symplectic form.

    The weight is the number of qubits acted on non-trivially, i.e. qubits
    whose X bit, Z bit, or both are set.

    :param bsf: Binary symplectic vector or matrix.
    :type bsf: numpy.array (1d or 2d) or csr_matrix
    :return: Weight
    :rtype: int
    """
    if isinstance(bsf, np.ndarray):
        assert np.array_equal(bsf % 2, bsf), \
            'BSF {} is not in binary form'.format(bsf)
        # A qubit counts once whether it carries X, Z or Y (= X and Z).
        x_half, z_half = np.hsplit(bsf, 2)
        return np.count_nonzero(x_half + z_half)
    if isinstance(bsf, csr_matrix):
        assert np.all(bsf.data == 1), \
            'BSF {} is not in binary form'.format(bsf)
        half = bsf.shape[1] // 2
        x_cols = bsf.indices[bsf.indices < half]
        z_cols = bsf.indices[bsf.indices >= half] - half
        return len(np.union1d(x_cols, z_cols))
    raise TypeError(
        f"bsf matrix should be a numpy array or "
        f"csr_matrix, not {type(bsf)}"
    )
def bsf_to_pauli(bsf):
    """
    Convert the given binary symplectic form to Pauli operator(s).

    (1 0 0 0 1 | 0 0 1 0 1) -> XIZIY

    :param bsf: Binary symplectic vector or matrix (numpy array or csr_matrix).
    :type bsf: numpy.array (1d or 2d) or csr_matrix
    :return: Pauli operators.
    :rtype: str or list of str
    """
    if isinstance(bsf, np.ndarray):
        assert np.array_equal(bsf % 2, bsf), \
            'BSF {} is not in binary form'.format(bsf)
        # Encode each qubit as x + 2z (0=I, 1=X, 2=Z, 3=Y) then translate.
        table = str.maketrans('0123', 'IXZY')

        def _row_to_pauli(row):
            x_half, z_half = np.hsplit(row, 2)
            codes = (x_half + z_half * 2).astype(str)
            return ''.join(codes).translate(table)

        if bsf.ndim == 1:
            return _row_to_pauli(bsf)
        return [_row_to_pauli(row) for row in bsf]
    else:
        assert np.all(bsf.data == 1), \
            'BSF {} is not in binary form'.format(bsf)
        half = bsf.shape[1] // 2

        def _row_to_pauli(row):
            chars = ['I'] * half
            # Row indices are sorted, so all X columns (< half) are seen
            # before their matching Z columns; X then Z becomes Y.
            for col in row.indices:
                if col < half:
                    chars[col] = 'X'
                else:
                    qubit = col - half
                    chars[qubit] = 'Y' if chars[qubit] == 'X' else 'Z'
            return ''.join(chars)

        return [_row_to_pauli(row) for row in bsf]
| [
"numpy.zeros_like",
"numpy.all",
"numpy.hsplit",
"numpy.hstack",
"numpy.array",
"numpy.reshape",
"numpy.array_equal",
"numpy.union1d",
"numpy.concatenate"
] | [((6835, 6853), 'numpy.zeros_like', 'np.zeros_like', (['bsf'], {}), '(bsf)\n', (6848, 6853), True, 'import numpy as np\n'), ((511, 537), 'numpy.array', 'np.array', (['a'], {'dtype': '"""uint8"""'}), "(a, dtype='uint8')\n", (519, 537), True, 'import numpy as np\n'), ((578, 604), 'numpy.array', 'np.array', (['b'], {'dtype': '"""uint8"""'}), "(b, dtype='uint8')\n", (586, 604), True, 'import numpy as np\n'), ((967, 997), 'numpy.reshape', 'np.reshape', (['a', '(1, a.shape[0])'], {}), '(a, (1, a.shape[0]))\n', (977, 997), True, 'import numpy as np\n'), ((1036, 1066), 'numpy.reshape', 'np.reshape', (['b', '(1, b.shape[0])'], {}), '(b, (1, b.shape[0]))\n', (1046, 1066), True, 'import numpy as np\n'), ((4689, 4731), 'numpy.concatenate', 'np.concatenate', (['[effective_X, effective_Z]'], {}), '([effective_X, effective_Z])\n', (4703, 4731), True, 'import numpy as np\n'), ((8185, 8213), 'numpy.array_equal', 'np.array_equal', (['(bsf % 2)', 'bsf'], {}), '(bsf % 2, bsf)\n', (8199, 8213), True, 'import numpy as np\n'), ((9230, 9258), 'numpy.array_equal', 'np.array_equal', (['(bsf % 2)', 'bsf'], {}), '(bsf % 2, bsf)\n', (9244, 9258), True, 'import numpy as np\n'), ((9716, 9737), 'numpy.all', 'np.all', (['(bsf.data == 1)'], {}), '(bsf.data == 1)\n', (9722, 9737), True, 'import numpy as np\n'), ((2820, 2839), 'numpy.hstack', 'np.hstack', (['(xs, zs)'], {}), '((xs, zs))\n', (2829, 2839), True, 'import numpy as np\n'), ((3383, 3417), 'numpy.concatenate', 'np.concatenate', (['[X_block, Z_block]'], {}), '([X_block, Z_block])\n', (3397, 3417), True, 'import numpy as np\n'), ((8386, 8407), 'numpy.all', 'np.all', (['(bsf.data == 1)'], {}), '(bsf.data == 1)\n', (8392, 8407), True, 'import numpy as np\n'), ((9444, 9459), 'numpy.hsplit', 'np.hsplit', (['b', '(2)'], {}), '(b, 2)\n', (9453, 9459), True, 'import numpy as np\n'), ((4777, 4813), 'numpy.array', 'np.array', (['[effective_X, effective_Z]'], {}), '([effective_X, effective_Z])\n', (4785, 4813), True, 'import numpy as np\n'), 
((8312, 8329), 'numpy.hsplit', 'np.hsplit', (['bsf', '(2)'], {}), '(bsf, 2)\n', (8321, 8329), True, 'import numpy as np\n'), ((8624, 8656), 'numpy.union1d', 'np.union1d', (['x_indices', 'z_indices'], {}), '(x_indices, z_indices)\n', (8634, 8656), True, 'import numpy as np\n'), ((4869, 4923), 'numpy.concatenate', 'np.concatenate', (['[effective_X[:, i], effective_Z[:, i]]'], {}), '([effective_X[:, i], effective_Z[:, i]])\n', (4883, 4923), True, 'import numpy as np\n')] |
import csv
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
from datasets import Dataset
from tqdm import tqdm
from transformers import (AutoModelForTokenClassification, AutoTokenizer,
DataCollatorForTokenClassification, Trainer,
TrainingArguments)
class ArgumentativeElementIdentifier:
    """Segment essays into argumentative elements with a token-classification model.

    Labels follow the IOB scheme ("B-"/"I-" prefixes plus "O" for outside)
    over the discourse types listed in ``discourse_types``.
    """

    def __init__(self, saved_model):
        """Load the tokenizer and fine-tuned weights from ``saved_model``.

        :param saved_model: directory containing the tokenizer files and
            ``pytorch_model.bin``.
        """
        # Index order must match the label ids the model was trained with.
        self.discourse_types = ["O",
                                "B-Position",
                                "I-Position",
                                "B-Evidence",
                                "I-Evidence",
                                "B-Counterclaim",
                                "I-Counterclaim",
                                "B-Rebuttal",
                                "I-Rebuttal",
                                "B-Claim",
                                "I-Claim",
                                "B-ConcludingStatement",
                                "I-ConcludingStatement",
                                "B-Lead",
                                "I-Lead",
                                ]
        # Config
        self.batch_size = 1
        self.min_tokens = 5  # spans with at most this many words are discarded
        self.tok_checkpoint = saved_model
        self.model_checkpoint = os.path.join(saved_model, "pytorch_model.bin")
        self.tokenizer = AutoTokenizer.from_pretrained(self.tok_checkpoint, add_prefix_space=True)
        # Load model weights on CPU and switch to inference mode.
        self.model = AutoModelForTokenClassification.from_pretrained(
            self.tok_checkpoint, num_labels=len(self.discourse_types))
        self.model.load_state_dict(
            torch.load(self.model_checkpoint, map_location=torch.device('cpu')))
        self.model.eval()
        self.data_collator = DataCollatorForTokenClassification(self.tokenizer)
        self.trainer = Trainer(
            self.model,
            data_collator=self.data_collator,
            tokenizer=self.tokenizer
        )

    def tokenize(self, examples):
        """Tokenize pre-split word lists, keeping offset mappings for decoding."""
        return self.tokenizer(examples["Tokens"], truncation=True,
                              is_split_into_words=True, padding=True,
                              return_offsets_mapping=True)

    def create_tokens(self, text):
        """Normalize whitespace: tokens joined by single spaces, essay ended by a newline."""
        return " ".join(text.split()) + "\n"

    def get_discourse_type(self, idx):
        """Map a label index to its discourse-type name, stripping the IOB prefix."""
        discourse_type = self.discourse_types[int(idx)]
        if discourse_type != "O":
            discourse_type = discourse_type[2:]  # drop the "B-"/"I-" tag
        if discourse_type == "ConcludingStatement":
            discourse_type = "Concluding Statement"
        return discourse_type

    def predict(self, text):
        """Run the model on ``text``.

        :return: list of single-entry dicts mapping a discourse type to the
            decoded span text, for spans longer than ``self.min_tokens`` words.
        """
        tokenized_text = self.create_tokens(text)
        frame = pd.DataFrame([x for x in tokenized_text.split("\n")], columns=["Tokens"])
        frame.Tokens = frame.Tokens.str.split()
        dataset = Dataset.from_pandas(frame)
        tokenized_test = dataset.map(self.tokenize, batched=True)
        predictions, _, _ = self.trainer.predict(tokenized_test)
        preds = np.argmax(predictions, axis=-1)
        return self.get_pred_token_ids(preds, tokenized_test["input_ids"])

    def get_pred_token_ids(self, preds, token_ids):
        """Group contiguous same-label tokens into decoded spans.

        Whenever the predicted label changes, the accumulated tokens are
        decoded and kept if the span has more than ``self.min_tokens`` words.
        """
        discourse_type_with_string_list = []
        previous_discourse_type = ""
        for text_idx, text_preds in enumerate(preds):
            current_discourse_token_idx_list = []
            for i, discourse_pred in tqdm(enumerate(text_preds)):
                try:
                    token_id_list = token_ids[text_idx][i]
                except IndexError:
                    # Predictions may run past the tokenized input (padding).
                    break
                if "<" in self.tokenizer.decode(token_id_list):
                    continue  # skip special tokens such as <s>, </s>, <pad>
                discourse_type = self.get_discourse_type(discourse_pred)
                if discourse_type != previous_discourse_type and i > 0:
                    try:
                        span_text = self.tokenizer.decode(current_discourse_token_idx_list)
                    except Exception:
                        # Best-effort: an undecodable span is dropped silently,
                        # matching the original behaviour.
                        continue
                    if len(span_text.split()) > self.min_tokens:
                        discourse_type_with_string_list.append({
                            previous_discourse_type: span_text
                        })
                        current_discourse_token_idx_list = []
                current_discourse_token_idx_list.append(int(token_ids[text_idx][i]))
                previous_discourse_type = discourse_type
        return discourse_type_with_string_list
if __name__ == "__main__":
element_identifier = ArgumentativeElementIdentifier("../saved_model")
input_text = input()
print(element_identifier.predict(input_text))
| [
"numpy.argmax",
"transformers.DataCollatorForTokenClassification",
"transformers.AutoTokenizer.from_pretrained",
"torch.device",
"datasets.Dataset.from_pandas",
"transformers.Trainer",
"os.path.join"
] | [((1048, 1094), 'os.path.join', 'os.path.join', (['saved_model', '"""pytorch_model.bin"""'], {}), "(saved_model, 'pytorch_model.bin')\n", (1060, 1094), False, 'import os\n'), ((1121, 1194), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.tok_checkpoint'], {'add_prefix_space': '(True)'}), '(self.tok_checkpoint, add_prefix_space=True)\n', (1150, 1194), False, 'from transformers import AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, Trainer, TrainingArguments\n'), ((1507, 1557), 'transformers.DataCollatorForTokenClassification', 'DataCollatorForTokenClassification', (['self.tokenizer'], {}), '(self.tokenizer)\n', (1541, 1557), False, 'from transformers import AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, Trainer, TrainingArguments\n'), ((1582, 1661), 'transformers.Trainer', 'Trainer', (['self.model'], {'data_collator': 'self.data_collator', 'tokenizer': 'self.tokenizer'}), '(self.model, data_collator=self.data_collator, tokenizer=self.tokenizer)\n', (1589, 1661), False, 'from transformers import AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, Trainer, TrainingArguments\n'), ((3005, 3040), 'datasets.Dataset.from_pandas', 'Dataset.from_pandas', (['test_tokens_df'], {}), '(test_tokens_df)\n', (3024, 3040), False, 'from datasets import Dataset\n'), ((3193, 3224), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (3202, 3224), True, 'import numpy as np\n'), ((1428, 1447), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1440, 1447), False, 'import torch\n')] |
import os
import random
import numpy as np
import torch
from utils.dataset import *
from utils.basenet import *
from utils.model import *
from utils.datautils import *
from train import *
from test import *
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
def train_net(training_loader, validation_loader, epochs):
    """Train the Okutama base network.

    Parameters
    ----------
    training_loader :
        DataLoader yielding training batches.
    validation_loader :
        DataLoader for evaluation. Currently unused: the test loop is
        disabled pending a fix in ``test_okutama``.
    epochs : int
        Number of training epochs to run.
    """
    # Seed every RNG in use so runs are reproducible.
    seed = 0
    np.random.seed(seed)
    torch.manual_seed(seed)
    random.seed(seed)

    # Prefer GPU when available, otherwise fall back to CPU.
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    # Build the model; batch-norm layers are frozen in eval mode while the
    # rest of the network trains.
    model = Basenet_okutama()
    model = model.to(device=device)
    model.train()
    model.apply(set_bn_eval)

    initial_lr = 1e-2
    lr_plan = {41: 1e-4, 81: 5e-5, 121: 1e-5}  # epoch -> new learning rate
    weight_decay = 0
    optimizer = optim.Adam(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=initial_lr,
        weight_decay=weight_decay,
    )

    # Kept for the (currently disabled) evaluation / checkpointing loop.
    best_result = {'epoch': 0, 'actions_acc': 0}
    start_epoch = 1
    for epoch in range(start_epoch, start_epoch + epochs):
        print("Epoch number = ", epoch)
        if epoch in lr_plan:
            adjust_lr(optimizer, lr_plan[epoch])
        # One epoch of forward and backward passes.
        train_info = train_okutama(training_loader, model, device, optimizer, epoch)
        print(train_info)
        # TODO: re-enable periodic evaluation via test_okutama and model
        # checkpointing once the bug in test_okutama is fixed.
"numpy.random.seed",
"torch.manual_seed",
"random.seed",
"torch.cuda.is_available",
"torch.device"
] | [((448, 468), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (462, 468), True, 'import numpy as np\n'), ((474, 497), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (491, 497), False, 'import torch\n'), ((503, 520), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (514, 520), False, 'import random\n'), ((556, 581), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (579, 581), False, 'import torch\n'), ((601, 621), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (613, 621), False, 'import torch\n'), ((651, 670), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (663, 670), False, 'import torch\n')] |
import pygame
import random
import numpy as np
# Initialize fonts before creating any font objects.
pygame.font.init()
fontsize = 20
myfont = pygame.font.SysFont('Comic Sans MS', fontsize)
# Instruction lines drawn in the top-left corner; replaced with win/lose
# messages by the main loop.
textstrings = ["Click and drag the mouse near your blue planet to throw a rock.", "Hit the red planet with the rock, and don't hit yours!", "Hit ESC to exit."]
fps = 60
dt = 1.   # physics time step per frame
G = 1.    # gravitational constant (game units)
pygame.init()
display_height = 800.
display_width = 800.
center = (display_height/2., display_width/2.)
screen = pygame.display.set_mode((int(display_height), int(display_width)))
pygame.display.set_caption('Big Bang! Bang!')
# Shared white-circle sprite; each Body tints and scales its own copy.
circle = pygame.image.load('pics/circle.png')
def color_surface(surface, red, green, blue):
    """Tint ``surface`` in place by overwriting its R, G and B channels.

    Technique from:
    https://gamedev.stackexchange.com/questions/26550/how-can-a-pygame-image-be-colored
    """
    pixels = pygame.surfarray.pixels3d(surface)
    for channel, value in enumerate((red, green, blue)):
        pixels[:, :, channel] = value
class Body:
    """A circular body with mass, drawn as a tinted sprite, moving under
    simple Newtonian gravity with Euler integration."""

    def __init__(self, mass=1., radius=1, position=(center[0], center[1]), velocity=(0,0), color=(255,255,255)):
        self.mass = mass
        self.radius = radius
        self.position = np.array(position)
        self.velocity = np.array(velocity)
        # Give this body its own scaled, tinted copy of the shared sprite.
        sprite = pygame.transform.scale(circle.copy(), (2 * radius, 2 * radius))
        sprite.set_colorkey((0, 0, 0))
        sprite.convert_alpha()
        color_surface(sprite, color[0], color[1], color[2])
        self.pic = sprite

    def draw(self):
        """Blit the sprite so its centre sits at self.position."""
        top_left = (int(round(self.position[0])) - self.radius,
                    int(round(self.position[1])) - self.radius)
        screen.blit(self.pic, top_left)

    def compute_force(self, other):
        """Gravitational force vector exerted by ``other`` on this body."""
        r = self.position - other.position
        return G * self.mass * other.mass * r / np.sqrt(r[0] ** 2 + r[1] ** 2) ** 3

    def move(self, force):
        """Advance one time step under ``force`` (explicit Euler step)."""
        self.position += self.velocity * dt
        self.velocity += (-force / self.mass) * dt

    def distance(self, position):
        """Euclidean distance from this body's centre to ``position``."""
        offset = self.position - position
        return np.sqrt(offset.dot(offset))

    def near(self, position):
        """True when ``position`` lies in the ring just outside the body."""
        d = self.distance(position)
        return self.radius < d < 2 * self.radius

    def struck(self, other):
        """True when this body's disc overlaps ``other``'s disc."""
        return self.distance(other.position) < (self.radius + other.radius)
# World setup: a heavy fixed star, the player's blue planet on a rough orbit,
# and the red target planet.
star = Body(mass=200, radius=50)
us = Body(mass=10., radius=25, position=(center[0] + display_width/5., center[1]), velocity=(0., -1.), color=(0, 0, 255))
them = Body(mass=20., radius=20, position=(30, center[1]), velocity=(0., 0.7), color=(255, 0, 0))
clock = pygame.time.Clock()
frozen = False  # while True, physics is paused (e.g. while aiming a throw)
done = False
# Main loop: render, integrate physics, handle collisions and input events.
while done == False:
    clock.tick(fps)
    screen.fill(0)
    # Render the instruction/status lines in the top-left corner.
    text = []
    for string in textstrings:
        text.append(myfont.render(string, False, (255, 255, 255)))
    linepos = 0
    for line in text:
        screen.blit(line, (10,linepos))
        linepos += 1.5*fontsize
    star.draw()
    # The star is treated as fixed; only the two planets feel its gravity.
    for body in [us, them]:
        body.draw()
        if not frozen:
            force = body.compute_force(star)
            body.move(force)
    # A rock only exists after a throw: it is created in the mouse handler
    # below and removed via `del rock` on most impacts.
    if 'rock' in locals():
        rock.draw()
        force = rock.compute_force(star) + rock.compute_force(us) + rock.compute_force(them)
        if not frozen:
            rock.move(force)
        for body in us, star, them:
            if rock.struck(body):
                frozen = False # @todo Design a context for frozen
                if body == them:
                    frozen = True
                    textstrings = ["You win!", "Hit ESC to exit."]
                # NOTE(review): the `else` below pairs with `if body == us`,
                # so the rock is deleted on hitting the star or the red
                # planet but kept after striking our own planet — confirm
                # this asymmetry is intentional.
                if body == us:
                    frozen = True
                    textstrings = ["You lose!", "Hit ESC to exit."]
                else:
                    del rock
                break
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == pygame.MOUSEBUTTONDOWN:
            # Start aiming only when the click lands just outside our planet.
            press_position = np.array(event.pos).astype(float)
            if us.near(press_position):
                frozen = True
                rock = Body(mass=0.001, radius=5, position=press_position, color=(0, 255, 0))
        if event.type == pygame.MOUSEBUTTONUP:
            if frozen:
                release_position = np.array(event.pos).astype(float)
                # Launch velocity is the drag vector scaled by our radius.
                rock.velocity=(press_position - release_position)/float(us.radius)
                frozen = False
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_ESCAPE:
                done = True
"pygame.font.SysFont",
"pygame.event.get",
"pygame.init",
"pygame.font.init",
"pygame.display.update",
"numpy.array",
"pygame.surfarray.pixels3d",
"pygame.image.load",
"pygame.display.set_caption",
"pygame.time.Clock",
"numpy.sqrt"
] | [((48, 66), 'pygame.font.init', 'pygame.font.init', ([], {}), '()\n', (64, 66), False, 'import pygame\n'), ((90, 136), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Comic Sans MS"""', 'fontsize'], {}), "('Comic Sans MS', fontsize)\n", (109, 136), False, 'import pygame\n'), ((323, 336), 'pygame.init', 'pygame.init', ([], {}), '()\n', (334, 336), False, 'import pygame\n'), ((503, 548), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Big Bang! Bang!"""'], {}), "('Big Bang! Bang!')\n", (529, 548), False, 'import pygame\n'), ((559, 595), 'pygame.image.load', 'pygame.image.load', (['"""pics/circle.png"""'], {}), "('pics/circle.png')\n", (576, 595), False, 'import pygame\n'), ((2444, 2463), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (2461, 2463), False, 'import pygame\n'), ((743, 777), 'pygame.surfarray.pixels3d', 'pygame.surfarray.pixels3d', (['surface'], {}), '(surface)\n', (768, 777), False, 'import pygame\n'), ((3749, 3772), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (3770, 3772), False, 'import pygame\n'), ((3795, 3813), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3811, 3813), False, 'import pygame\n'), ((1067, 1085), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (1075, 1085), True, 'import numpy as np\n'), ((1110, 1128), 'numpy.array', 'np.array', (['velocity'], {}), '(velocity)\n', (1118, 1128), True, 'import numpy as np\n'), ((1629, 1659), 'numpy.sqrt', 'np.sqrt', (['(r[0] ** 2 + r[1] ** 2)'], {}), '(r[0] ** 2 + r[1] ** 2)\n', (1636, 1659), True, 'import numpy as np\n'), ((3898, 3917), 'numpy.array', 'np.array', (['event.pos'], {}), '(event.pos)\n', (3906, 3917), True, 'import numpy as np\n'), ((4210, 4229), 'numpy.array', 'np.array', (['event.pos'], {}), '(event.pos)\n', (4218, 4229), True, 'import numpy as np\n')] |
# coding=utf-8
import os
from io import BytesIO
import numpy as np
from PIL import Image
from mindspore.mindrecord import FileWriter
from .segbase import SegDataset
__all__ = ['Cityscapes']
# MindRecord schema for one sample: the source file name plus the raw PNG
# bytes of the image ("data") and of the encoded label mask ("label").
seg_schema = {
    "file_name": {"type": "string"},
    "data": {"type": "bytes"},
    "label": {"type": "bytes"}
}
class Cityscapes(SegDataset):
    """Cityscapes segmentation dataset that can be exported to MindRecord.

    Collects matched image/label paths for one split, remaps raw Cityscapes
    label IDs to train IDs (0-18, with 19 as the ignore class), and writes
    the samples into a MindRecord file.
    """

    def __init__(self, root, split='train', shard_num=1, shuffle=False):
        """
        :param root: dataset root containing 'leftImg8bit/' and 'gtFine/'
        :param split: 'train', 'val', 'test', or 'trainval'
        :param shard_num: number of MindRecord shards to write
        :param shuffle: if True, shuffle image/mask pairs (kept aligned)
        """
        super(Cityscapes, self).__init__(root, split, shard_num)
        self.images, self.masks = _get_city_pairs(root, split)
        assert len(self.images) == len(self.masks)
        if shuffle:
            # Re-use the same RNG state for both lists so the image/mask
            # pairing survives the shuffle.
            state = np.random.get_state()
            np.random.shuffle(self.images)
            np.random.set_state(state)
            np.random.shuffle(self.masks)
        # Cityscapes label IDs that participate in training; everything else
        # maps to the ignore class below.
        self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
                              23, 24, 25, 26, 27, 28, 31, 32, 33]
        self._key = np.array([19, 19, 19, 19, 19, 19,
                              19, 19, 0, 1, 19, 19,
                              2, 3, 4, 19, 19, 19,
                              5, 19, 6, 7, 8, 9,
                              10, 11, 12, 13, 14, 15,
                              19, 19, 16, 17, 18])  # class 19 should be ignored
        self._mapping = np.array(range(-1, len(self._key) - 1))

    def _class_to_index(self, mask):
        """Map raw Cityscapes label IDs in ``mask`` to train IDs via _key."""
        values = np.unique(mask)
        for value in values:
            assert (value in self._mapping)
        index = np.digitize(mask.ravel(), self._mapping, right=True)
        return self._key[index].reshape(mask.shape)

    def _build_mindrecord(self, mindrecord_path):
        """Serialize every image/mask pair into a MindRecord file."""
        writer = FileWriter(file_name=mindrecord_path, shard_num=self.shard_num)
        writer.add_schema(seg_schema, "seg_schema")
        data = []
        cnt = 0
        print('number of samples:', self.num_images)
        for idx in range(len(self.images)):
            sample_ = {'file_name': os.path.basename(self.images[idx])}
            with open(self.images[idx], 'rb') as f:
                sample_['data'] = f.read()
            white_io = BytesIO()
            mask = Image.open(self.masks[idx])
            mask = Image.fromarray(self._class_to_index(np.array(mask)).astype('uint8'))
            mask.save(white_io, 'PNG')
            # Fix: encode the PNG once; the original called getvalue() twice
            # and left the first result in an unused `mask_bytes` local.
            sample_['label'] = white_io.getvalue()
            data.append(sample_)
            cnt += 1
            if cnt % 10 == 0:
                # Flush to disk in batches of 10 to bound memory use.
                writer.write_raw_data(data)
                data = []
        if data:
            writer.write_raw_data(data)
        writer.commit()
        print('number of samples written:', cnt)

    def build_data(self, mindrecord_path):
        """Public entry point: write this dataset to ``mindrecord_path``."""
        self._build_mindrecord(mindrecord_path)

    @property
    def num_images(self):
        # Number of image files in this split.
        return len(self.images)

    @property
    def num_masks(self):
        # Number of label files in this split.
        return len(self.masks)

    def images_list(self):
        return self.images

    def masks_list(self):
        return self.masks
def _get_city_pairs(folder, split='train'):
    """Return matched lists of image and label paths for a Cityscapes split.

    ``split`` may be 'train', 'val', 'test', or 'trainval' (train + val
    concatenated, images first from train then from val).
    """
    def pairs_for(sub_split):
        # Images live under leftImg8bit/<split>, labels under gtFine/<split>.
        return _get_path_pairs(os.path.join(folder, 'leftImg8bit/' + sub_split),
                               os.path.join(folder, 'gtFine/' + sub_split))

    if split == 'trainval':
        train_imgs, train_masks = pairs_for('train')
        val_imgs, val_masks = pairs_for('val')
        return train_imgs + val_imgs, train_masks + val_masks
    assert split in ('train', 'val', 'test')
    return pairs_for(split)
def _get_path_pairs(img_folder, mask_folder):
img_paths = []
mask_paths = []
for root, _, files in os.walk(img_folder):
for filename in files:
if filename.endswith(".png"):
imgpath = os.path.join(root, filename)
foldername = os.path.basename(os.path.dirname(imgpath))
maskname = filename.replace('leftImg8bit', 'gtFine_labelIds')
maskpath = os.path.join(mask_folder, foldername, maskname)
if os.path.isfile(imgpath) and os.path.isfile(maskpath):
img_paths.append(imgpath)
mask_paths.append(maskpath)
else:
print('cannot find the mask or image:', imgpath, maskpath)
print('Found {} images in the folder {}'.format(len(img_paths), img_folder))
return img_paths, mask_paths
| [
"mindspore.mindrecord.FileWriter",
"io.BytesIO",
"numpy.random.shuffle",
"os.path.basename",
"numpy.random.get_state",
"os.path.dirname",
"os.walk",
"numpy.random.set_state",
"PIL.Image.open",
"os.path.isfile",
"numpy.array",
"os.path.join",
"numpy.unique"
] | [((4068, 4087), 'os.walk', 'os.walk', (['img_folder'], {}), '(img_folder)\n', (4075, 4087), False, 'import os\n'), ((934, 1078), 'numpy.array', 'np.array', (['[19, 19, 19, 19, 19, 19, 19, 19, 0, 1, 19, 19, 2, 3, 4, 19, 19, 19, 5, 19, \n 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 19, 19, 16, 17, 18]'], {}), '([19, 19, 19, 19, 19, 19, 19, 19, 0, 1, 19, 19, 2, 3, 4, 19, 19, 19,\n 5, 19, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 19, 19, 16, 17, 18])\n', (942, 1078), True, 'import numpy as np\n'), ((1374, 1389), 'numpy.unique', 'np.unique', (['mask'], {}), '(mask)\n', (1383, 1389), True, 'import numpy as np\n'), ((1652, 1715), 'mindspore.mindrecord.FileWriter', 'FileWriter', ([], {'file_name': 'mindrecord_path', 'shard_num': 'self.shard_num'}), '(file_name=mindrecord_path, shard_num=self.shard_num)\n', (1662, 1715), False, 'from mindspore.mindrecord import FileWriter\n'), ((3107, 3151), 'os.path.join', 'os.path.join', (['folder', "('leftImg8bit/' + split)"], {}), "(folder, 'leftImg8bit/' + split)\n", (3119, 3151), False, 'import os\n'), ((3174, 3213), 'os.path.join', 'os.path.join', (['folder', "('gtFine/' + split)"], {}), "(folder, 'gtFine/' + split)\n", (3186, 3213), False, 'import os\n'), ((3396, 3437), 'os.path.join', 'os.path.join', (['folder', '"""leftImg8bit/train"""'], {}), "(folder, 'leftImg8bit/train')\n", (3408, 3437), False, 'import os\n'), ((3466, 3502), 'os.path.join', 'os.path.join', (['folder', '"""gtFine/train"""'], {}), "(folder, 'gtFine/train')\n", (3478, 3502), False, 'import os\n'), ((3528, 3567), 'os.path.join', 'os.path.join', (['folder', '"""leftImg8bit/val"""'], {}), "(folder, 'leftImg8bit/val')\n", (3540, 3567), False, 'import os\n'), ((3594, 3628), 'os.path.join', 'os.path.join', (['folder', '"""gtFine/val"""'], {}), "(folder, 'gtFine/val')\n", (3606, 3628), False, 'import os\n'), ((633, 654), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (652, 654), True, 'import numpy as np\n'), ((667, 697), 'numpy.random.shuffle', 
'np.random.shuffle', (['self.images'], {}), '(self.images)\n', (684, 697), True, 'import numpy as np\n'), ((710, 736), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (729, 736), True, 'import numpy as np\n'), ((749, 778), 'numpy.random.shuffle', 'np.random.shuffle', (['self.masks'], {}), '(self.masks)\n', (766, 778), True, 'import numpy as np\n'), ((2089, 2098), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (2096, 2098), False, 'from io import BytesIO\n'), ((2118, 2145), 'PIL.Image.open', 'Image.open', (['self.masks[idx]'], {}), '(self.masks[idx])\n', (2128, 2145), False, 'from PIL import Image\n'), ((1935, 1969), 'os.path.basename', 'os.path.basename', (['self.images[idx]'], {}), '(self.images[idx])\n', (1951, 1969), False, 'import os\n'), ((4188, 4216), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (4200, 4216), False, 'import os\n'), ((4394, 4441), 'os.path.join', 'os.path.join', (['mask_folder', 'foldername', 'maskname'], {}), '(mask_folder, foldername, maskname)\n', (4406, 4441), False, 'import os\n'), ((4263, 4287), 'os.path.dirname', 'os.path.dirname', (['imgpath'], {}), '(imgpath)\n', (4278, 4287), False, 'import os\n'), ((4461, 4484), 'os.path.isfile', 'os.path.isfile', (['imgpath'], {}), '(imgpath)\n', (4475, 4484), False, 'import os\n'), ((4489, 4513), 'os.path.isfile', 'os.path.isfile', (['maskpath'], {}), '(maskpath)\n', (4503, 4513), False, 'import os\n'), ((2202, 2216), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (2210, 2216), True, 'import numpy as np\n')] |
import os
import shutil
import requests
import traceback
import tarfile
import pickle
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
cifar_dataset_path = "./cifar_dataset/"
def getCifar10Dataset(path=None):
    """Download and unpack the CIFAR-10 python batches into a flat directory.

    Any existing dataset directory is wiped first so the download starts
    clean. The tar archive is fetched from the official Toronto URL, its
    batch files are moved to the top of the dataset directory, and the
    temporary tarball, extracted folder and stray .html files are removed.

    :param path: optional override for the module-level ``cifar_dataset_path``
    """
    if path is not None:
        global cifar_dataset_path
        cifar_dataset_path = path
    # constants
    cifar_10_url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    tarfile_name = 'cifar-10.tar.gz'
    # Start from an empty dataset directory.
    if os.path.isdir(cifar_dataset_path):
        try:
            # remove directory to make sure we start clean
            shutil.rmtree(cifar_dataset_path)
            # create directory
            os.mkdir(cifar_dataset_path)
        except OSError:  # narrowed from a bare except; rmtree/mkdir raise OSError
            print("Deletion of folder not possible - something went wrong.")
            traceback.print_exc()
    else:
        # create directory
        os.mkdir(cifar_dataset_path)
    # download the cifar-10 dataset
    r = requests.get(cifar_10_url, allow_redirects=True)
    # Fix: use context managers so the archive file handles are closed
    # (the original left both the written file and the TarFile open).
    with open(cifar_dataset_path + tarfile_name, 'wb') as tar_out:
        tar_out.write(r.content)
    with tarfile.open(cifar_dataset_path + tarfile_name) as cifar_tar_file:
        cifar_tar_file.extractall(cifar_dataset_path)
    # remove tar file
    os.remove(cifar_dataset_path + tarfile_name)
    # rearrange file structure: move batch files out of 'cifar-10-batches-py/'
    dir_to_batches = cifar_dataset_path + 'cifar-10-batches-py/'
    for f in os.listdir(dir_to_batches):
        shutil.move(dir_to_batches + f, cifar_dataset_path)
    try:
        # remove the now-empty 'cifar-10-batches-py' directory
        os.rmdir(dir_to_batches)
    except OSError:
        print("Deletion of folder not possible - something went wrong.")
        traceback.print_exc()
    # remove the readme .html file that ships inside the archive
    for f in os.listdir(cifar_dataset_path):
        if f.endswith('html'):
            os.remove(cifar_dataset_path + f)
def unpickle(file):
    """Load one pickled CIFAR-10 batch file and return its dictionary.

    :param file: path to a pickled batch file
    :return: the unpickled dict (CIFAR's py2 pickles yield bytes keys)
    """
    with open(file, 'rb') as fo:
        # Renamed the local from `dict`, which shadowed the builtin.
        batch = pickle.load(fo, encoding='bytes')
    return batch
def getTrainDatasets():
    """Load the five CIFAR-10 training batches as (images, labels) arrays.

    Images are returned as a uint8 array of shape (50000, 3, 32, 32) and
    labels as a uint8 vector of the same length.
    """
    images = np.empty([0, 3, 32, 32])
    labels = np.array([], dtype=int)
    for batch_idx in range(1, 6):
        batch = unpickle(cifar_dataset_path + 'data_batch_{}'.format(batch_idx))
        # Raw batch data is flat (10000, 3072); reshape to NCHW images.
        batch_images = batch[b'data'].reshape((10000, 3, 32, 32))
        images = np.concatenate((images, batch_images), axis=0).astype('uint8')
        labels = np.concatenate((labels, batch[b'labels']), axis=0).astype('uint8')
    return images, labels
def getTestDataset():
    """Load the CIFAR-10 test batch as (images, labels) uint8 arrays."""
    batch = unpickle(cifar_dataset_path + 'test_batch')
    images = batch[b'data'].reshape((10000, 3, 32, 32)).astype('uint8')
    labels = np.array(batch[b'labels']).astype('uint8')
    return images, labels
# CifarDataset is a class that construct a dataset out of training images
# stored as numpy arrays and its corresponding training labels - also
# stored as numpy arrays
# This CifarDataset class serves also to provide testing images converted
# Tensors in a way that they can be provided via a DataLoader
class CifarDataset(Dataset):
    """Torch Dataset over numpy image data with optional labels/transform.

    When ``target`` is given it is converted to a long tensor and
    ``__getitem__`` yields (sample, label) pairs; without a target only the
    (optionally transformed) sample is returned, which suits test-time use.
    """

    def __init__(self, data, target=None, transform=None):
        self.data = data
        if target is None:
            self.target = None
        else:
            self.target = torch.from_numpy(target).long()
        self.transform = transform

    def __getitem__(self, index):
        item = self.data[index]
        label = "" if self.target is None else self.target[index]
        if self.transform:
            item = self.transform(item)
        if self.target is None:
            return item
        return item, label

    def __len__(self):
        return len(self.data)
"os.mkdir",
"os.remove",
"traceback.print_exc",
"shutil.rmtree",
"os.path.isdir",
"numpy.empty",
"pickle.load",
"numpy.array",
"requests.get",
"shutil.move",
"tarfile.open",
"os.rmdir",
"os.listdir",
"numpy.concatenate",
"torch.from_numpy"
] | [((537, 570), 'os.path.isdir', 'os.path.isdir', (['cifar_dataset_path'], {}), '(cifar_dataset_path)\n', (550, 570), False, 'import os\n'), ((1004, 1052), 'requests.get', 'requests.get', (['cifar_10_url'], {'allow_redirects': '(True)'}), '(cifar_10_url, allow_redirects=True)\n', (1016, 1052), False, 'import requests\n'), ((1142, 1189), 'tarfile.open', 'tarfile.open', (['(cifar_dataset_path + tarfile_name)'], {}), '(cifar_dataset_path + tarfile_name)\n', (1154, 1189), False, 'import tarfile\n'), ((1294, 1338), 'os.remove', 'os.remove', (['(cifar_dataset_path + tarfile_name)'], {}), '(cifar_dataset_path + tarfile_name)\n', (1303, 1338), False, 'import os\n'), ((1448, 1474), 'os.listdir', 'os.listdir', (['dir_to_batches'], {}), '(dir_to_batches)\n', (1458, 1474), False, 'import os\n'), ((1824, 1854), 'os.listdir', 'os.listdir', (['cifar_dataset_path'], {}), '(cifar_dataset_path)\n', (1834, 1854), False, 'import os\n'), ((2225, 2249), 'numpy.empty', 'np.empty', (['[0, 3, 32, 32]'], {}), '([0, 3, 32, 32])\n', (2233, 2249), True, 'import numpy as np\n'), ((2266, 2289), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (2274, 2289), True, 'import numpy as np\n'), ((930, 958), 'os.mkdir', 'os.mkdir', (['cifar_dataset_path'], {}), '(cifar_dataset_path)\n', (938, 958), False, 'import os\n'), ((1504, 1555), 'shutil.move', 'shutil.move', (['(dir_to_batches + f)', 'cifar_dataset_path'], {}), '(dir_to_batches + f, cifar_dataset_path)\n', (1515, 1555), False, 'import shutil\n'), ((1621, 1645), 'os.rmdir', 'os.rmdir', (['dir_to_batches'], {}), '(dir_to_batches)\n', (1629, 1645), False, 'import os\n'), ((2003, 2036), 'pickle.load', 'pickle.load', (['fo'], {'encoding': '"""bytes"""'}), "(fo, encoding='bytes')\n", (2014, 2036), False, 'import pickle\n'), ((652, 685), 'shutil.rmtree', 'shutil.rmtree', (['cifar_dataset_path'], {}), '(cifar_dataset_path)\n', (665, 685), False, 'import shutil\n'), ((729, 757), 'os.mkdir', 'os.mkdir', (['cifar_dataset_path'], 
{}), '(cifar_dataset_path)\n', (737, 757), False, 'import os\n'), ((1739, 1760), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1758, 1760), False, 'import traceback\n'), ((1899, 1932), 'os.remove', 'os.remove', (['(cifar_dataset_path + f)'], {}), '(cifar_dataset_path + f)\n', (1908, 1932), False, 'import os\n'), ((3036, 3062), 'numpy.array', 'np.array', (["tdicc[b'labels']"], {}), "(tdicc[b'labels'])\n", (3044, 3062), True, 'import numpy as np\n'), ((863, 884), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (882, 884), False, 'import traceback\n'), ((2664, 2719), 'numpy.concatenate', 'np.concatenate', (["(train_labels, dicc[b'labels'])"], {'axis': '(0)'}), "((train_labels, dicc[b'labels']), axis=0)\n", (2678, 2719), True, 'import numpy as np\n'), ((3588, 3612), 'torch.from_numpy', 'torch.from_numpy', (['target'], {}), '(target)\n', (3604, 3612), False, 'import torch\n')] |
#
# Copyright (c) 2018 <NAME> <<EMAIL>>
#
# See the file LICENSE for your rights.
#
"""
Methods for training scikit-learn models.
"""
from datetime import datetime, timedelta
import pandas as pd
from mosx.util import get_object, to_bool, dewpoint
from mosx.estimators import TimeSeriesEstimator, RainTuningEstimator, BootStrapEnsembleEstimator
import pickle
import numpy as np
def build_estimator(config):
    """
    Build the estimator object from the parameters in config.

    Constructs a scikit-learn Pipeline (imputer -> optional scaler ->
    regressor) from config['Model'], optionally wrapping the regressor in
    AdaBoost / MultiOutputRegressor and the pipeline in the project's
    TimeSeriesEstimator, RainTuningEstimator, or BootStrapEnsembleEstimator.

    :param config: dict-like configuration; reads config['Model'] keys
        'regressor', 'Parameters', 'train_individual', 'predict_timeseries'
        and optional 'Ada boosting', 'Rain tuning', 'Bootstrapping'
    :return: the assembled estimator object
    """
    regressor = config['Model']['regressor']
    sklearn_kwargs = config['Model']['Parameters']
    train_individual = config['Model']['train_individual']
    ada_boost = config['Model'].get('Ada boosting', None)
    rain_tuning = config['Model'].get('Rain tuning', None)
    bootstrap = config['Model'].get('Bootstrapping', None)
    # Resolve the class named e.g. 'ensemble.RandomForestRegressor'.
    Regressor = get_object('sklearn.%s' % regressor)
    if config['verbose']:
        print('build_estimator: using sklearn.%s as estimator...' % regressor)
    from sklearn.preprocessing import Imputer
    from sklearn.preprocessing import StandardScaler as Scaler
    from sklearn.pipeline import Pipeline
    # Create and train the learning algorithm
    if config['verbose']:
        print('build_estimator: here are the parameters passed to the learning algorithm...')
        print(sklearn_kwargs)
    # Create the pipeline list; imputation always comes first.
    pipeline = [("imputer", Imputer(missing_values=np.nan, strategy="mean", axis=0))]
    if config['Model']['predict_timeseries']:
        pipeline_timeseries = [("imputer", Imputer(missing_values=np.nan, strategy="mean", axis=0))]
    if not (regressor.startswith('ensemble')):
        # Need to add feature scaling (tree ensembles don't require it)
        pipeline.append(("scaler", Scaler()))
        if config['Model']['predict_timeseries']:
            pipeline_timeseries.append(("scaler", Scaler()))
    # Create the regressor object
    regressor_obj = Regressor(**sklearn_kwargs)
    if ada_boost is not None:
        if config['verbose']:
            print('build_estimator: using Ada boosting...')
        from sklearn.ensemble import AdaBoostRegressor
        regressor_obj = AdaBoostRegressor(regressor_obj, **ada_boost)
    if train_individual:
        if config['verbose']:
            print('build_estimator: training separate models for each parameter...')
        from sklearn.multioutput import MultiOutputRegressor
        # NOTE(review): the second argument (4) is MultiOutputRegressor's
        # n_jobs — presumably 4 parallel jobs, one per target; confirm.
        multi_regressor = MultiOutputRegressor(regressor_obj, 4)
        pipeline.append(("regressor", multi_regressor))
    else:
        pipeline.append(("regressor", regressor_obj))
        if config['Model']['predict_timeseries']:
            pipeline_timeseries.append(("regressor", regressor_obj))
    # Make the final estimator with a Pipeline
    if config['Model']['predict_timeseries']:
        estimator = TimeSeriesEstimator(Pipeline(pipeline), Pipeline(pipeline_timeseries))
    else:
        estimator = Pipeline(pipeline)
    # Rain tuning only applies to ensemble regressors.
    if rain_tuning is not None and regressor.startswith('ensemble'):
        if config['verbose']:
            print('build_estimator: using rain tuning...')
        rain_kwargs = rain_tuning.copy()
        # 'use_raw_rain' is consumed by build_train_data, not the estimator.
        rain_kwargs.pop('use_raw_rain', None)
        estimator = RainTuningEstimator(estimator, **rain_kwargs)
    # Add bootstrapping if requested
    if bootstrap is not None:
        if config['verbose']:
            print('build_estimator: using bootstrapping ensemble...')
        estimator = BootStrapEnsembleEstimator(estimator, **bootstrap)
    return estimator
def build_train_data(config, predictor_file, no_obs=False, no_models=False, test_size=0):
    """
    Build the array of training (and optionally testing) data.

    :param config: dict-like configuration
    :param predictor_file: str: path to a pickled predictor data object
    :param no_obs: bool: if True, exclude observation ('OBS') predictors
    :param no_models: bool: if True, exclude model ('BUFKIT') predictors
    :param test_size: int: if > 0, split off this many samples for testing
    :return: if test_size > 0, a 6-tuple
        (p_train, t_train, r_train, p_test, t_test, r_test); otherwise a
        3-tuple (predictors, targets, rain) where rain is None unless
        'use_raw_rain' is enabled in the rain-tuning config
    """
    from sklearn.model_selection import train_test_split
    if config['verbose']:
        print('build_train_data: reading predictor file')
    rain_tuning = config['Model'].get('Rain tuning', None)
    with open(predictor_file, 'rb') as handle:
        data = pickle.load(handle)
    # Select data.  NOTE(review): `data` supports both item access
    # (data['BUFKIT']) and attribute access (data.rain) — presumably a
    # project container class; confirm against mosx's data format.
    if no_obs and no_models:
        # Excluding both sources would leave nothing; fall back to using all.
        no_obs = False
        no_models = False
    if no_obs:
        if config['verbose']:
            print('build_train_data: not using observations to train')
        predictors = data['BUFKIT']
    elif no_models:
        if config['verbose']:
            print('build_train_data: not using models to train')
        predictors = data['OBS']
    else:
        predictors = np.concatenate((data['BUFKIT'], data['OBS']), axis=1)
    if rain_tuning is not None and to_bool(rain_tuning.get('use_raw_rain', False)):
        # Append raw rain columns so they survive the train/test split below.
        predictors = np.concatenate((predictors, data.rain), axis=1)
        rain_shape = data.rain.shape[-1]
    targets = data['VERIF']
    if test_size > 0:
        p_train, p_test, t_train, t_test = train_test_split(predictors, targets, test_size=test_size)
        if rain_tuning is not None and to_bool(rain_tuning.get('use_raw_rain', False)):
            # Peel the appended rain columns back off both subsets.
            r_train = p_train[:, -1*rain_shape:]
            p_train = p_train[:, :-1*rain_shape]
            r_test = p_test[:, -1 * rain_shape:]
            p_test = p_test[:, :-1 * rain_shape]
        else:
            r_train = None
            r_test = None
        return p_train, t_train, r_train, p_test, t_test, r_test
    else:
        if rain_tuning is not None and to_bool(rain_tuning.get('use_raw_rain', False)):
            return predictors, targets, data.rain
        else:
            return predictors, targets, None
def train(config, predictor_file, estimator_file=None, no_obs=False, no_models=False, test_size=0):
    """
    Generate and train a scikit-learn machine learning estimator. The trained
    estimator is pickled to disk so it can be reloaded for predictions later.

    :param config:
    :param predictor_file: str: full path to saved file of predictor data
    :param estimator_file: str: full path to output model file; defaults to
        '<MOSX_ROOT>/<station_id>_mosx.pkl'
    :param no_obs: bool: if True, generates the model with no OBS data
    :param no_models: bool: if True, generates the model with no BUFR data
    :param test_size: int: if > 0, returns a held-out subset of size
        'test_size' as (predictors, targets, rain)
    :return: the held-out test data when test_size > 0, otherwise None
    """
    model = build_estimator(config)
    rain_cfg = config['Model'].get('Rain tuning', None)
    hold_out = test_size > 0
    if hold_out:
        (train_x, train_y, train_rain,
         test_x, test_y, test_rain) = build_train_data(config, predictor_file, no_obs=no_obs,
                                                       no_models=no_models, test_size=test_size)
    else:
        train_x, train_y, train_rain = build_train_data(config, predictor_file, no_obs=no_obs, no_models=no_models)
    print('train: training the estimator')
    use_raw_rain = rain_cfg is not None and to_bool(rain_cfg.get('use_raw_rain', False))
    if use_raw_rain:
        model.fit(train_x, train_y, rain_array=train_rain)
    else:
        model.fit(train_x, train_y)
    if estimator_file is None:
        estimator_file = '%s/%s_mosx.pkl' % (config['MOSX_ROOT'], config['station_id'])
    print('train: -> exporting to %s' % estimator_file)
    with open(estimator_file, 'wb') as handle:
        pickle.dump(model, handle, protocol=pickle.HIGHEST_PROTOCOL)
    if hold_out:
        return test_x, test_y, test_rain
    return
def _plot_learning_curve(estimator, X, y, ylim=None, cv=None, scoring=None, title=None, n_jobs=1,
                         train_sizes=np.linspace(.1, 1.0, 5)):
    """Render a train/cross-validation score curve and return the Figure.

    Runs sklearn's learning_curve on (X, y) and plots mean scores with a
    +/- one-standard-deviation band for both the training and the
    cross-validation scores.

    NOTE(review): the array default for `train_sizes` is evaluated once at
    import time; it is not mutated here, so sharing it is harmless.
    """
    import matplotlib.pyplot as plt
    from sklearn.model_selection import learning_curve
    fig = plt.figure()
    if title is not None:
        plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # learning_curve returns the actual sample counts used, overwriting the
    # fractional train_sizes argument.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, scoring=scoring, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded +/- 1 std bands behind the mean-score lines.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return fig
def plot_learning_curve(config, predictor_file, no_obs=False, no_models=False, ylim=None, cv=None, scoring=None,
                        title=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve. From scikit-learn:
    http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html

    Parameters
    ----------
    config :
    predictor_file : string
        Full path to file containing predictor data
    no_obs : boolean
        Train model without observations
    no_models : boolean
        Train model without model data
    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    scoring :
        Scoring function for the error calculation; should be a scikit-learn scorer object
    title : string
        Title for the chart.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    train_sizes : iterable, optional
        Sequence of subsets of training data used in learning curve plot

    Returns
    -------
    matplotlib.figure.Figure
        The rendered learning-curve figure.
    """
    # Build a fresh (untrained) estimator and the full training set, then
    # delegate plotting to the private helper.
    estimator = build_estimator(config)
    X, y = build_train_data(config, predictor_file, no_obs=no_obs, no_models=no_models)
    fig = _plot_learning_curve(estimator, X, y, ylim=ylim, cv=cv, scoring=scoring, title=title, n_jobs=n_jobs,
                               train_sizes=train_sizes)
    return fig
def combine_train_test(config, train_file, test_file, no_obs=False, no_models=False, return_count_test=True):
    """
    Concatenates the arrays of predictors and verification values from the train file and the test file. Useful for
    implementing cross-validation using scikit-learn's methods and the SplitConsecutive class.

    :param config:
    :param train_file: str: full path to predictor file for training
    :param test_file: str: full path to predictor file for validation
    :param no_obs: bool: if True, generates the model with no OBS data
    :param no_models: bool: if True, generates the model with no BUFR data
    :param return_count_test: bool: if True, also returns the number of samples in the test set (see SplitConsecutive)
    :return: predictors, verifications: concatenated arrays of predictors and verification values; count: number of
        samples in the test set
    """
    # Bug fix: build_train_data returns a 3-tuple (predictors, targets, rain)
    # when test_size == 0, so the original two-name unpacking raised
    # "too many values to unpack". The raw rain array is not used here.
    p_train, t_train, _ = build_train_data(config, train_file, no_obs=no_obs, no_models=no_models)
    p_test, t_test, _ = build_train_data(config, test_file, no_obs=no_obs, no_models=no_models)
    p_combined = np.concatenate((p_train, p_test), axis=0)
    t_combined = np.concatenate((t_train, t_test), axis=0)
    if return_count_test:
        return p_combined, t_combined, t_test.shape[0]
    else:
        return p_combined, t_combined
class SplitConsecutive(object):
    """
    Implements a split method to subset a training set into train and test sets, using the first or last n samples in
    the set. Provides the split()/get_n_splits() interface expected by scikit-learn cross-validation utilities.
    """

    def __init__(self, first=False, n_samples=0.2):
        """
        Create an instance of SplitConsecutive.

        :param first: bool: if True, gets test data from the beginning of the data set; otherwise from the end
        :param n_samples: float or int: if float, subsets a fraction (0 to 1) of the data into the test set; if int,
        subsets a specific number of samples.
        """
        if type(first) is not bool:
            raise TypeError("'first' must be a boolean type.")
        # Bug fix: the original applied int() unconditionally, which silently
        # truncated a fractional n_samples (e.g. the default 0.2 became 0 and
        # the split degenerated). Coerce to a number first and only collapse
        # to int when the value is whole.
        try:
            n_samples = float(n_samples)
        except (TypeError, ValueError):
            pass
        if isinstance(n_samples, float) and n_samples.is_integer():
            n_samples = int(n_samples)
        if type(n_samples) is float and (n_samples <= 0. or n_samples >= 1.):
            raise ValueError("if float, 'n_samples' must be between 0 and 1.")
        if type(n_samples) is not float and type(n_samples) is not int:
            raise TypeError("'n_samples' must be float or int type.")
        self.first = first
        self.n_samples = n_samples
        self.n_splits = 1

    def split(self, X, y=None, groups=None):
        """
        Produces arrays of indices to use for model and test splits.

        :param X: array-like, shape (samples, features): predictor data
        :param y: array-like, shape (samples, outputs) or None: verification data; ignored
        :param groups: ignored
        :return: model, test: 1-D arrays of sample indices in the model and test sets
        """
        num_samples = X.shape[0]
        indices = np.arange(0, num_samples, 1, dtype=np.int32)
        # Resolve a fractional n_samples against THIS data set's size without
        # mutating self.n_samples (the original cached the resolved count on
        # the instance, breaking reuse on data sets of a different size).
        count = self.n_samples
        if type(count) is float:
            count = int(np.round(num_samples * count))
        if self.first:
            test = indices[:count]
            train = indices[count:]
        else:
            test = indices[-count:]
            train = indices[:num_samples - count]
        yield train, test

    def get_n_splits(self, X=None, y=None, groups=None):
        """
        Return the number of splits. Dummy function for compatibility.

        :param X: ignored
        :param y: ignored
        :param groups: ignored
        :return: int: the number of splits (always 1)
        """
        return self.n_splits
def predict_all(config, predictor_file, ensemble=False, time_series_date=None, naive_rain_correction=False,
                round_result=False, **kwargs):
    """
    Predict forecasts from the estimator in config. Also return probabilities and time series.

    :param config: dict-like: MOS-X configuration; 'Model' and 'verbose' keys are read here
    :param predictor_file: str: file containing predictor data from mosx.model.format_predictors
    :param ensemble: bool: if True, return an array of num_trees-by-4 of the predictions of each tree in the estimator
    :param time_series_date: datetime: if set, returns a time series prediction from the estimator, where the datetime
        provided is the day the forecast is for (only works for single-day runs, or assumes last day)
    :param naive_rain_correction: bool: if True, applies manual tuning to the rain forecast
    :param round_result: bool: if True, rounds the predicted estimate
    :param kwargs: passed to the estimator's 'predict' method
    :return:
        predicted: ndarray: num_samples x num_predicted_variables predictions
        all_predicted: ndarray: num_samples x num_predicted_variables x num_ensemble_members predictions for all trees
        predicted_timeseries: DataFrame: time series for final sample
    """
    # Load the predictor data and estimator
    with open(predictor_file, 'rb') as handle:
        predictor_data = pickle.load(handle)
    rain_tuning = config['Model'].get('Rain tuning', None)
    if config['verbose']:
        print('predict: loading estimator %s' % config['Model']['estimator_file'])
    with open(config['Model']['estimator_file'], 'rb') as handle:
        estimator = pickle.load(handle)
    # Predictors are the BUFKIT model columns followed by the observation columns
    predictors = np.concatenate((predictor_data['BUFKIT'], predictor_data['OBS']), axis=1)
    # Probability-of-precipitation forecasts from a classifier use predict_proba
    if config['Model']['rain_forecast_type'] == 'pop' and getattr(estimator, 'is_classifier', False):
        predict_method = estimator.predict_proba
    else:
        predict_method = estimator.predict
    # A RainTuningEstimator may want the raw model rain values alongside the predictors
    if rain_tuning is not None and to_bool(rain_tuning.get('use_raw_rain', False)):
        predicted = predict_method(predictors, rain_array=predictor_data.rain, **kwargs)
    else:
        predicted = predict_method(predictors, **kwargs)
    precip = predictor_data.rain

    # Check for precipitation override
    # NOTE(review): column 3 of 'predicted' holds the daily rain value —
    # inferred from the corrections below; confirm against the estimator's target layout.
    if naive_rain_correction:
        for day in range(predicted.shape[0]):
            if sum(precip[day]) < 0.01:
                # Models show essentially no rain: force the forecast to zero
                if config['verbose']:
                    print('predict: warning: overriding MOS-X rain prediction of %0.2f on day %s with 0' %
                          (predicted[day, 3], day))
                predicted[day, 3] = 0.
            elif predicted[day, 3] > max(precip[day]) or predicted[day, 3] < min(precip[day]):
                # Forecast falls outside the model envelope: replace with the mean of
                # the model values and the original forecast, floored at zero
                if config['verbose']:
                    print('predict: warning: overriding MOS-X prediction of %0.2f on day %s with model mean' %
                          (predicted[day, 3], day))
                predicted[day, 3] = max(0., np.mean(precip[day] + [predicted[day, 3]]))
    else:
        # At least make sure we aren't predicting negative values...
        predicted[:, 3][predicted[:, 3] < 0] = 0.0

    # Round off daily values, if selected (temperatures/wind to integers, rain to 2 decimals)
    if round_result:
        predicted[:, :3] = np.round(predicted[:, :3])
        predicted[:, 3] = np.round(predicted[:, 3], 2)

    # If probabilities are requested and available, get the results from each tree
    if ensemble:
        num_samples = predictors.shape[0]
        if not hasattr(estimator, 'named_steps'):
            forest = estimator
        else:
            # Pipeline estimator: apply the imputer manually before querying trees
            imputer = estimator.named_steps['imputer']
            forest = estimator.named_steps['regressor']
            predictors = imputer.transform(predictors)
        # If we generated our own ensemble by bootstrapping, it must be treated as such
        if config['Model']['train_individual'] and config['Model'].get('Bootstrapping', None) is None:
            # One multi-output wrapper per variable; query each variable's trees separately
            num_trees = len(forest.estimators_[0].estimators_)
            all_predicted = np.zeros((num_samples, 4, num_trees))
            for v in range(4):
                for t in range(num_trees):
                    try:
                        all_predicted[:, v, t] = forest.estimators_[v].estimators_[t].predict(predictors)
                    except AttributeError:
                        # Work around the 2-D array of estimators for GBTrees
                        all_predicted[:, v, t] = forest.estimators_[v].estimators_[t][0].predict(predictors)
        else:
            num_trees = len(forest.estimators_)
            all_predicted = np.zeros((num_samples, 4, num_trees))
            for t in range(num_trees):
                try:
                    all_predicted[:, :, t] = forest.estimators_[t].predict(predictors)[:, :4]
                except AttributeError:
                    # Work around the 2-D array of estimators for GBTrees
                    all_predicted[:, :, t] = forest.estimators_[t][0].predict(predictors)[:, :4]
    else:
        all_predicted = None

    if config['Model']['predict_timeseries']:
        if time_series_date is None:
            # Default to tomorrow (UTC) when no forecast date was given
            date_now = datetime.utcnow()
            time_series_date = datetime(date_now.year, date_now.month, date_now.day) + timedelta(days=1)
            print('predict: warning: set time series start date to %s (was unspecified)' % time_series_date)
        num_hours = int(24 / config['time_series_interval']) + 1
        # Columns 4+ of the last sample hold the hourly series for 4 variables
        predicted_array = predicted[-1, 4:].reshape((4, num_hours)).T
        # Get dewpoint
        predicted_array[:, 2] = dewpoint(predicted_array[:, 0], predicted_array[:, 2])
        # Time axis starts at 06 UTC on the forecast date
        times = pd.date_range(time_series_date.replace(hour=6), periods=num_hours,
                              freq='%dH' % config['time_series_interval']).to_pydatetime().tolist()
        variables = ['temperature', 'rain', 'dewpoint', 'windSpeed']
        round_dict = {'temperature': 0, 'rain': 2, 'dewpoint': 0, 'windSpeed': 0}
        predicted_timeseries = pd.DataFrame(predicted_array, index=times, columns=variables)
        predicted_timeseries = predicted_timeseries.round(round_dict)
    else:
        predicted_timeseries = None

    return predicted, all_predicted, predicted_timeseries
def predict(config, predictor_file, naive_rain_correction=False, round=False, **kwargs):
    """
    Predict forecasts from the estimator in config. Only returns daily values.

    :param config:
    :param predictor_file: str: file containing predictor data from mosx.model.format_predictors
    :param naive_rain_correction: bool: if True, applies manual tuning to the rain forecast
    :param round: bool: if True, rounds the predicted estimate
    :param kwargs: passed to the estimator's 'predict' method
    :return: ndarray: daily predicted values
    """
    # Delegate to predict_all and discard the ensemble and time-series outputs
    daily_predicted, _, _ = predict_all(config, predictor_file,
                                        naive_rain_correction=naive_rain_correction,
                                        round_result=round, **kwargs)
    return daily_predicted
def predict_rain_proba(config, predictor_file):
    """
    Predict probabilistic rain forecasts for 'pop' or 'categorical' types.

    :param config:
    :param predictor_file: str: file containing predictor data from mosx.model.format_predictors
    :return: probabilities for each rain category
    """
    # Guard clauses: probabilities only exist for probabilistic forecast types
    # produced by a RainTuningEstimator
    if config['Model']['rain_forecast_type'] not in ['pop', 'categorical']:
        raise TypeError("'quantity' rain forecast is not probabilistic, cannot get probabilities")
    tuning_options = config['Model'].get('Rain tuning', None)
    if tuning_options is None:
        raise TypeError('Probabilistic rain forecasts are only possible with a RainTuningEstimator')

    # Load the predictor data and estimator
    with open(predictor_file, 'rb') as handle:
        predictor_data = pickle.load(handle)
    if config['verbose']:
        print('predict: loading estimator %s' % config['Model']['estimator_file'])
    with open(config['Model']['estimator_file'], 'rb') as handle:
        estimator = pickle.load(handle)

    predictors = np.concatenate((predictor_data['BUFKIT'], predictor_data['OBS']), axis=1)
    # Optionally hand the raw model rain values to the tuning estimator
    extra_kwargs = {}
    if to_bool(tuning_options.get('use_raw_rain', False)):
        extra_kwargs['rain_array'] = predictor_data.rain
    return estimator.predict_rain_proba(predictors, **extra_kwargs)
| [
"matplotlib.pyplot.title",
"pickle.dump",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"mosx.estimators.BootStrapEnsembleEstimator",
"datetime.datetime.utcnow",
"matplotlib.pyplot.figure",
"numpy.mean",
"pickle.load",
"numpy.arange",
"matplotlib.pyplot.fill... | [((869, 905), 'mosx.util.get_object', 'get_object', (["('sklearn.%s' % regressor)"], {}), "('sklearn.%s' % regressor)\n", (879, 905), False, 'from mosx.util import get_object, to_bool, dewpoint\n'), ((7515, 7539), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (7526, 7539), True, 'import numpy as np\n'), ((7643, 7655), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7653, 7655), True, 'import matplotlib.pyplot as plt\n'), ((7760, 7791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Training examples"""'], {}), "('Training examples')\n", (7770, 7791), True, 'import matplotlib.pyplot as plt\n'), ((7796, 7815), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Score"""'], {}), "('Score')\n", (7806, 7815), True, 'import matplotlib.pyplot as plt\n'), ((7861, 7960), 'sklearn.model_selection.learning_curve', 'learning_curve', (['estimator', 'X', 'y'], {'cv': 'cv', 'scoring': 'scoring', 'n_jobs': 'n_jobs', 'train_sizes': 'train_sizes'}), '(estimator, X, y, cv=cv, scoring=scoring, n_jobs=n_jobs,\n train_sizes=train_sizes)\n', (7875, 7960), False, 'from sklearn.model_selection import learning_curve\n'), ((7990, 8019), 'numpy.mean', 'np.mean', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (7997, 8019), True, 'import numpy as np\n'), ((8043, 8071), 'numpy.std', 'np.std', (['train_scores'], {'axis': '(1)'}), '(train_scores, axis=1)\n', (8049, 8071), True, 'import numpy as np\n'), ((8095, 8123), 'numpy.mean', 'np.mean', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (8102, 8123), True, 'import numpy as np\n'), ((8146, 8173), 'numpy.std', 'np.std', (['test_scores'], {'axis': '(1)'}), '(test_scores, axis=1)\n', (8152, 8173), True, 'import numpy as np\n'), ((8178, 8188), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8186, 8188), True, 'import matplotlib.pyplot as plt\n'), ((8194, 8326), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['train_sizes', 
'(train_scores_mean - train_scores_std)', '(train_scores_mean + train_scores_std)'], {'alpha': '(0.1)', 'color': '"""r"""'}), "(train_sizes, train_scores_mean - train_scores_std, \n train_scores_mean + train_scores_std, alpha=0.1, color='r')\n", (8210, 8326), True, 'import matplotlib.pyplot as plt\n'), ((8368, 8496), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['train_sizes', '(test_scores_mean - test_scores_std)', '(test_scores_mean + test_scores_std)'], {'alpha': '(0.1)', 'color': '"""g"""'}), "(train_sizes, test_scores_mean - test_scores_std, \n test_scores_mean + test_scores_std, alpha=0.1, color='g')\n", (8384, 8496), True, 'import matplotlib.pyplot as plt\n'), ((8517, 8603), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'train_scores_mean', '"""o-"""'], {'color': '"""r"""', 'label': '"""Training score"""'}), "(train_sizes, train_scores_mean, 'o-', color='r', label=\n 'Training score')\n", (8525, 8603), True, 'import matplotlib.pyplot as plt\n'), ((8616, 8709), 'matplotlib.pyplot.plot', 'plt.plot', (['train_sizes', 'test_scores_mean', '"""o-"""'], {'color': '"""g"""', 'label': '"""Cross-validation score"""'}), "(train_sizes, test_scores_mean, 'o-', color='g', label=\n 'Cross-validation score')\n", (8624, 8709), True, 'import matplotlib.pyplot as plt\n'), ((8723, 8745), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (8733, 8745), True, 'import matplotlib.pyplot as plt\n'), ((8934, 8958), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (8945, 8958), True, 'import numpy as np\n'), ((12016, 12057), 'numpy.concatenate', 'np.concatenate', (['(p_train, p_test)'], {'axis': '(0)'}), '((p_train, p_test), axis=0)\n', (12030, 12057), True, 'import numpy as np\n'), ((12075, 12116), 'numpy.concatenate', 'np.concatenate', (['(t_train, t_test)'], {'axis': '(0)'}), '((t_train, t_test), axis=0)\n', (12089, 12116), True, 'import numpy as np\n'), ((16209, 16282), 
'numpy.concatenate', 'np.concatenate', (["(predictor_data['BUFKIT'], predictor_data['OBS'])"], {'axis': '(1)'}), "((predictor_data['BUFKIT'], predictor_data['OBS']), axis=1)\n", (16223, 16282), True, 'import numpy as np\n'), ((22568, 22641), 'numpy.concatenate', 'np.concatenate', (["(predictor_data['BUFKIT'], predictor_data['OBS'])"], {'axis': '(1)'}), "((predictor_data['BUFKIT'], predictor_data['OBS']), axis=1)\n", (22582, 22641), True, 'import numpy as np\n'), ((2150, 2195), 'sklearn.ensemble.AdaBoostRegressor', 'AdaBoostRegressor', (['regressor_obj'], {}), '(regressor_obj, **ada_boost)\n', (2167, 2195), False, 'from sklearn.ensemble import AdaBoostRegressor\n'), ((2423, 2461), 'sklearn.multioutput.MultiOutputRegressor', 'MultiOutputRegressor', (['regressor_obj', '(4)'], {}), '(regressor_obj, 4)\n', (2443, 2461), False, 'from sklearn.multioutput import MultiOutputRegressor\n'), ((2908, 2926), 'sklearn.pipeline.Pipeline', 'Pipeline', (['pipeline'], {}), '(pipeline)\n', (2916, 2926), False, 'from sklearn.pipeline import Pipeline\n'), ((3193, 3238), 'mosx.estimators.RainTuningEstimator', 'RainTuningEstimator', (['estimator'], {}), '(estimator, **rain_kwargs)\n', (3212, 3238), False, 'from mosx.estimators import TimeSeriesEstimator, RainTuningEstimator, BootStrapEnsembleEstimator\n'), ((3427, 3477), 'mosx.estimators.BootStrapEnsembleEstimator', 'BootStrapEnsembleEstimator', (['estimator'], {}), '(estimator, **bootstrap)\n', (3453, 3477), False, 'from mosx.estimators import TimeSeriesEstimator, RainTuningEstimator, BootStrapEnsembleEstimator\n'), ((4057, 4076), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (4068, 4076), False, 'import pickle\n'), ((4664, 4711), 'numpy.concatenate', 'np.concatenate', (['(predictors, data.rain)'], {'axis': '(1)'}), '((predictors, data.rain), axis=1)\n', (4678, 4711), True, 'import numpy as np\n'), ((4847, 4905), 'sklearn.model_selection.train_test_split', 'train_test_split', (['predictors', 'targets'], {'test_size': 
'test_size'}), '(predictors, targets, test_size=test_size)\n', (4863, 4905), False, 'from sklearn.model_selection import train_test_split\n'), ((7241, 7305), 'pickle.dump', 'pickle.dump', (['estimator', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(estimator, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (7252, 7305), False, 'import pickle\n'), ((7690, 7706), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (7699, 7706), True, 'import matplotlib.pyplot as plt\n'), ((7740, 7755), 'matplotlib.pyplot.ylim', 'plt.ylim', (['*ylim'], {}), '(*ylim)\n', (7748, 7755), True, 'import matplotlib.pyplot as plt\n'), ((13869, 13913), 'numpy.arange', 'np.arange', (['(0)', 'num_samples', '(1)'], {'dtype': 'np.int32'}), '(0, num_samples, 1, dtype=np.int32)\n', (13878, 13913), True, 'import numpy as np\n'), ((15897, 15916), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (15908, 15916), False, 'import pickle\n'), ((16171, 16190), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (16182, 16190), False, 'import pickle\n'), ((17757, 17783), 'numpy.round', 'np.round', (['predicted[:, :3]'], {}), '(predicted[:, :3])\n', (17765, 17783), True, 'import numpy as np\n'), ((17810, 17838), 'numpy.round', 'np.round', (['predicted[:, 3]', '(2)'], {}), '(predicted[:, 3], 2)\n', (17818, 17838), True, 'import numpy as np\n'), ((20058, 20112), 'mosx.util.dewpoint', 'dewpoint', (['predicted_array[:, 0]', 'predicted_array[:, 2]'], {}), '(predicted_array[:, 0], predicted_array[:, 2])\n', (20066, 20112), False, 'from mosx.util import get_object, to_bool, dewpoint\n'), ((20478, 20539), 'pandas.DataFrame', 'pd.DataFrame', (['predicted_array'], {'index': 'times', 'columns': 'variables'}), '(predicted_array, index=times, columns=variables)\n', (20490, 20539), True, 'import pandas as pd\n'), ((22315, 22334), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (22326, 22334), False, 'import pickle\n'), ((22530, 22549), 'pickle.load', 
'pickle.load', (['handle'], {}), '(handle)\n', (22541, 22549), False, 'import pickle\n'), ((1420, 1475), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': 'np.nan', 'strategy': '"""mean"""', 'axis': '(0)'}), "(missing_values=np.nan, strategy='mean', axis=0)\n", (1427, 1475), False, 'from sklearn.preprocessing import Imputer\n'), ((2827, 2845), 'sklearn.pipeline.Pipeline', 'Pipeline', (['pipeline'], {}), '(pipeline)\n', (2835, 2845), False, 'from sklearn.pipeline import Pipeline\n'), ((2847, 2876), 'sklearn.pipeline.Pipeline', 'Pipeline', (['pipeline_timeseries'], {}), '(pipeline_timeseries)\n', (2855, 2876), False, 'from sklearn.pipeline import Pipeline\n'), ((4505, 4558), 'numpy.concatenate', 'np.concatenate', (["(data['BUFKIT'], data['OBS'])"], {'axis': '(1)'}), "((data['BUFKIT'], data['OBS']), axis=1)\n", (4519, 4558), True, 'import numpy as np\n'), ((18525, 18562), 'numpy.zeros', 'np.zeros', (['(num_samples, 4, num_trees)'], {}), '((num_samples, 4, num_trees))\n', (18533, 18562), True, 'import numpy as np\n'), ((19088, 19125), 'numpy.zeros', 'np.zeros', (['(num_samples, 4, num_trees)'], {}), '((num_samples, 4, num_trees))\n', (19096, 19125), True, 'import numpy as np\n'), ((19636, 19653), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (19651, 19653), False, 'from datetime import datetime, timedelta\n'), ((1567, 1622), 'sklearn.preprocessing.Imputer', 'Imputer', ([], {'missing_values': 'np.nan', 'strategy': '"""mean"""', 'axis': '(0)'}), "(missing_values=np.nan, strategy='mean', axis=0)\n", (1574, 1622), False, 'from sklearn.preprocessing import Imputer\n'), ((1746, 1754), 'sklearn.preprocessing.StandardScaler', 'Scaler', ([], {}), '()\n', (1752, 1754), True, 'from sklearn.preprocessing import StandardScaler as Scaler\n'), ((13989, 14027), 'numpy.round', 'np.round', (['(num_samples * self.n_samples)'], {}), '(num_samples * self.n_samples)\n', (13997, 14027), True, 'import numpy as np\n'), ((19685, 19738), 'datetime.datetime', 
'datetime', (['date_now.year', 'date_now.month', 'date_now.day'], {}), '(date_now.year, date_now.month, date_now.day)\n', (19693, 19738), False, 'from datetime import datetime, timedelta\n'), ((19741, 19758), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (19750, 19758), False, 'from datetime import datetime, timedelta\n'), ((1857, 1865), 'sklearn.preprocessing.StandardScaler', 'Scaler', ([], {}), '()\n', (1863, 1865), True, 'from sklearn.preprocessing import StandardScaler as Scaler\n'), ((17492, 17534), 'numpy.mean', 'np.mean', (['(precip[day] + [predicted[day, 3]])'], {}), '(precip[day] + [predicted[day, 3]])\n', (17499, 17534), True, 'import numpy as np\n')] |
"""
@file
@brief Functions about the `Gini coefficient <https://en.wikipedia.org/wiki/Gini_coefficient>`_.
"""
import numpy
def gini(Y, X=None):
    """
    Computes the
    `Gini coefficient <https://en.wikipedia.org/wiki/Gini_coefficient>`_.

    @param      Y       Y values (or revenues)
    @param      X       None for a uniform population or not None for already ordered values
    @return             Gini coefficient (float in [0, 1]; 0 means perfect equality)
    """
    n = len(Y)
    couples = numpy.empty((n, 2))
    if X is None:
        couples[:, 0] = 1
    else:
        couples[:, 0] = X
    couples[:, 1] = Y
    # Cumulative sums give the Lorenz curve; normalize both axes to [0, 1]
    # (the 1e-7 floor avoids division by zero for all-zero input).
    couples = numpy.cumsum(couples, axis=0)
    couples[:, 0] /= max(couples[n - 1, 0], 1e-7)
    couples[:, 1] /= max(couples[n - 1, 1], 1e-7)
    # Trapezoidal integration of the Lorenz curve, starting from the origin
    # (0, 0). BUGFIX: the previous loop used couples[i - 1] at i == 0, which
    # wrapped to the *last* point and corrupted the first trapezoid, and the
    # final division by 2 halved the coefficient (perfect equality returned
    # 0.5 instead of 0). Gini = 1 - 2 * (area under the Lorenz curve), and
    # g below accumulates exactly twice that area.
    g = 0.
    prev_x = 0.
    prev_y = 0.
    for i in range(0, n):
        g += (couples[i, 0] - prev_x) * (couples[i, 1] + prev_y)
        prev_x = couples[i, 0]
        prev_y = couples[i, 1]
    return 1. - g
| [
"numpy.empty",
"numpy.cumsum"
] | [((472, 491), 'numpy.empty', 'numpy.empty', (['(n, 2)'], {}), '((n, 2))\n', (483, 491), False, 'import numpy\n'), ((630, 659), 'numpy.cumsum', 'numpy.cumsum', (['couples'], {'axis': '(0)'}), '(couples, axis=0)\n', (642, 659), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
import itertools
from dipy.io.stateful_tractogram import StatefulTractogram
from dipy.tracking.metrics import length
from dipy.tracking.streamline import set_number_of_points
from dipy.tracking.vox2track import _streamlines_in_mask
from nibabel.affines import apply_affine
import numpy as np
def streamlines_in_mask(sft, target_mask):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    target_mask : numpy.ndarray
        Binary mask in which the streamlines should pass.

    Returns
    -------
    ids : list
        Ids of the streamlines passing through the mask.
    """
    sft.to_vox()
    sft.to_corner()
    # Dipy's helper expects a uint8 mask; the identity affine and the
    # half-voxel shift account for data already being in corner voxel space.
    binary_mask = np.array(target_mask, dtype=np.uint8, copy=True)
    in_mask_flags = _streamlines_in_mask(list(sft.streamlines), binary_mask,
                                          np.eye(3), [-0.5, -0.5, -0.5])
    # Each streamline is flagged with 1 when it intersects the mask
    return np.where(in_mask_flags == 1)[0].tolist()
def filter_grid_roi(sft, mask, filter_type, is_exclude):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    mask : numpy.ndarray
        Binary mask in which the streamlines should pass.
    filter_type: str
        One of the 3 following choices, 'any', 'either_end', 'both_ends'.
    is_exclude: bool
        Value to indicate if the ROI is an AND (false) or a NOT (true).

    Returns
    -------
    ids : tuple
        Filtered sft.
        Ids of the streamlines passing through the mask.
    """
    line_based_indices = []
    if filter_type == 'any':
        line_based_indices = streamlines_in_mask(sft, mask)
    else:
        sft.to_vox()
        sft.to_corner()
        # For endpoint filtering, collect separately the streamlines whose
        # first point and whose last point fall inside the mask.
        head_indices = []
        tail_indices = []
        for idx, strl in enumerate(sft.streamlines):
            head_voxel = tuple(strl[0].astype(np.int16))
            tail_voxel = tuple(strl[-1].astype(np.int16))
            if mask[head_voxel]:
                head_indices.append(idx)
            if mask[tail_voxel]:
                tail_indices.append(idx)
        if filter_type == 'both_ends':
            # Both endpoints need to be in the mask (AND)
            line_based_indices = np.intersect1d(head_indices, tail_indices)
        elif filter_type == 'either_end':
            # Only one endpoint needs to be in the mask (OR)
            line_based_indices = np.union1d(head_indices, tail_indices)

    # If the 'exclude' option is used, the selection is inverted
    if is_exclude:
        line_based_indices = np.setdiff1d(range(len(sft)),
                                          np.unique(line_based_indices))
    line_based_indices = np.asarray(line_based_indices).astype(np.int32)

    # From indices to sft, carrying the per-streamline / per-point metadata
    streamlines = sft.streamlines[line_based_indices]
    data_per_streamline = sft.data_per_streamline[line_based_indices]
    data_per_point = sft.data_per_point[line_based_indices]
    new_sft = StatefulTractogram.from_sft(streamlines, sft,
                                          data_per_streamline=data_per_streamline,
                                          data_per_point=data_per_point)
    return new_sft, line_based_indices
def pre_filtering_for_geometrical_shape(sft, size,
                                        center, filter_type,
                                        is_in_vox):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    size : numpy.ndarray (3)
        Size in mm, x/y/z of the ROI.
    center: numpy.ndarray (3)
        Center x/y/z of the ROI.
    filter_type: str
        One of the 3 following choices, 'any', 'either_end', 'both_ends'.
    is_in_vox: bool
        Value to indicate if the ROI is in voxel space.

    Returns
    -------
    ids : tuple
        Filtered sft.
        Ids of the streamlines passing through the mask.
    """
    transfo, dim, _, _ = sft.space_attributes
    inv_transfo = np.linalg.inv(transfo)

    # Express the shape's center in world coordinates when given in voxels
    if is_in_vox:
        center = np.asarray(apply_affine(transfo, center))
    low = center - size
    high = center + size

    # Build the 8 corners of the world-space bounding box, then map them
    # back to voxel space.
    corner_world = list(itertools.product([low[0], high[0]],
                                          [low[1], high[1]],
                                          [low[2], high[2]]))
    corner_vox = apply_affine(inv_transfo, corner_world)

    # Since the grid-based filtering is so fast, we pre-filter using a
    # slightly padded bounding box around the shape.
    min_corner = np.min(corner_vox, axis=0) - 1.0
    max_corner = np.max(corner_vox, axis=0) + 1.5
    pre_mask = np.zeros(dim)
    lower = np.maximum(min_corner, 0).astype(int)
    upper = np.minimum(max_corner, dim).astype(int)
    pre_mask[lower[0]:upper[0], lower[1]:upper[1], lower[2]:upper[2]] = 1

    return filter_grid_roi(sft, pre_mask, filter_type, False)
def filter_ellipsoid(sft, ellipsoid_radius, ellipsoid_center,
                     filter_type, is_exclude, is_in_vox=False):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    ellipsoid_radius : numpy.ndarray (3)
        Size in mm, x/y/z of the ellipsoid.
    ellipsoid_center: numpy.ndarray (3)
        Center x/y/z of the ellipsoid.
    filter_type: str
        One of the 3 following choices, 'any', 'either_end', 'both_ends'.
    is_exclude: bool
        Value to indicate if the ROI is an AND (false) or a NOT (true).
    is_in_vox: bool
        Value to indicate if the ROI is in voxel space.

    Returns
    -------
    ids : tuple
        Filtered sft.
        Ids of the streamlines passing through the mask.
    """
    # Cheap bounding-box pre-filter so the point-based test below only runs
    # on candidate streamlines
    pre_filtered_sft, pre_filtered_indices = \
        pre_filtering_for_geometrical_shape(sft, ellipsoid_radius,
                                            ellipsoid_center, filter_type,
                                            is_in_vox)
    pre_filtered_sft.to_rasmm()
    pre_filtered_sft.to_center()
    pre_filtered_streamlines = pre_filtered_sft.streamlines
    transfo, _, res, _ = sft.space_attributes
    if is_in_vox:
        ellipsoid_center = np.asarray(apply_affine(transfo,
                                                ellipsoid_center))
    selected_by_ellipsoid = []
    line_based_indices_1 = []
    line_based_indices_2 = []
    # This is still point based (but resampled), I had a ton of problems trying
    # to use something with intersection, but even if I could do it :
    # The result won't be identical to MI-Brain since I am not using the
    # vtkPolydata. Also it won't be identical to TrackVis either,
    # because TrackVis is point-based for Spherical ROI...
    ellipsoid_radius = np.asarray(ellipsoid_radius)
    ellipsoid_center = np.asarray(ellipsoid_center)

    for i, line in enumerate(pre_filtered_streamlines):
        if filter_type == 'any':
            # Resample to 1/10 of the voxel size
            nb_points = max(int(length(line) / np.average(res) * 10), 2)
            line = set_number_of_points(line, nb_points)
            # Normalized squared distance: <= 1 means inside the ellipsoid
            points_in_ellipsoid = np.sum(
                ((line - ellipsoid_center) / ellipsoid_radius) ** 2,
                axis=1)
            # BUGFIX: np.argwhere(cond).any() tests the truthiness of the
            # *indices*, so it was False when the only inside point was at
            # index 0; np.any tests the condition itself.
            if np.any(points_in_ellipsoid <= 1):
                # If at least one point was in the ellipsoid, we selected
                # the streamline
                selected_by_ellipsoid.append(pre_filtered_indices[i])
        else:
            points_in_ellipsoid = np.sum(
                ((line[0] - ellipsoid_center) / ellipsoid_radius) ** 2)
            if points_in_ellipsoid <= 1.0:
                line_based_indices_1.append(pre_filtered_indices[i])

            points_in_ellipsoid = np.sum(
                ((line[-1] - ellipsoid_center) / ellipsoid_radius) ** 2)
            if points_in_ellipsoid <= 1.0:
                line_based_indices_2.append(pre_filtered_indices[i])

    # Both endpoints need to be in the mask (AND)
    if filter_type == 'both_ends':
        selected_by_ellipsoid = np.intersect1d(line_based_indices_1,
                                              line_based_indices_2)
    # Only one endpoint needs to be in the mask (OR)
    elif filter_type == 'either_end':
        selected_by_ellipsoid = np.union1d(line_based_indices_1,
                                          line_based_indices_2)
    # If the 'exclude' option is used, the selection is inverted
    if is_exclude:
        selected_by_ellipsoid = np.setdiff1d(range(len(sft)),
                                          np.unique(selected_by_ellipsoid))
    line_based_indices = np.asarray(selected_by_ellipsoid).astype(np.int32)

    # From indices to sft, carrying the per-streamline / per-point metadata
    streamlines = sft.streamlines[line_based_indices]
    data_per_streamline = sft.data_per_streamline[line_based_indices]
    data_per_point = sft.data_per_point[line_based_indices]
    new_sft = StatefulTractogram.from_sft(streamlines, sft,
                                          data_per_streamline=data_per_streamline,
                                          data_per_point=data_per_point)
    return new_sft, line_based_indices
def filter_cuboid(sft, cuboid_radius, cuboid_center,
                  filter_type, is_exclude):
    """
    Parameters
    ----------
    sft : StatefulTractogram
        StatefulTractogram containing the streamlines to segment.
    cuboid_radius : numpy.ndarray (3)
        Size in mm, x/y/z of the cuboid.
    cuboid_center: numpy.ndarray (3)
        Center x/y/z of the cuboid.
    filter_type: str
        One of the 3 following choices, 'any', 'either_end', 'both_ends'.
    is_exclude: bool
        Value to indicate if the ROI is an AND (false) or a NOT (true).

    Returns
    -------
    ids : tuple
        Filtered sft.
        Ids of the streamlines passing through the mask.
    """
    # Cheap bounding-box pre-filter so the point-based test below only runs
    # on candidate streamlines
    pre_filtered_sft, pre_filtered_indices = \
        pre_filtering_for_geometrical_shape(sft, cuboid_radius,
                                            cuboid_center, filter_type,
                                            False)
    pre_filtered_sft.to_rasmm()
    pre_filtered_sft.to_center()
    pre_filtered_streamlines = pre_filtered_sft.streamlines
    _, _, res, _ = sft.space_attributes

    selected_by_cuboid = []
    line_based_indices_1 = []
    line_based_indices_2 = []
    # Also here I am not using a mathematical intersection and
    # I am not using vtkPolyData like in MI-Brain, so not exactly the same
    cuboid_radius = np.asarray(cuboid_radius)
    cuboid_center = np.asarray(cuboid_center)
    for i, line in enumerate(pre_filtered_streamlines):
        if filter_type == 'any':
            # Resample to 1/10 of the voxel size
            nb_points = max(int(length(line) / np.average(res) * 10), 2)
            line = set_number_of_points(line, nb_points)
            # A point is inside the cuboid when all 3 normalized axis
            # distances are <= 1, i.e. when the per-point sum equals 3
            points_in_cuboid = np.abs(line - cuboid_center) / cuboid_radius
            points_in_cuboid = np.sum(np.where(points_in_cuboid <= 1, 1, 0),
                                       axis=1)
            # BUGFIX: np.argwhere(cond).any() tests the truthiness of the
            # *indices*, so it was False when the only inside point was at
            # index 0; np.any tests the condition itself.
            if np.any(points_in_cuboid == 3):
                # If at least one point was in the cuboid in x/y/z,
                # we selected that streamline
                selected_by_cuboid.append(pre_filtered_indices[i])
        else:
            # Faster to do it twice than trying to do in using an array of 2
            points_in_cuboid = np.abs(line[0] - cuboid_center) / cuboid_radius
            points_in_cuboid = np.sum(np.where(points_in_cuboid <= 1, 1, 0))
            if points_in_cuboid == 3:
                line_based_indices_1.append(pre_filtered_indices[i])

            points_in_cuboid = np.abs(line[-1] - cuboid_center) / cuboid_radius
            points_in_cuboid = np.sum(np.where(points_in_cuboid <= 1, 1, 0))
            if points_in_cuboid == 3:
                line_based_indices_2.append(pre_filtered_indices[i])

    # Both endpoints need to be in the mask (AND)
    if filter_type == 'both_ends':
        selected_by_cuboid = np.intersect1d(line_based_indices_1,
                                           line_based_indices_2)
    # Only one endpoint need to be in the mask (OR)
    elif filter_type == 'either_end':
        selected_by_cuboid = np.union1d(line_based_indices_1,
                                       line_based_indices_2)
    # If the 'exclude' option is used, the selection is inverted
    if is_exclude:
        selected_by_cuboid = np.setdiff1d(range(len(sft)),
                                          np.unique(selected_by_cuboid))
    line_based_indices = np.asarray(selected_by_cuboid).astype(np.int32)

    # From indices to sft, carrying the per-streamline / per-point metadata
    streamlines = sft.streamlines[line_based_indices]
    data_per_streamline = sft.data_per_streamline[line_based_indices]
    data_per_point = sft.data_per_point[line_based_indices]
    new_sft = StatefulTractogram.from_sft(streamlines, sft,
                                          data_per_streamline=data_per_streamline,
                                          data_per_point=data_per_point)
    return new_sft, line_based_indices
| [
"numpy.sum",
"numpy.abs",
"numpy.unique",
"numpy.max",
"itertools.product",
"numpy.intersect1d",
"dipy.io.stateful_tractogram.StatefulTractogram.from_sft",
"numpy.union1d",
"numpy.average",
"numpy.asarray",
"numpy.min",
"dipy.tracking.streamline.set_number_of_points",
"numpy.linalg.inv",
"... | [((787, 835), 'numpy.array', 'np.array', (['target_mask'], {'dtype': 'np.uint8', 'copy': '(True)'}), '(target_mask, dtype=np.uint8, copy=True)\n', (795, 835), True, 'import numpy as np\n'), ((3427, 3549), 'dipy.io.stateful_tractogram.StatefulTractogram.from_sft', 'StatefulTractogram.from_sft', (['streamlines', 'sft'], {'data_per_streamline': 'data_per_streamline', 'data_per_point': 'data_per_point'}), '(streamlines, sft, data_per_streamline=\n data_per_streamline, data_per_point=data_per_point)\n', (3454, 3549), False, 'from dipy.io.stateful_tractogram import StatefulTractogram\n'), ((4460, 4482), 'numpy.linalg.inv', 'np.linalg.inv', (['transfo'], {}), '(transfo)\n', (4473, 4482), True, 'import numpy as np\n'), ((4911, 4950), 'nibabel.affines.apply_affine', 'apply_affine', (['inv_transfo', 'corner_world'], {}), '(inv_transfo, corner_world)\n', (4923, 4950), False, 'from nibabel.affines import apply_affine\n'), ((5170, 5183), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (5178, 5183), True, 'import numpy as np\n'), ((7383, 7411), 'numpy.asarray', 'np.asarray', (['ellipsoid_radius'], {}), '(ellipsoid_radius)\n', (7393, 7411), True, 'import numpy as np\n'), ((7435, 7463), 'numpy.asarray', 'np.asarray', (['ellipsoid_center'], {}), '(ellipsoid_center)\n', (7445, 7463), True, 'import numpy as np\n'), ((9547, 9669), 'dipy.io.stateful_tractogram.StatefulTractogram.from_sft', 'StatefulTractogram.from_sft', (['streamlines', 'sft'], {'data_per_streamline': 'data_per_streamline', 'data_per_point': 'data_per_point'}), '(streamlines, sft, data_per_streamline=\n data_per_streamline, data_per_point=data_per_point)\n', (9574, 9669), False, 'from dipy.io.stateful_tractogram import StatefulTractogram\n'), ((11210, 11235), 'numpy.asarray', 'np.asarray', (['cuboid_radius'], {}), '(cuboid_radius)\n', (11220, 11235), True, 'import numpy as np\n'), ((11256, 11281), 'numpy.asarray', 'np.asarray', (['cuboid_center'], {}), '(cuboid_center)\n', (11266, 11281), True, 'import 
numpy as np\n'), ((13557, 13679), 'dipy.io.stateful_tractogram.StatefulTractogram.from_sft', 'StatefulTractogram.from_sft', (['streamlines', 'sft'], {'data_per_streamline': 'data_per_streamline', 'data_per_point': 'data_per_point'}), '(streamlines, sft, data_per_streamline=\n data_per_streamline, data_per_point=data_per_point)\n', (13584, 13679), False, 'from dipy.io.stateful_tractogram import StatefulTractogram\n'), ((961, 970), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (967, 970), True, 'import numpy as np\n'), ((4854, 4892), 'itertools.product', 'itertools.product', (['x_val', 'y_val', 'z_val'], {}), '(x_val, y_val, z_val)\n', (4871, 4892), False, 'import itertools\n'), ((5072, 5098), 'numpy.min', 'np.min', (['corner_vox'], {'axis': '(0)'}), '(corner_vox, axis=0)\n', (5078, 5098), True, 'import numpy as np\n'), ((5122, 5148), 'numpy.max', 'np.max', (['corner_vox'], {'axis': '(0)'}), '(corner_vox, axis=0)\n', (5128, 5148), True, 'import numpy as np\n'), ((8692, 8750), 'numpy.intersect1d', 'np.intersect1d', (['line_based_indices_1', 'line_based_indices_2'], {}), '(line_based_indices_1, line_based_indices_2)\n', (8706, 8750), True, 'import numpy as np\n'), ((12724, 12782), 'numpy.intersect1d', 'np.intersect1d', (['line_based_indices_1', 'line_based_indices_2'], {}), '(line_based_indices_1, line_based_indices_2)\n', (12738, 12782), True, 'import numpy as np\n'), ((2574, 2632), 'numpy.intersect1d', 'np.intersect1d', (['line_based_indices_1', 'line_based_indices_2'], {}), '(line_based_indices_1, line_based_indices_2)\n', (2588, 2632), True, 'import numpy as np\n'), ((3097, 3126), 'numpy.unique', 'np.unique', (['line_based_indices'], {}), '(line_based_indices)\n', (3106, 3126), True, 'import numpy as np\n'), ((3153, 3183), 'numpy.asarray', 'np.asarray', (['line_based_indices'], {}), '(line_based_indices)\n', (3163, 3183), True, 'import numpy as np\n'), ((4596, 4625), 'nibabel.affines.apply_affine', 'apply_affine', (['transfo', 'center'], {}), '(transfo, 
center)\n', (4608, 4625), False, 'from nibabel.affines import apply_affine\n'), ((6829, 6868), 'nibabel.affines.apply_affine', 'apply_affine', (['transfo', 'ellipsoid_center'], {}), '(transfo, ellipsoid_center)\n', (6841, 6868), False, 'from nibabel.affines import apply_affine\n'), ((7695, 7732), 'dipy.tracking.streamline.set_number_of_points', 'set_number_of_points', (['line', 'nb_points'], {}), '(line, nb_points)\n', (7715, 7732), False, 'from dipy.tracking.streamline import set_number_of_points\n'), ((7767, 7834), 'numpy.sum', 'np.sum', (['(((line - ellipsoid_center) / ellipsoid_radius) ** 2)'], {'axis': '(1)'}), '(((line - ellipsoid_center) / ellipsoid_radius) ** 2, axis=1)\n', (7773, 7834), True, 'import numpy as np\n'), ((8153, 8215), 'numpy.sum', 'np.sum', (['(((line[0] - ellipsoid_center) / ellipsoid_radius) ** 2)'], {}), '(((line[0] - ellipsoid_center) / ellipsoid_radius) ** 2)\n', (8159, 8215), True, 'import numpy as np\n'), ((8381, 8444), 'numpy.sum', 'np.sum', (['(((line[-1] - ellipsoid_center) / ellipsoid_radius) ** 2)'], {}), '(((line[-1] - ellipsoid_center) / ellipsoid_radius) ** 2)\n', (8387, 8444), True, 'import numpy as np\n'), ((8921, 8975), 'numpy.union1d', 'np.union1d', (['line_based_indices_1', 'line_based_indices_2'], {}), '(line_based_indices_1, line_based_indices_2)\n', (8931, 8975), True, 'import numpy as np\n'), ((9211, 9243), 'numpy.unique', 'np.unique', (['selected_by_ellipsoid'], {}), '(selected_by_ellipsoid)\n', (9220, 9243), True, 'import numpy as np\n'), ((9270, 9303), 'numpy.asarray', 'np.asarray', (['selected_by_ellipsoid'], {}), '(selected_by_ellipsoid)\n', (9280, 9303), True, 'import numpy as np\n'), ((11512, 11549), 'dipy.tracking.streamline.set_number_of_points', 'set_number_of_points', (['line', 'nb_points'], {}), '(line, nb_points)\n', (11532, 11549), False, 'from dipy.tracking.streamline import set_number_of_points\n'), ((12946, 13000), 'numpy.union1d', 'np.union1d', (['line_based_indices_1', 'line_based_indices_2'], {}), 
'(line_based_indices_1, line_based_indices_2)\n', (12956, 13000), True, 'import numpy as np\n'), ((13227, 13256), 'numpy.unique', 'np.unique', (['selected_by_cuboid'], {}), '(selected_by_cuboid)\n', (13236, 13256), True, 'import numpy as np\n'), ((13283, 13313), 'numpy.asarray', 'np.asarray', (['selected_by_cuboid'], {}), '(selected_by_cuboid)\n', (13293, 13313), True, 'import numpy as np\n'), ((1048, 1090), 'numpy.where', 'np.where', (['(streamlines_case == [0, 1][True])'], {}), '(streamlines_case == [0, 1][True])\n', (1056, 1090), True, 'import numpy as np\n'), ((2812, 2866), 'numpy.union1d', 'np.union1d', (['line_based_indices_1', 'line_based_indices_2'], {}), '(line_based_indices_1, line_based_indices_2)\n', (2822, 2866), True, 'import numpy as np\n'), ((11581, 11609), 'numpy.abs', 'np.abs', (['(line - cuboid_center)'], {}), '(line - cuboid_center)\n', (11587, 11609), True, 'import numpy as np\n'), ((11664, 11701), 'numpy.where', 'np.where', (['(points_in_cuboid <= 1)', '(1)', '(0)'], {}), '(points_in_cuboid <= 1, 1, 0)\n', (11672, 11701), True, 'import numpy as np\n'), ((12110, 12141), 'numpy.abs', 'np.abs', (['(line[0] - cuboid_center)'], {}), '(line[0] - cuboid_center)\n', (12116, 12141), True, 'import numpy as np\n'), ((12196, 12233), 'numpy.where', 'np.where', (['(points_in_cuboid <= 1)', '(1)', '(0)'], {}), '(points_in_cuboid <= 1, 1, 0)\n', (12204, 12233), True, 'import numpy as np\n'), ((12375, 12407), 'numpy.abs', 'np.abs', (['(line[-1] - cuboid_center)'], {}), '(line[-1] - cuboid_center)\n', (12381, 12407), True, 'import numpy as np\n'), ((12462, 12499), 'numpy.where', 'np.where', (['(points_in_cuboid <= 1)', '(1)', '(0)'], {}), '(points_in_cuboid <= 1, 1, 0)\n', (12470, 12499), True, 'import numpy as np\n'), ((7883, 7920), 'numpy.argwhere', 'np.argwhere', (['(points_in_ellipsoid <= 1)'], {}), '(points_in_ellipsoid <= 1)\n', (7894, 7920), True, 'import numpy as np\n'), ((11765, 11799), 'numpy.argwhere', 'np.argwhere', (['(points_in_cuboid == 3)'], 
{}), '(points_in_cuboid == 3)\n', (11776, 11799), True, 'import numpy as np\n'), ((7635, 7647), 'dipy.tracking.metrics.length', 'length', (['line'], {}), '(line)\n', (7641, 7647), False, 'from dipy.tracking.metrics import length\n'), ((7650, 7665), 'numpy.average', 'np.average', (['res'], {}), '(res)\n', (7660, 7665), True, 'import numpy as np\n'), ((11452, 11464), 'dipy.tracking.metrics.length', 'length', (['line'], {}), '(line)\n', (11458, 11464), False, 'from dipy.tracking.metrics import length\n'), ((11467, 11482), 'numpy.average', 'np.average', (['res'], {}), '(res)\n', (11477, 11482), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file is part of the complex_terrain algorithm
<NAME>, <NAME>, <NAME> (IGE, CEN).
"""
import numpy as np
from astropy.convolution import convolve
class Cases:
    """Terrain configuration class.

    The class allows the user to set a specific terrain configuration to run
    the forward iterative model. The configurations are divided into 5
    scenarii that set different options (see the class method for more
    information).

    Attributes:
        flat_terrain (bool): Switch for flat terrain.
        atm2sensor (bool): Switch for the atmospheric intrinsic radiance.
        terrain_contribution (bool): Switch for terrain reillumination.
        atm_coupl (bool): Switch for the coupled atmospheric neighbor effects.
    """

    # Case number -> (flat_terrain, atm2sensor, terrain_contribution,
    # atm_coupl). Replaces the original if/elif chain, whose inline comments
    # carried wrong case numbers ("Case 3" on case 2, "Case :" on case 4).
    _CONFIGURATIONS = {
        # Case 1: flat terrain, no terrain contribution
        1: (True, False, False, False),
        # Case 2: tilted terrain, no terrain contribution
        2: (False, False, False, False),
        # Case 3: tilted terrain with reillumination, no atmospheric coupling
        3: (False, False, True, False),
        # Case 4: tilted terrain, reillumination and atmospheric coupling
        4: (False, False, True, True),
        # Case 5: full rugged terrain effects, including the atmosphere
        # intrinsic radiance
        5: (False, True, True, True),
    }

    def __init__(self):
        # Placeholders (the `bool` type itself, as in the original code);
        # real values are assigned by create_cases().
        self.flat_terrain = bool
        self.atm2sensor = bool
        self.terrain_contribution = bool
        self.atm_coupl = bool

    def create_cases(self, case_number):
        """Set the terrain configuration.

        The five possible terrain configurations allow to configure the
        boolean switches which populate the class. Case 1: flat terrain only
        (no slopes, terrain effects, or atmosphere). Case 2: slopes only.
        Case 3: slopes and terrain effects (reillumination), but no coupling
        with the atmosphere. Case 4: slopes, terrain effects and coupling
        with the atmosphere but no atmospheric intrinsic radiance. Case 5:
        full rugged terrain effects (slopes, reillumination, atmospheric
        coupling, atmosphere intrinsic radiance).

        Args:
            case_number (int): Scenario identifier, between 1 and 5.

        Raises:
            ValueError: If `case_number` is not between 1 and 5.
        """
        try:
            (self.flat_terrain,
             self.atm2sensor,
             self.terrain_contribution,
             self.atm_coupl) = self._CONFIGURATIONS[case_number]
        except KeyError:
            # Same message as the original chain's final else branch.
            raise ValueError("Case number not available") from None
def iterative_radiance(
    topo_bands,
    angles,
    wavelength,
    hdr,
    bhr,
    rt_model,
    rt_options,
    brf,
    case,
    tw=5,
    aw=7,
    dif_anis=False,
):
    """Iterative solver for TOA radiance.

    Iterates the surface/atmosphere/terrain coupling until the
    top-of-atmosphere radiance changes by less than 1e-3 (in radiance
    units) between two successive iterations, then returns the last
    radiance field.

    Args:
        topo_bands: mapping of terrain bands ("eff_sza", "eff_vza", "ct",
            "all_shadows", "vt", "altitude") whose entries expose a
            ``.data`` array attribute — presumably an xarray Dataset;
            TODO confirm.
        angles: mapping with "SZA", "SAA", "VZA", "VAA" bands; np.cos is
            applied directly, so the values are assumed to be in radians —
            confirm units.
        wavelength: band wavelength (nm per the log message), forwarded to
            the radiative-transfer run.
        hdr: hemispherical-directional reflectance of the surface.
        bhr: bi-hemispherical reflectance (spherical albedo) of the
            surface; also seeds the environment/terrain reflectance.
        brf: bidirectional reflectance factor of the surface.
        rt_model: radiative-transfer wrapper exposing ``run(...)`` and
            6S-style ``outputs``.
        rt_options: dict of RT settings (aerosol_model, aod, water, ozone,
            atmo_model, atcor).
        case: ``Cases`` instance carrying the terrain-configuration
            switches.
        tw: terrain averaging-window size in pixels (default 5).
        aw: atmosphere averaging-window size in pixels (default 7).
        dif_anis: if True, account for the anisotropy of diffuse
            irradiance at grazing angles (eq. 16) instead of the isotropic
            sky-view factor (eq. 15).

    Returns:
        2D array: TOA radiance field from the final iteration.
    """
    # Build averaging windows used in convolution (uniform mean filters)
    terrain_window = np.full((tw, tw), 1 / tw ** 2)  # Reflected terrain
    atmosphere_window = np.full((aw, aw), 1 / aw ** 2)  # Reflected atmosphere
    # Calculate the cosine of the SZA and effective SZA
    cos_sza = np.cos(angles["SZA"].data)
    cos_sza_eff = np.cos(topo_bands["eff_sza"].data)
    # Set negative values to 0 (self-shadowed facets receive no direct sun)
    cos_sza_eff = np.where(cos_sza_eff < 0, 0, cos_sza_eff)
    # Calculate the cosine of effective viewing angle
    cos_vza = np.cos(angles["VZA"].data)
    cos_vza_eff = np.cos(topo_bands["eff_vza"].data)
    # Calculate the pixels viewed by the satellite (not hidden)
    view_ground = np.where(cos_vza_eff <= 0, 0, cos_vza_eff)
    # Binarise: any visible pixel counts fully (>0 = 1)
    view_ground = np.where(view_ground > 0, 1, view_ground)  # >0 = 1
    # Use convolution to obtain the mean terrain configuration factor
    ct_nbh = convolve(topo_bands["ct"].data, terrain_window,
                      boundary="fill", fill_value=np.nan)
    # Shadows, in this product bb = 0 if a shadow, 1 if no shadow
    bb = topo_bands["all_shadows"]
    # Shadows in the neighborhood
    bb_nbh = convolve(bb, terrain_window, boundary="fill", fill_value=np.nan)
    # Run the iterative reflectance for all bands
    print("Running complex terrain reflectance calculation"
          " for band: %s nm" % wavelength)
    # Initialise environmental reflectance, terrain reflected irradiance,
    # iterative reflectance, and iterative radiance.
    # The bi-hemispherical reflectance (spherical albedo) calculated from
    # in-situ SSA measurements is used to initialize both the
    # environmental and terrain reflectance.
    # NOTE(review): the original comment quoted 3x3 (diam = 900 m) and 7x7
    # (diam = 2.1 km) averaging windows, but the defaults here are aw=7
    # (environment) and tw=5 (terrain) — confirm which is authoritative.
    # NOTE(review): the local name `re` shadows the stdlib `re` module
    # inside this function.
    re = convolve(bhr, atmosphere_window, boundary="extend",)
    re = np.expand_dims(re, axis=2)
    rt = convolve(bhr, terrain_window, boundary="extend",)
    rt = np.expand_dims(rt, axis=2)
    r = np.zeros_like(bhr)
    r = np.expand_dims(r, axis=2)
    l_toa = np.zeros_like(bhr)
    l_toa = np.expand_dims(l_toa, axis=2)
    # Call the radiative transfer class.
    # First run: illumination geometry (sun -> surface).
    rt_model.run(angles["SZA"].data.mean(),
                 angles["SAA"].data.mean(),
                 angles["VZA"].data.mean(),
                 angles["VAA"].data.mean(),
                 wavelength,
                 np.nanmean(topo_bands["altitude"].data),
                 rt_options["aerosol_model"],
                 aod=rt_options["aod"],
                 refl=bhr.mean(),
                 water=rt_options["water"],
                 ozone=rt_options["ozone"],
                 atmo=rt_options["atmo_model"],
                 atcor=rt_options["atcor"],
                 )
    atmospheric_data = rt_model.outputs
    # Second run with sun and view angles swapped: viewing geometry
    # (surface -> sensor), used for the upward transmittances below.
    rt_model.run(angles["VZA"].data.mean(),
                 angles["VAA"].data.mean(),
                 angles["SZA"].data.mean(),
                 angles["SAA"].data.mean(),
                 wavelength,
                 np.nanmean(topo_bands["altitude"].data),
                 rt_options["aerosol_model"],
                 aod=rt_options["aod"],
                 refl=bhr.mean(),
                 water=rt_options["water"],
                 ozone=rt_options["ozone"],
                 atmo=rt_options["atmo_model"],
                 atcor=rt_options["atcor"],
                 )
    atmospheric_inv_data = rt_model.outputs
    # Get the variables of interest from the 6s run.
    # Downward direct solar flux attenuated by the atmosphere
    # e_dir = mu_s * E_s * e{-tau/mu_s}
    EdP_flat = atmospheric_data.direct_solar_irradiance
    # Solar flux (exo-atmospheric solar spectrum)
    Eo = atmospheric_data.solar_spectrum
    # For viewing direction
    EdP_thetav = atmospheric_inv_data.direct_solar_irradiance
    # Direct atmospheric transmittance in illumination direction
    T_dir_dn = EdP_flat / (Eo * cos_sza)
    # Direct atmospheric transmittance in viewing direction
    T_dir_up = EdP_thetav / (Eo * cos_vza)
    # Downward diffuse solar flux for a flat surface:
    EhP_flat = atmospheric_data.diffuse_solar_irradiance
    # Downward diffuse solar flux for a flat surface: viewing direction
    EhP_flat_thetav = atmospheric_inv_data.diffuse_solar_irradiance
    # Total downward flux for a flat surface
    EtP_flat = EdP_flat + EhP_flat
    # Atmospheric spherical albedo
    rs = atmospheric_data.spherical_albedo.total
    # Atmospheric path radiance
    LtA = atmospheric_data.atmospheric_intrinsic_radiance
    # Atmospheric diffuse transmittance. In viewing direction
    td = EhP_flat_thetav / (Eo * cos_vza)
    # Flat or tilted terrain options
    if case.flat_terrain:
        # Direct solar irradiance at surface
        EdP = EdP_flat  # Same quantity as 6S
        # Sky view factor
        vd = 1
    else:  # tilted terrain
        EdP = bb * Eo * cos_sza_eff * T_dir_dn
        # Sky view factor: with or without anisotropy of diffuse irradiance
        # at grazing angles
        if not dif_anis:
            # Assume isotropic irradiance, eq. 15
            vd = topo_bands["vt"]
        else:
            # Account for anisotropy of diffuse irradiance at grazing
            # angles, eq. 16
            # NOTE(review): the second term reads
            # (1 - bb * T_dir_dn * topo_bands["vt"]); eq. 16 formulations
            # are usually grouped as (1 - bb * T_dir_dn) * vt — confirm
            # the parenthesisation is intended.
            vd = bb * T_dir_dn * (cos_sza_eff / cos_sza) + (
                1 - bb * T_dir_dn * topo_bands["vt"])
    # Contribution of the neighbouring slopes to the satellite signal.
    # Set to zero (modified in iteration for full run)
    # LtNa = LtGA + LtGGA + LtGAGA
    LtNA = 0
    # Direct radiation reflected by pixel to the satellite sensor
    # (eq. 2)
    LdP = view_ground * (brf / np.pi) * EdP * T_dir_up
    # Iterative calculation.
    # Initialise the convergence indicator and the iterator; index i also
    # addresses the third axis of the stacked r / re / rt / l_toa arrays.
    l_difference = 1
    i = 1
    while l_difference > 1e-3:
        print("Iteration number: %s" % i)
        # Atmospheric contribution to the pixel (multiple reflections),
        # eq. 20
        # e_flat_ground = e_dir + e_diff_flat; s_atm = rho_s in Modimlab
        # R(k-1)(M)dSm = rho_e
        EtGAP = EtP_flat * ((re[:, :, i - 1] * rs) /
                            (1 - re[:, :, i - 1] * rs))
        # Atmospheric coupling switch
        if case.atm_coupl:
            EtP = EtP_flat + EtGAP
        else:
            EtP = EtP_flat
        # Terrain contribution switch
        if case.terrain_contribution:
            # Add shadow off option in eq. 18
            shad = bb_nbh  # Disable reillumination from shadows
            # # Terrain re-illumination, eq. 18 substituted by eq. 11 from
            # # Sirguey et al. 2011
            # Terrain reflected irradiance
            EtGP = EtP * (rt[:, :, i - 1] * shad * topo_bands["ct"]) / (
                1 - rt[:, :, i - 1] * shad * ct_nbh)
        else:
            EtGP = 0
        # Combine parts of the total diffuse irradiance incoming at the
        # surface of the pixel, built from eq. 3 (rewritten based on grey
        # recaps)
        EhP = EhP_flat * vd + EtGP
        # If considering the neighbouring slopes contributions directly to
        # the satellite signal, "l_dif_dir" and "l_dif_ref_coupl_dif"
        # become != 0
        if case.atm2sensor:
            # LtNA = LtGA + LtGGA + LtGAGA, eq. 23
            LtNA = (td * re[:, :, i - 1] *
                    (EdP_flat + EhP_flat + EtGAP)) / np.pi
        # Diffuse radiation reflected by pixel to the satellite sensor
        # Eq. 4
        LhP = view_ground * (hdr / np.pi) * EhP * T_dir_up
        # TOA radiance (equation 1)
        l_total = LdP + LhP + LtNA + LtA
        # Update the surface hemispherical-conical reflectance;
        # np.divide with a `where` mask leaves 0 where the divisor is 0
        # (hidden pixels or no incoming flux) instead of dividing by zero.
        r_current_dividend = np.pi * (l_total - LtNA - LtA)
        r_current_divisor = T_dir_up * view_ground * (EdP + EhP)
        r_currentstep = np.divide(r_current_dividend, r_current_divisor,
                                  out=np.zeros_like(r_current_dividend),
                                  where=r_current_divisor != 0)
        # Update the reflectance stack
        r = np.dstack((r, r_currentstep))
        # Apply the averaging of the updated reflectance
        re_i = convolve(r[:, :, i], atmosphere_window, boundary="extend")
        re = np.dstack((re, re_i))
        rt_i = convolve(r[:, :, i], terrain_window, boundary="extend")
        rt = np.dstack((rt, rt_i))
        # Update the radiance stack
        l_toa = np.dstack((l_toa, l_total))
        # Update the convergence indicator (removing the edges, where the
        # convolutions are affected by the boundary handling)
        l_difference = np.abs(
            np.nanmean(l_toa[2:-2, 2:-2, i] - l_toa[2:-2, 2:-2, i - 1])
        )
        print(
            "radiance difference with previous"
            " iteration = %s" % l_difference
        )
        i += 1
    return l_toa[:, :, -1]
| [
"numpy.full",
"astropy.convolution.convolve",
"numpy.dstack",
"numpy.zeros_like",
"numpy.expand_dims",
"numpy.where",
"numpy.cos",
"numpy.nanmean"
] | [((3522, 3552), 'numpy.full', 'np.full', (['(tw, tw)', '(1 / tw ** 2)'], {}), '((tw, tw), 1 / tw ** 2)\n', (3529, 3552), True, 'import numpy as np\n'), ((3598, 3628), 'numpy.full', 'np.full', (['(aw, aw)', '(1 / aw ** 2)'], {}), '((aw, aw), 1 / aw ** 2)\n', (3605, 3628), True, 'import numpy as np\n'), ((3724, 3750), 'numpy.cos', 'np.cos', (["angles['SZA'].data"], {}), "(angles['SZA'].data)\n", (3730, 3750), True, 'import numpy as np\n'), ((3769, 3803), 'numpy.cos', 'np.cos', (["topo_bands['eff_sza'].data"], {}), "(topo_bands['eff_sza'].data)\n", (3775, 3803), True, 'import numpy as np\n'), ((3853, 3894), 'numpy.where', 'np.where', (['(cos_sza_eff < 0)', '(0)', 'cos_sza_eff'], {}), '(cos_sza_eff < 0, 0, cos_sza_eff)\n', (3861, 3894), True, 'import numpy as np\n'), ((3964, 3990), 'numpy.cos', 'np.cos', (["angles['VZA'].data"], {}), "(angles['VZA'].data)\n", (3970, 3990), True, 'import numpy as np\n'), ((4009, 4043), 'numpy.cos', 'np.cos', (["topo_bands['eff_vza'].data"], {}), "(topo_bands['eff_vza'].data)\n", (4015, 4043), True, 'import numpy as np\n'), ((4127, 4169), 'numpy.where', 'np.where', (['(cos_vza_eff <= 0)', '(0)', 'cos_vza_eff'], {}), '(cos_vza_eff <= 0, 0, cos_vza_eff)\n', (4135, 4169), True, 'import numpy as np\n'), ((4212, 4253), 'numpy.where', 'np.where', (['(view_ground > 0)', '(1)', 'view_ground'], {}), '(view_ground > 0, 1, view_ground)\n', (4220, 4253), True, 'import numpy as np\n'), ((4348, 4436), 'astropy.convolution.convolve', 'convolve', (["topo_bands['ct'].data", 'terrain_window'], {'boundary': '"""fill"""', 'fill_value': 'np.nan'}), "(topo_bands['ct'].data, terrain_window, boundary='fill', fill_value\n =np.nan)\n", (4356, 4436), False, 'from astropy.convolution import convolve\n'), ((4603, 4667), 'astropy.convolution.convolve', 'convolve', (['bb', 'terrain_window'], {'boundary': '"""fill"""', 'fill_value': 'np.nan'}), "(bb, terrain_window, boundary='fill', fill_value=np.nan)\n", (4611, 4667), False, 'from astropy.convolution import 
convolve\n'), ((5281, 5332), 'astropy.convolution.convolve', 'convolve', (['bhr', 'atmosphere_window'], {'boundary': '"""extend"""'}), "(bhr, atmosphere_window, boundary='extend')\n", (5289, 5332), False, 'from astropy.convolution import convolve\n'), ((5343, 5369), 'numpy.expand_dims', 'np.expand_dims', (['re'], {'axis': '(2)'}), '(re, axis=2)\n', (5357, 5369), True, 'import numpy as np\n'), ((5380, 5428), 'astropy.convolution.convolve', 'convolve', (['bhr', 'terrain_window'], {'boundary': '"""extend"""'}), "(bhr, terrain_window, boundary='extend')\n", (5388, 5428), False, 'from astropy.convolution import convolve\n'), ((5439, 5465), 'numpy.expand_dims', 'np.expand_dims', (['rt'], {'axis': '(2)'}), '(rt, axis=2)\n', (5453, 5465), True, 'import numpy as np\n'), ((5475, 5493), 'numpy.zeros_like', 'np.zeros_like', (['bhr'], {}), '(bhr)\n', (5488, 5493), True, 'import numpy as np\n'), ((5502, 5527), 'numpy.expand_dims', 'np.expand_dims', (['r'], {'axis': '(2)'}), '(r, axis=2)\n', (5516, 5527), True, 'import numpy as np\n'), ((5541, 5559), 'numpy.zeros_like', 'np.zeros_like', (['bhr'], {}), '(bhr)\n', (5554, 5559), True, 'import numpy as np\n'), ((5572, 5601), 'numpy.expand_dims', 'np.expand_dims', (['l_toa'], {'axis': '(2)'}), '(l_toa, axis=2)\n', (5586, 5601), True, 'import numpy as np\n'), ((5865, 5904), 'numpy.nanmean', 'np.nanmean', (["topo_bands['altitude'].data"], {}), "(topo_bands['altitude'].data)\n", (5875, 5904), True, 'import numpy as np\n'), ((6489, 6528), 'numpy.nanmean', 'np.nanmean', (["topo_bands['altitude'].data"], {}), "(topo_bands['altitude'].data)\n", (6499, 6528), True, 'import numpy as np\n'), ((11465, 11494), 'numpy.dstack', 'np.dstack', (['(r, r_currentstep)'], {}), '((r, r_currentstep))\n', (11474, 11494), True, 'import numpy as np\n'), ((11568, 11626), 'astropy.convolution.convolve', 'convolve', (['r[:, :, i]', 'atmosphere_window'], {'boundary': '"""extend"""'}), "(r[:, :, i], atmosphere_window, boundary='extend')\n", (11576, 11626), False, 
'from astropy.convolution import convolve\n'), ((11640, 11661), 'numpy.dstack', 'np.dstack', (['(re, re_i)'], {}), '((re, re_i))\n', (11649, 11661), True, 'import numpy as np\n'), ((11677, 11732), 'astropy.convolution.convolve', 'convolve', (['r[:, :, i]', 'terrain_window'], {'boundary': '"""extend"""'}), "(r[:, :, i], terrain_window, boundary='extend')\n", (11685, 11732), False, 'from astropy.convolution import convolve\n'), ((11746, 11767), 'numpy.dstack', 'np.dstack', (['(rt, rt_i)'], {}), '((rt, rt_i))\n', (11755, 11767), True, 'import numpy as np\n'), ((11821, 11848), 'numpy.dstack', 'np.dstack', (['(l_toa, l_total)'], {}), '((l_toa, l_total))\n', (11830, 11848), True, 'import numpy as np\n'), ((11957, 12016), 'numpy.nanmean', 'np.nanmean', (['(l_toa[2:-2, 2:-2, i] - l_toa[2:-2, 2:-2, i - 1])'], {}), '(l_toa[2:-2, 2:-2, i] - l_toa[2:-2, 2:-2, i - 1])\n', (11967, 12016), True, 'import numpy as np\n'), ((11314, 11347), 'numpy.zeros_like', 'np.zeros_like', (['r_current_dividend'], {}), '(r_current_dividend)\n', (11327, 11347), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import sys
import argparse
import datetime as dt
import numpy as np
import pandas as pd
import hyperopt as hp
from scipy import stats
from scipy.spatial.distance import hamming
import dimod
from dwave.system import EmbeddingComposite, FixedEmbeddingComposite, TilingComposite, DWaveSampler
from quantum_unfolding import compact_vector, laplacian, R0, get_embedding_with_short_chain, get_energy, anneal_sched_custom, merge_substates
# Compact numpy printing for the diagnostic output below.
np.set_printoptions(precision=1, linewidth=200, suppress=True)

parser = argparse.ArgumentParser("Quantum unfolding optimizer")
parser.add_argument('-n', '--max_evals', default=20)
parser.add_argument('-d', '--dry-run', action='store_true', default=False)
args = parser.parse_args()

# Shared D-Wave hardware connection, reused by every objective() call.
hardware_sampler = DWaveSampler()

# parameters to be optimized
lmbd = 0.  # regularization strength
nreads = 10  # number of reads
n = 4  # number of bits

# constants
# NOTE(review): `x` is never defined in this file — this line raises
# NameError at import time. `z` and `d`, used inside objective(), are
# likewise undefined; presumably the truth/target/data vectors should be
# imported from quantum_unfolding or defined here. TODO confirm.
N = x.shape[0]
D = laplacian(N)  # discrete Laplacian used as the regularization matrix
def objective(args):
    """Hyperopt objective: one unfolding run on the D-Wave annealer.

    Builds the QUBO for the regularized unfolding problem, embeds and
    samples it on the hardware, and scores the best sample against the
    known truth.

    Args:
        args: dict sampled from the search space, with keys 'lmbd'
            (regularization strength), 'num_reads', 'num_bits' and
            'annealing_time'.

    Returns:
        dict: hyperopt result record; 'loss' is the Hamming distance
        between the binarised truth and the best sample.
    """
    # BUG FIX: this used to read `lmdb = args['lmbd']` (typo), so the
    # sampled regularization strength was never used — the QUBO loops
    # below silently picked up the module-level lmbd = 0.0 instead, and
    # the returned record always reported 0.0.
    lmbd = args['lmbd']
    num_reads = args['num_reads']
    n = args['num_bits']
    annealing_time = args['annealing_time']
    # Discretize the truth (x), target (z) and data (d) vectors and the
    # response / Laplacian matrices to n bits per entry.
    # NOTE(review): x, z, d and discretize_vector/discretize_matrix are
    # not defined or imported in this file — confirm they come from
    # quantum_unfolding. x_b is computed but unused.
    x_b = discretize_vector(x, n)
    z_b = discretize_vector(z, n)
    d_b = discretize_vector(d, n)
    R_b = discretize_matrix(R0, n)
    D_b = discretize_matrix(D, n)
    # Create QUBO operator: S mirrors the full coefficient map (used for
    # the embedding), h holds linear terms, J quadratic terms.
    S = {}
    h = {}
    J = {}
    # linear constraints
    # NOTE(review): the linear term uses the raw d[i], not the discretized
    # d_b — confirm intended.
    for j in range(n * N):
        h[(j)] = 0
        for i in range(N):
            h[(j)] += (R_b[i][j] * R_b[i][j] - 2 * R_b[i][j] * d[i] +
                       lmbd * D_b[i][j] * D_b[i][j])
        S[(j, j)] = h[(j)]
    # quadratic constraints
    for j in range(n * N):
        for k in range(j + 1, n * N):
            J[(j, k)] = 0
            for i in range(N):
                J[(j, k)] += 2 * (R_b[i][j] * R_b[i][k] +
                                  lmbd * D_b[i][j] * D_b[i][k])
            S[(j, k)] = J[(j, k)]
    bqm = dimod.BinaryQuadraticModel(linear=h,
                                     quadratic=J,
                                     offset=0.0,
                                     vartype=dimod.BINARY)
    # Find a short-chain minor embedding onto the hardware graph and pin it.
    embedding = get_embedding_with_short_chain(
        S, tries=5, processor=hardware_sampler.edgelist, verbose=False)
    sampler = FixedEmbeddingComposite(hardware_sampler, embedding)
    solver_parameters = {
        'num_reads': num_reads,
        'auto_scale': True,
        'annealing_time': annealing_time,  # default: 20 us
        #'anneal_schedule': anneal_sched_custom(id=3),
        'num_spin_reversal_transforms': 2,  # default: 2
    }
    # Aggregate identical samples, then take the lowest-energy one.
    results = sampler.sample(bqm, **solver_parameters).aggregate()
    best_fit = results.first
    q = np.array(list(best_fit.sample.values()))
    # Recombine the n-bit groups back into the unfolded spectrum.
    y = compact_vector(q, n)
    dof = N - 1
    # NOTE(review): the third positional argument of stats.chisquare is
    # ddof (delta degrees of freedom), not the dof itself — confirm this
    # call is intended.
    chi2, p = stats.chisquare(y, z, dof)
    chi2dof = chi2 / float(dof)
    hamm = hamming(z_b, q)
    return {
        'loss': hamm,  # chi2dof,
        'status': hp.STATUS_OK,
        'diffxs': y,
        'q': q,
        'hamming': hamm,
        'lmbd': lmbd,
        'num_reads': num_reads,
        'num_bits': n,
        'annealing_time': annealing_time,
    }
max_evals = int(args.max_evals)
# Hyper-parameter search space.
# NOTE(review): `hp` is the *hyperopt* module (import hyperopt as hp), so
# `hp.hp.choice` is hyperopt.hp.choice — the aliasing reads oddly but is
# correct.
space = hp.hp.choice(
    'unfolder',
    [{
        'lmbd': hp.hp.choice('lmbd', [0.0, 0.5, 1.0]),
        'num_reads': hp.hp.choice('num_reads', [100, 500, 1000]),
        'num_bits': hp.hp.choice('num_bits', [4, 8]),
        'annealing_time': hp.hp.choice('annealing_time', [20, 50, 100]),
    }])
# Tree-structured Parzen Estimator search over the space above.
tpe_algo = hp.tpe.suggest
tpe_trials = hp.Trials()
# NOTE(review): the --dry-run flag parsed above is never consulted, so
# every evaluation submits to the hardware sampler. TODO confirm.
bestfit = hp.fmin(fn=objective,
                  space=space,
                  algo=tpe_algo,
                  trials=tpe_trials,
                  max_evals=max_evals)
print(bestfit)
# Dump every completed trial for offline inspection.
for trial in tpe_trials:
    print("Trial:")
    print(trial)
    print(" --- ")
# results = pd.DataFrame({'loss': [r['loss'] for r in tpe_trials.results],
#                         'iteration': tpe_trials.idxs_vals[0]['x'],
#                         'x': tpe_trials.idxs_vals[1]['x']}
#                        )
# print(results.head())
| [
"numpy.set_printoptions",
"argparse.ArgumentParser",
"dwave.system.DWaveSampler",
"quantum_unfolding.get_embedding_with_short_chain",
"quantum_unfolding.compact_vector",
"hyperopt.hp.choice",
"hyperopt.fmin",
"scipy.spatial.distance.hamming",
"dimod.BinaryQuadraticModel",
"hyperopt.Trials",
"qua... | [((468, 530), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(1)', 'linewidth': '(200)', 'suppress': '(True)'}), '(precision=1, linewidth=200, suppress=True)\n', (487, 530), True, 'import numpy as np\n'), ((541, 595), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Quantum unfolding optimizer"""'], {}), "('Quantum unfolding optimizer')\n", (564, 595), False, 'import argparse\n'), ((771, 785), 'dwave.system.DWaveSampler', 'DWaveSampler', ([], {}), '()\n', (783, 785), False, 'from dwave.system import EmbeddingComposite, FixedEmbeddingComposite, TilingComposite, DWaveSampler\n'), ((940, 952), 'quantum_unfolding.laplacian', 'laplacian', (['N'], {}), '(N)\n', (949, 952), False, 'from quantum_unfolding import compact_vector, laplacian, R0, get_embedding_with_short_chain, get_energy, anneal_sched_custom, merge_substates\n'), ((3553, 3564), 'hyperopt.Trials', 'hp.Trials', ([], {}), '()\n', (3562, 3564), True, 'import hyperopt as hp\n'), ((3575, 3668), 'hyperopt.fmin', 'hp.fmin', ([], {'fn': 'objective', 'space': 'space', 'algo': 'tpe_algo', 'trials': 'tpe_trials', 'max_evals': 'max_evals'}), '(fn=objective, space=space, algo=tpe_algo, trials=tpe_trials,\n max_evals=max_evals)\n', (3582, 3668), True, 'import hyperopt as hp\n'), ((1933, 2021), 'dimod.BinaryQuadraticModel', 'dimod.BinaryQuadraticModel', ([], {'linear': 'h', 'quadratic': 'J', 'offset': '(0.0)', 'vartype': 'dimod.BINARY'}), '(linear=h, quadratic=J, offset=0.0, vartype=dimod\n .BINARY)\n', (1959, 2021), False, 'import dimod\n'), ((2145, 2244), 'quantum_unfolding.get_embedding_with_short_chain', 'get_embedding_with_short_chain', (['S'], {'tries': '(5)', 'processor': 'hardware_sampler.edgelist', 'verbose': '(False)'}), '(S, tries=5, processor=hardware_sampler.\n edgelist, verbose=False)\n', (2175, 2244), False, 'from quantum_unfolding import compact_vector, laplacian, R0, get_embedding_with_short_chain, get_energy, anneal_sched_custom, merge_substates\n'), ((2264, 2316), 
'dwave.system.FixedEmbeddingComposite', 'FixedEmbeddingComposite', (['hardware_sampler', 'embedding'], {}), '(hardware_sampler, embedding)\n', (2287, 2316), False, 'from dwave.system import EmbeddingComposite, FixedEmbeddingComposite, TilingComposite, DWaveSampler\n'), ((2771, 2791), 'quantum_unfolding.compact_vector', 'compact_vector', (['q', 'n'], {}), '(q, n)\n', (2785, 2791), False, 'from quantum_unfolding import compact_vector, laplacian, R0, get_embedding_with_short_chain, get_energy, anneal_sched_custom, merge_substates\n'), ((2823, 2849), 'scipy.stats.chisquare', 'stats.chisquare', (['y', 'z', 'dof'], {}), '(y, z, dof)\n', (2838, 2849), False, 'from scipy import stats\n'), ((2894, 2909), 'scipy.spatial.distance.hamming', 'hamming', (['z_b', 'q'], {}), '(z_b, q)\n', (2901, 2909), False, 'from scipy.spatial.distance import hamming\n'), ((3273, 3310), 'hyperopt.hp.choice', 'hp.hp.choice', (['"""lmbd"""', '[0.0, 0.5, 1.0]'], {}), "('lmbd', [0.0, 0.5, 1.0])\n", (3285, 3310), True, 'import hyperopt as hp\n'), ((3333, 3376), 'hyperopt.hp.choice', 'hp.hp.choice', (['"""num_reads"""', '[100, 500, 1000]'], {}), "('num_reads', [100, 500, 1000])\n", (3345, 3376), True, 'import hyperopt as hp\n'), ((3398, 3430), 'hyperopt.hp.choice', 'hp.hp.choice', (['"""num_bits"""', '[4, 8]'], {}), "('num_bits', [4, 8])\n", (3410, 3430), True, 'import hyperopt as hp\n'), ((3458, 3503), 'hyperopt.hp.choice', 'hp.hp.choice', (['"""annealing_time"""', '[20, 50, 100]'], {}), "('annealing_time', [20, 50, 100])\n", (3470, 3503), True, 'import hyperopt as hp\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file defines a gaussian noise filter that can be used in experiments and evaluations.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020, <NAME>"
__license__ = "MIT License"
__version__ = "1.0.0"
import numpy as np
from skimage.util import random_noise
from skopt.space import Real
class GaussianNoiseFilter(object):
    """
    Noise filter that corrupts an image with additive gaussian noise of a
    given mean and variance.
    """

    def __init__(self, random_state):
        # Seed forwarded to skimage's random_noise for reproducible noise.
        self.random_state = random_state

    @staticmethod
    def get_default_dimensions():
        # Default (mean, var) pair used when no search space is sampled.
        return [0.0, 0.05]

    @staticmethod
    def get_dimensions():
        # Search space for the two noise parameters (skopt dimensions).
        mean_dim = Real(low=-0.25, high=0.25, name='mean')
        var_dim = Real(low=0.01, high=0.1, name='var')
        return [mean_dim, var_dim]

    def transform_image(self, dimensions, image):
        mean, var = dimensions
        # random_noise seeds numpy's global RNG; save and restore its state
        # so the filter does not perturb callers' random streams.
        saved_state = np.random.get_state()
        # random_noise expects a float image in [0, 1].
        normalized = np.clip(image / 255., 0., 1.)
        noisy = random_noise(normalized, mode='gaussian',
                             seed=self.random_state, mean=mean, var=var)
        # Back to the [0, 255] range of the input.
        result = np.clip(noisy * 255., 0., 255.)
        np.random.set_state(saved_state)
        return result
| [
"numpy.random.get_state",
"skopt.space.Real",
"numpy.clip",
"numpy.random.set_state"
] | [((958, 979), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (977, 979), True, 'import numpy as np\n'), ((1170, 1199), 'numpy.random.set_state', 'np.random.set_state', (['previous'], {}), '(previous)\n', (1189, 1199), True, 'import numpy as np\n'), ((757, 796), 'skopt.space.Real', 'Real', ([], {'low': '(-0.25)', 'high': '(0.25)', 'name': '"""mean"""'}), "(low=-0.25, high=0.25, name='mean')\n", (761, 796), False, 'from skopt.space import Real\n'), ((810, 846), 'skopt.space.Real', 'Real', ([], {'low': '(0.01)', 'high': '(0.1)', 'name': '"""var"""'}), "(low=0.01, high=0.1, name='var')\n", (814, 846), False, 'from skopt.space import Real\n'), ((1018, 1050), 'numpy.clip', 'np.clip', (['(image / 255.0)', '(0.0)', '(1.0)'], {}), '(image / 255.0, 0.0, 1.0)\n', (1025, 1050), True, 'import numpy as np\n')] |
import glob
import os
import re
import shutil
import subprocess

import numpy as np

# Field sweep of an isolated type-II bubble for several lateral sizes L,
# driving OOMMF's boxsi solver once per (L, Bz) combination.
for L in [600, 800, 1000, 1200, 1400]:
    # Make (a fresh) folder to save omf files for this lateral size.
    SAVE_FOLDER = 'omfs_L{}nm_t200nm'.format(L)
    if os.path.exists(SAVE_FOLDER):
        # BUG FIX: os.rmdir only removes *empty* directories and raised
        # OSError on a folder left over from a previous run; remove the
        # whole tree so the sweep always starts clean.
        shutil.rmtree(SAVE_FOLDER)
    os.mkdir(SAVE_FOLDER)
    # Out-of-plane field Bz in mT, from 40 to 330 in steps of 10.
    # for Bz in np.arange(0.05, 0.26, 0.01):
    for Bz in np.arange(40, 331, 10):
        subprocess.call(('oommf boxsi -threads 6 '
                         '-parameters '
                         '"'
                         'Lx {0}e-9 '
                         'Ly {0}e-9 '
                         'Lz 200e-9 '
                         'Bz {1}e-3 '
                         'BASENAME '
                         '{2}/typeII_bubble_Bz{1:03d}mT_field-sweep'
                         '" '
                         'oommf_isolated_typeII_bubble.mif').format(int(L),
                                                                    int(Bz),
                                                                    SAVE_FOLDER),
                        shell=True)
| [
"os.rmdir",
"os.mkdir",
"os.path.exists",
"numpy.arange"
] | [((201, 228), 'os.path.exists', 'os.path.exists', (['SAVE_FOLDER'], {}), '(SAVE_FOLDER)\n', (215, 228), False, 'import os\n'), ((264, 285), 'os.mkdir', 'os.mkdir', (['SAVE_FOLDER'], {}), '(SAVE_FOLDER)\n', (272, 285), False, 'import os\n'), ((346, 368), 'numpy.arange', 'np.arange', (['(40)', '(331)', '(10)'], {}), '(40, 331, 10)\n', (355, 368), True, 'import numpy as np\n'), ((238, 259), 'os.rmdir', 'os.rmdir', (['SAVE_FOLDER'], {}), '(SAVE_FOLDER)\n', (246, 259), False, 'import os\n')] |
import argparse
import torch
import numpy as np
import pandas as pd
import torch.nn.functional as F
def main(args):
    """Print the top-k players most similar to ``args.player``.

    Loads the precomputed embeddings (``.npy``) and the player roster
    (``.csv``), ranks every player by cosine similarity to the query player
    and prints the ``k + 1`` best matches (the extra one is the query player
    itself, which always has similarity 1).
    """
    roster = pd.read_csv(args.players_file)
    vectors = torch.from_numpy(np.load(args.embedding_file))
    # Broadcast the query embedding against every row of the matrix.
    query = vectors[args.player].repeat(len(vectors), 1)
    similarity = F.cosine_similarity(query, vectors)
    # k + 1 because the query player trivially matches itself.
    scores, indices = torch.topk(similarity, k=args.k + 1)
    # Column 2 of the roster holds the player names.
    result = roster.iloc[indices.tolist(), 2].reset_index()
    result['scores'] = scores.numpy()
    print(result)
if __name__ == '__main__':
    # Command-line entry point: build the parser, then hand the parsed
    # namespace straight to main().
    cli = argparse.ArgumentParser(description='fifa dataset similarity search')
    cli.add_argument('--k', type=int, required=False, default=5, help='Top K. Defaults: 5.')
    cli.add_argument('--embedding_file', type=str, required=True, help='embedding file')
    cli.add_argument('--player', type=int, help='player index')
    cli.add_argument('--players_file', type=str, required=True, help='path to players.csv file')
    cli.add_argument('--method', type=str, required=True, help='method used to generate embedding: (lrw or node2vec)', choices=['lrw', 'node2vec'])
    main(cli.parse_args())
| [
"numpy.load",
"torch.topk",
"argparse.ArgumentParser",
"pandas.read_csv",
"torch.nn.functional.cosine_similarity"
] | [((246, 271), 'pandas.read_csv', 'pd.read_csv', (['players_file'], {}), '(players_file)\n', (257, 271), True, 'import pandas as pd\n'), ((420, 455), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['xs', 'embeddings'], {}), '(xs, embeddings)\n', (439, 455), True, 'import torch.nn.functional as F\n'), ((478, 508), 'torch.topk', 'torch.topk', (['distances'], {'k': '(k + 1)'}), '(distances, k=k + 1)\n', (488, 508), False, 'import torch\n'), ((672, 741), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""fifa dataset similarity search"""'}), "(description='fifa dataset similarity search')\n", (695, 741), False, 'import argparse\n'), ((306, 329), 'numpy.load', 'np.load', (['embedding_file'], {}), '(embedding_file)\n', (313, 329), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import gym
import time
class OptimizationEnv(gym.Env):
    """Gym environment for black-box 2-D function minimization.

    The agent proposes (x, y) query points; the reward is the *negated*
    value of a classic test function (Rastrigin, Rosenbrock, Matyas,
    Himmelblau or a plain quadratic), so maximizing reward minimizes the
    function.  A random offset is applied per episode so the optimum is
    not always at the origin.
    """
    def __init__(self, fn_name="Rastrigin", agent_mode=False, shuffle=False):
        # Maximum number of step() calls per episode.
        self.max_steps = 32
        # Substring matching so capitalization/partial names still resolve.
        if "astrigi" in fn_name:
            self.fn_name = "rastrigin"
        elif "osenbro" in fn_name:
            self.fn_name = "rosenbrock"
        elif "atyas" in fn_name:
            self.fn_name = "matyas"
        elif "immmel" in fn_name:
            self.fn_name= "himmelblau"
        else:
            self.fn_name = "quadratic"
        # fn_dict maps name -> [f(x, y), min_x, max_x, min_y, max_y].
        self.fn_dict = {"rastrigin": [ lambda x,y:\
                20 + (x**2 + y**2) - 10 * (np.cos(2*np.pi * x) + np.cos(2*np.pi*y)),\
                -5.12, 5.12, -5.12, 5.12],\
                "rosenbrock": [lambda x,y:\
                (1-x)**2 + 100 * (y - x**2)**2,\
                -4.0, 4.0, -4.0, 4.0],\
                "matyas": [lambda x,y:\
                0.26 * (x**2 + y**2) - 0.48 * x * y,\
                -5.12, 5.12, -5.12, 5.12],\
                "himmelblau": [lambda x,y:\
                (x**2 + y - 11)**2 + (x + y**2 - 7)**2,
                -6.0, 6.0, -6.0, 6.0],
                "quadratic": [lambda x,y:\
                x**2 + y**2,\
                -5.12, 5.12, -5.12, 5.12]\
                }
        self.shuffle = shuffle
        self.action_space = self.my_action_space()
        self.observation_space = self.my_observation_space()
        self.reset_actions()
    class my_action_space():
        # Minimal stand-in for a gym Box space: actions are 2-D offsets
        # sampled uniformly from [-1, 1)^2.
        def __init__(self):
            self.range = [(-1.0, 1.0), (-1.0, 1.0)]
            self.shape = (2,)
        def sample(self):
            return 2 * (np.random.random((2)) - 0.5)
        def sample_many(self, n = 64):
            return [ 2 * (np.random.random((2)) - 0.5) for ii in range(n)]
    class my_observation_space():
        # Observation stand-in: rewards are non-positive scalars (negated
        # function values), so the space is (-1e5, 0.0].
        def __init__(self):
            self.range = (-1e5, 0.0)
            self.max = 0.0
            self.min = -1.e-5
            self.shape = (1,)
        def sample(self):
            return self.min * (np.random.random((1)))
        def sample_many(self, n = 64):
            return [ self.min * (np.random.random((1))) for ii in range(n)]
    def parse_coords(self, coords):
        """Translate agent coordinates by the per-episode random offset."""
        #min_x, max_x = self.fn_dict[self.fn_name][1], self.fn_dict[self.fn_name][2]
        #min_y, max_y = self.fn_dict[self.fn_name][3], self.fn_dict[self.fn_name][4]
        #coords = [np.tanh(elem) for elem in coords]
        #coord_x = (max_x - min_x)/2 * (coords[0] ) + (max_x + min_x) / 2
        #coord_y = (max_y - min_y)/2 * (coords[1] ) + (max_y + min_y) / 2
        coord_x, coord_y = coords[0]+self.offset[0], coords[1]+self.offset[1]
        return [coord_x, coord_y]
    def reset(self):
        """Start a new episode: optionally re-draw the function, draw a new
        offset, and return the observation at the (offset) origin."""
        self.steps = 0
        if self.shuffle:
            # Uniform choice over the available test functions.
            self.fn_name = np.random.choice(list(self.fn_dict.keys()), \
                    p=[1/len(self.fn_dict.keys()) for ii in range(len(self.fn_dict.keys()))])
        self.offset = self.action_space.sample()
        coord_x, coord_y = self.parse_coords([0.0, 0.0])
        reward = -self.fn_dict[self.fn_name][0](coord_x, coord_y)
        # Observation = [reward, action] with the origin as the first action.
        obs = np.append(reward, np.array([0.0,0.0]))
        self.reset_actions()
        self.action.append([coord_x, coord_y])
        return obs
    def reset_actions(self):
        # Clear the history of query points (used by render()).
        self.action = []
    def step(self, action):
        """Evaluate one action (or a list of actions) and return
        (obs, reward, done, info) in the usual gym convention."""
        # A list of 2-element arrays is treated as a population of queries
        # (e.g. from an evolutionary strategy); a single array is one query.
        if type(action) == list and action[0].shape[0] == 2:
            multi = True
        else:
            multi = False
        info = {}
        if multi:
            info["num_actions"] = len(action)
            reward = []
            obs = []
            for coords in action:
                [coord_x, coord_y] = self.parse_coords(coords)
                reward.append( - self.fn_dict[self.fn_name][0](coord_x, coord_y))
                self.action.append([coord_x, coord_y])
                obs.append(np.append(reward, coords))
        else:
            [coord_x, coord_y] = self.parse_coords(action)
            reward = - self.fn_dict[self.fn_name][0](coord_x, coord_y)
            self.action.append([coord_x, coord_y])
            obs = np.append(reward, action)
        info["avg_reward"] = np.mean(reward)
        self.steps += 1
        if self.steps >= self.max_steps:
            done = True
        else:
            done = False
        return obs, reward, done, info
    def render(self, save_it=False, tag=0):
        """Plot the current function landscape and every query point so far.

        With save_it=True the figure is written to ./results/indirect/,
        otherwise it is shown interactively.
        """
        x = np.linspace(self.fn_dict[self.fn_name][1],self.fn_dict[self.fn_name][2],256)
        y = np.linspace(self.fn_dict[self.fn_name][3],self.fn_dict[self.fn_name][4],256)
        xx, yy = np.meshgrid(x,y)
        plt.figure(figsize=(6,6))
        # -yy flips the image vertically so imshow's row order matches the
        # mathematical y axis of the contour plot below.
        plt.imshow(- self.fn_dict[self.fn_name][0](xx, -yy), extent=(self.fn_dict[self.fn_name][1],\
                                                                    self.fn_dict[self.fn_name][2], \
                                                                    self.fn_dict[self.fn_name][3], \
                                                                    self.fn_dict[self.fn_name][4]),\
                                                                    cmap="plasma")
        my_contour = - self.fn_dict[self.fn_name][0](xx,yy)
        # Log-spaced contour levels between the extrema of the (negated) field.
        plt.contour(xx,yy, my_contour,\
                levels = [-elem for elem in \
                np.logspace(np.log10(-np.min(my_contour)), np.log10(1e-3 + -np.max(my_contour)), 8)],\
                cmap="twilight")
        for coords in self.action:
            plt.plot(coords[0], coords[1], "o", mfc=[0.85, 0.5, 0.0], markeredgecolor=[0.0,0.0,0.0])
        plt.axis([self.fn_dict[self.fn_name][1], self.fn_dict[self.fn_name][2],\
                self.fn_dict[self.fn_name][3], self.fn_dict[self.fn_name][4]])
        if save_it:
            plt.savefig("./results/indirect/cmaes_step{}_{}.png".format(tag, self.fn_name))
        else:
            plt.show()
        plt.close()
if __name__ == "__main__":
    # Smoke-test entry point: the environment itself is exercised elsewhere.
    print("OK")
| [
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"numpy.random.random",
"numpy.linspace",
"numpy.cos",
"numpy.min",
"numpy.max"
] | [((4238, 4253), 'numpy.mean', 'np.mean', (['reward'], {}), '(reward)\n', (4245, 4253), True, 'import numpy as np\n'), ((4496, 4574), 'numpy.linspace', 'np.linspace', (['self.fn_dict[self.fn_name][1]', 'self.fn_dict[self.fn_name][2]', '(256)'], {}), '(self.fn_dict[self.fn_name][1], self.fn_dict[self.fn_name][2], 256)\n', (4507, 4574), True, 'import numpy as np\n'), ((4585, 4663), 'numpy.linspace', 'np.linspace', (['self.fn_dict[self.fn_name][3]', 'self.fn_dict[self.fn_name][4]', '(256)'], {}), '(self.fn_dict[self.fn_name][3], self.fn_dict[self.fn_name][4], 256)\n', (4596, 4663), True, 'import numpy as np\n'), ((4680, 4697), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4691, 4697), True, 'import numpy as np\n'), ((4714, 4740), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (4724, 4740), True, 'import matplotlib.pyplot as plt\n'), ((5467, 5605), 'matplotlib.pyplot.axis', 'plt.axis', (['[self.fn_dict[self.fn_name][1], self.fn_dict[self.fn_name][2], self.fn_dict\n [self.fn_name][3], self.fn_dict[self.fn_name][4]]'], {}), '([self.fn_dict[self.fn_name][1], self.fn_dict[self.fn_name][2],\n self.fn_dict[self.fn_name][3], self.fn_dict[self.fn_name][4]])\n', (5475, 5605), True, 'import matplotlib.pyplot as plt\n'), ((5777, 5788), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5786, 5788), True, 'import matplotlib.pyplot as plt\n'), ((3210, 3230), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3218, 3230), True, 'import numpy as np\n'), ((4182, 4207), 'numpy.append', 'np.append', (['reward', 'action'], {}), '(reward, action)\n', (4191, 4207), True, 'import numpy as np\n'), ((5369, 5464), 'matplotlib.pyplot.plot', 'plt.plot', (['coords[0]', 'coords[1]', '"""o"""'], {'mfc': '[0.85, 0.5, 0.0]', 'markeredgecolor': '[0.0, 0.0, 0.0]'}), "(coords[0], coords[1], 'o', mfc=[0.85, 0.5, 0.0], markeredgecolor=[\n 0.0, 0.0, 0.0])\n", (5377, 5464), True, 'import matplotlib.pyplot as plt\n'), 
((5757, 5767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5765, 5767), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2109), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (2106, 2109), True, 'import numpy as np\n'), ((1694, 1713), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (1710, 1713), True, 'import numpy as np\n'), ((2189, 2208), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (2205, 2208), True, 'import numpy as np\n'), ((3939, 3964), 'numpy.append', 'np.append', (['reward', 'coords'], {}), '(reward, coords)\n', (3948, 3964), True, 'import numpy as np\n'), ((1792, 1811), 'numpy.random.random', 'np.random.random', (['(2)'], {}), '(2)\n', (1808, 1811), True, 'import numpy as np\n'), ((659, 680), 'numpy.cos', 'np.cos', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (665, 680), True, 'import numpy as np\n'), ((681, 702), 'numpy.cos', 'np.cos', (['(2 * np.pi * y)'], {}), '(2 * np.pi * y)\n', (687, 702), True, 'import numpy as np\n'), ((5223, 5241), 'numpy.min', 'np.min', (['my_contour'], {}), '(my_contour)\n', (5229, 5241), True, 'import numpy as np\n'), ((5261, 5279), 'numpy.max', 'np.max', (['my_contour'], {}), '(my_contour)\n', (5267, 5279), True, 'import numpy as np\n')] |
import numpy as np
# pylint: disable=E1101
import matplotlib.pyplot as plt
import random
from math import floor, ceil
from logger import log
def genRoughFault(filename, mesh, surfacePts, centerPts, rseed, length, depth, lambdaMin, alpha, Hurst):
    """Generate a self-affine rough fault surface by spectral synthesis and
    write it to a .ply file.

    Random phases with a power-law amplitude spectrum (exponent set by the
    Hurst exponent) are inverse-FFT'd into a height field, rescaled to the
    target RMS roughness (alpha * lambdaMax) and exported.  ``surfacePts``
    and ``centerPts`` are filled in place (units converted km -> m).
    """
    # Input parameters:
    # filename: Filename for saving .ply file
    # mesh: Boolian value for defining whether to create a mesh
    # surfacePts: Empty array for saving the surface points (for creating an intersection)
    # centerPts: Empty array for saving the points at central depth (for creating fault receivers)
    # rseed: Seed for random number generator
    # length: Length of the fault (in km)
    # depth: Depth of the fault (in km)
    # lambdaMin: Minimal wavelength (in km)
    # alpha: Amplitude to wavelength ratio
    # Hurst: Hurst exponent
    random.seed(rseed)
    L = max(length,depth)
    lambdaMax = L
    Ncutoff = int(L/lambdaMin)
    # Factor 2 due to neg & pos. frequencies, refinement, + 1 due to frequency (0, 0)
    refine = 4
    N = refine * Ncutoff * 2 + 1
    freqs = np.zeros(N * N, dtype=complex)
    freqs.shape = (N, N)
    # Power-spectrum exponent: self-affine surface with the given Hurst exponent.
    beta = 2 * (Hurst + 1.)
    for kx in range(0, Ncutoff+1):
        for kz in range(0, Ncutoff+1):
            # No frequency of 0 (constant value)
            if max(kx,kz)==0:
                continue
            # Calculate desired amplitude
            k = np.sqrt(kx**2 + kz**2)
            fac = pow(k, -beta * 0.5)
            # Calculate random phases and scale with desired amplitude
            # (draw order matters: it fixes the realization for a given seed)
            randPhase = random.random()*np.pi*2.
            freqs[kx,kz]=fac*np.exp(randPhase*1.j)
            # Copy conjugates to other quadrants, to get real values in spacial domain
            if kx != 0:
                freqs[N-kx, kz] = np.conjugate(freqs[kx,kz])
            if kz != 0:
                freqs[kx, N-kz] = np.conjugate(freqs[kx,kz])
            if min(kx, kz) != 0:
                freqs[N-kx, N-kz] = freqs[kx,kz]
    # Hermitian symmetry above makes the inverse transform (numerically) real.
    Y = np.real(np.fft.ifft2(freqs))
    dx=L/(N - 1) # -1 because of point (0,0)
    x=np.arange(-length/2,length/2+dx,dx)
    z=np.arange(0.,depth+dx,dx)
    X, Z = np.meshgrid(x, z)
    # Crop square to rectangle
    Y=Y[0:len(z),0:len(x)]
    # compute hrms roughness
    hrms = np.std(Y)
    #scale to targeted Hrms
    target_hrms = alpha * lambdaMax
    # log("hrms = {}, targeted hrms = {}".format(hrms, target_hrms))
    Y = Y * target_hrms / hrms
    # log("Corrected hrms: {}".format(np.std(Y)))
    #for the following study
    # freqs=freqs*target_hrms/hrms
    # Show color map
    # plt.pcolormesh(X,Z,Y)
    # plt.colorbar()
    # plt.show()
    # Save surface points for intersection with box
    for i in range(0, len(x)):
        surfacePts.append([1e3 * x[i], 1e3 * Y[0,i]])
    # Save points at central depth for creating fault receivers
    ctrDepth = int(0.5 * len(z))
    for i in range(0, len(x)):
        centerPts.append([1e3 * x[i], 1e3 * Y[ctrDepth, i], -1e3 * z[ctrDepth]])
    Xf=X.flatten()
    Yf=Y.flatten()
    Zf=Z.flatten()
    # Write ply-file
    fout=open(filename,'w')
    # Header
    fout.write("ply\n")
    fout.write("format ascii 1.0\n")
    fout.write("element vertex %i\n" % len(Yf))
    fout.write("property float32 x\n")
    fout.write("property float32 y\n")
    fout.write("property float32 z\n")
    # Header for faces
    if mesh:
        fout.write("element face %i\n" % (2 * (len(z) - 1) * (len(x) - 1)))
        fout.write("property list uint8 int32 vertex_index\n")
    fout.write("end_header\n")
    # Vertices
    for i in range(0, len(Yf)):
        if Zf[i] == 0: # prevent -0.000000
            fout.write(str(1e3*Xf[i]) + " " + str(1e3*Yf[i]) + " " + str(0.) + "\n") # do not use the % Operator due to precision consistency
        else:
            fout.write(str(1e3*Xf[i]) + " " + str(1e3*Yf[i]) + " " + str(-1e3*Zf[i]) + "\n") # do not use the % Operator due to precision consistency
    # Faces
    if mesh:
        # Two triangles per grid cell; vertex indices follow row-major order.
        for i in range(0, len(z) - 1):
            for j in range(0, len(x) - 1):
                n = j + i * len(x)
                fout.write("3 %i %i %i\n" %(n, n+len(x), n+1))
                fout.write("3 %i %i %i\n" %(n+1, n+len(x), n+1+len(x)))
    fout.close()
if __name__ == '__main__':
    # Generate one sample rough fault surface with a fixed seed; the two
    # lists are filled in place by genRoughFault.
    surface_points = []
    center_points = []
    genRoughFault("roughFault.ply", True, surface_points, center_points,
                  '0254887388', 40., 20., 1., pow(10., -1.9), 0.8)
| [
"numpy.meshgrid",
"numpy.std",
"numpy.zeros",
"random.random",
"random.seed",
"numpy.arange",
"numpy.exp",
"numpy.conjugate",
"numpy.fft.ifft2",
"numpy.sqrt"
] | [((839, 857), 'random.seed', 'random.seed', (['rseed'], {}), '(rseed)\n', (850, 857), False, 'import random\n'), ((1094, 1124), 'numpy.zeros', 'np.zeros', (['(N * N)'], {'dtype': 'complex'}), '(N * N, dtype=complex)\n', (1102, 1124), True, 'import numpy as np\n'), ((2110, 2153), 'numpy.arange', 'np.arange', (['(-length / 2)', '(length / 2 + dx)', 'dx'], {}), '(-length / 2, length / 2 + dx, dx)\n', (2119, 2153), True, 'import numpy as np\n'), ((2153, 2183), 'numpy.arange', 'np.arange', (['(0.0)', '(depth + dx)', 'dx'], {}), '(0.0, depth + dx, dx)\n', (2162, 2183), True, 'import numpy as np\n'), ((2193, 2210), 'numpy.meshgrid', 'np.meshgrid', (['x', 'z'], {}), '(x, z)\n', (2204, 2210), True, 'import numpy as np\n'), ((2317, 2326), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (2323, 2326), True, 'import numpy as np\n'), ((2034, 2053), 'numpy.fft.ifft2', 'np.fft.ifft2', (['freqs'], {}), '(freqs)\n', (2046, 2053), True, 'import numpy as np\n'), ((1429, 1455), 'numpy.sqrt', 'np.sqrt', (['(kx ** 2 + kz ** 2)'], {}), '(kx ** 2 + kz ** 2)\n', (1436, 1455), True, 'import numpy as np\n'), ((1645, 1669), 'numpy.exp', 'np.exp', (['(randPhase * 1.0j)'], {}), '(randPhase * 1.0j)\n', (1651, 1669), True, 'import numpy as np\n'), ((1817, 1844), 'numpy.conjugate', 'np.conjugate', (['freqs[kx, kz]'], {}), '(freqs[kx, kz])\n', (1829, 1844), True, 'import numpy as np\n'), ((1904, 1931), 'numpy.conjugate', 'np.conjugate', (['freqs[kx, kz]'], {}), '(freqs[kx, kz])\n', (1916, 1931), True, 'import numpy as np\n'), ((1590, 1605), 'random.random', 'random.random', ([], {}), '()\n', (1603, 1605), False, 'import random\n')] |
############################################ Inputs #############################################
def _ask(prompt, cast, is_valid, echo_value=False, preamble=()):
    """Prompt repeatedly until ``cast(input(prompt))`` satisfies ``is_valid``.

    preamble: lines printed before every attempt (used for the option menu).
    echo_value: when True the rejected value itself is echoed back, matching
    the original script's behaviour for the menu/yes-no/percentage prompts;
    otherwise the generic message is shown.
    """
    while True:
        for line in preamble:
            print(line)
        try:
            value = cast(input(prompt))
            if not is_valid(value):
                raise ValueError(value if echo_value else "Enter a valid value")
        except ValueError as e:
            print("Invalid value:", e)
        else:
            return value


# Geometry: the macropore is a disc of radius R inside a square of width W.
macroporousSize = _ask("What is the macropore radius (R)? ", int, lambda v: v > 0)
microporousSize = _ask("What is the square width (W)? ", int, lambda v: v > macroporousSize)
# Step budgets and ensemble size.
numberMaxStepsMacropore = _ask("What is the maximum number of steps (macropore)? ", int, lambda v: v > 0)
numberMaxStepsMicropore = _ask("What is the maximum number of steps (micropore)? ", int, lambda v: v > 0)
numberWalks = _ask("How many random walks? ", int, lambda v: v > 0)
# Starting-position strategy for the walkers.
choose = _ask(
    "Enter your option number: ", int, lambda v: v in (1, 2, 3), echo_value=True,
    preamble=(
        "Choose an option:",
        "1. Random walker should start in the middle",
        "2. Random walker should start inside the macropore",
        "3. Random walker should start anywhere (micro and macroporosity)?",
    ),
)
followingRule = "no"
porcetageMicropore = 0
if choose == 3:
    followingRule = _ask(
        "Should microporosity random walkers difuse into the macroporosity? [yes/no] ",
        lambda s: s.lower(), lambda v: v in ("yes", "no"), echo_value=True)
    # BUG FIX: the original check used `or` -- `(p >= 0) or (p <= 100)` is
    # true for every integer, so any value was accepted.  A percentage must
    # lie in [0, 100].
    porcetageMicropore = _ask(
        "What is the percentage (%) of random walkers in the micropore? [only number] ",
        int, lambda v: 0 <= v <= 100, echo_value=True)
# T2 histogram configuration.
minimumTime2 = _ask("What is the T2min? ", float, lambda v: v > 0)
maximumTime2 = _ask("What is the T2max? ", int, lambda v: v > minimumTime2)
numberBins = _ask("What is the number of bins? ", int, lambda v: v > 0)
# Convert the yes/no answer into the boolean flag used downstream.
followingRule = followingRule == "yes"
############################################ imports #########################################
# I changed the location to become the code more fast
import numpy
import pylab
import random
from openpyxl import Workbook
from openpyxl.drawing.image import Image
from openpyxl.styles import Alignment
############################################ Class #############################################
class Environment:
    """Geometric region a walker may occupy.

    solidShape selects the geometry: "micropore" is a square of side `size`
    centered at the origin; "macropore" is an open disc of radius `size`.
    numberMaxSteps is the step budget a walker gets inside this region.
    """
    def __init__(self, solidShape, size, numberMaxSteps):
        self.size = size
        self.solidShape = solidShape
        self.numberMaxSteps = numberMaxSteps

    def inside(self, x, y):
        """Return True when (x, y) lies inside this region."""
        if self.solidShape == "micropore":
            # Square: both coordinates within +/- half the side length.
            half = self.size / 2
            return -half <= x <= half and -half <= y <= half
        elif self.solidShape == "macropore":
            # Disc: strictly closer to the origin than the radius.
            return bool(numpy.sqrt(x ** 2 + y ** 2) < self.size)
class RandomWalk():
    """A single 2-D random walker on the integer lattice.

    Every step moves diagonally (dx and dy are each drawn from {-1, +1}).
    The walker keeps separate step counters for the macropore (disc) and
    micropore (square) regions; a walk ends when the step budget of the
    current region is exhausted or when the walker crosses a boundary
    (depending on `followingRule`).
    """
    def __init__(self, countStepsMacropore, countStepsMicropore, micropore, macropore, count, x, y, initialStep):
        self.countStepsMacropore = countStepsMacropore # total steps taken inside the macropore
        self.countStepsMicropore = countStepsMicropore # total steps taken inside the micropore
        self.micropore = micropore
        self.macropore = macropore
        self.count = count # total number of steps
        self.x = x
        self.y = y
        self.initialStep = initialStep
    def square(self, valueX, valueY):
        """Classify a position: 0 = inside the macropore disc, 1 = inside the
        micropore square (but outside the disc), -1 = outside both."""
        if(self.macropore.inside(valueX, valueY)):
            return 0
        if(self.micropore.inside(valueX, valueY)):
            return 1
        return -1
    def createStepInicial(self, choose):
        """Draw the starting position according to `choose`:
        1 = origin, 2 = uniform inside the macropore (rejection sampling),
        3 = uniform in the micropore region (rejection sampling)."""
        if choose==1:
            self.initialStep = [0, 0]
        elif choose==2:
            radius = self.macropore.size
            self.initialStep = [random.randint(-1*radius, radius),random.randint(-1*radius, radius)]
            # Re-draw until the point is NOT in the micropore-only region.
            while(self.square(self.initialStep[0],self.initialStep[1])==1):
                self.initialStep = [random.randint(-1*radius, radius),random.randint(-1*radius, radius)]
        elif choose==3:
            width = int(self.micropore.size/2)
            self.initialStep = [random.randint(-1*width, width), random.randint(-1*width, width)]
            # Re-draw until the point is NOT inside the macropore disc.
            while(self.square(self.initialStep[0],self.initialStep[1])==0):
                self.initialStep = [random.randint(-1*width, width), random.randint(-1*width, width)]
        return
    def walkingMacropore(self):
        """Walk inside the macropore until the step budget is exhausted or the
        walker leaves the disc (square == 1)."""
        if(len(self.x)==0): #it has never been in the micropore
            ## x0, y0 are the initial steps
            self.x.append(self.initialStep[0])
            self.y.append(self.initialStep[1])
            ## x1, x2, ... and y1, y2, ... equal the previous value plus or
            ## minus 1, chosen uniformly at random for each axis.
            for step in range(1, self.macropore.numberMaxSteps+1):
                randomX = random.choice([-1, 1])
                randomY = random.choice([-1, 1])
                if randomX<0:
                    valueX = self.x[step-1]-1
                else:
                    valueX = self.x[step-1]+1
                if randomY<0:
                    valueY = self.y[step-1]-1
                else:
                    valueY = self.y[step-1]+1
                square = self.square(valueX, valueY)
                self.x.append(valueX)
                self.y.append(valueY)
                self.countStepsMacropore+=1
                self.count+=1
                if square==1:
                    # Left the disc: drop the stored initial position and stop.
                    self.x = self.x[1:]
                    self.y = self.y[1:]
                    return
            # Budget exhausted: drop the stored initial position.
            self.x = self.x[1:len(self.x)]
            self.y = self.y[1:len(self.y)]
            return
        else: #it arrived here from the micropore
            # Continue the existing trajectory (initial step already trimmed).
            stepsPrevious = len(self.x)
            for step in range(stepsPrevious, stepsPrevious+self.macropore.numberMaxSteps):
                randomX = random.choice([-1, 1])
                randomY = random.choice([-1, 1])
                if randomX<0:
                    valueX = self.x[step-1]-1
                else:
                    valueX = self.x[step-1]+1
                if randomY<0:
                    valueY = self.y[step-1]-1
                else:
                    valueY = self.y[step-1]+1
                square = self.square(valueX, valueY)
                self.x.append(valueX)
                self.y.append(valueY)
                self.countStepsMacropore+=1
                self.count+=1
                if square==1:
                    return
            return
    def walkingMicropore(self, followingRule):
        """Walk inside the micropore; on entering the macropore (square == 0)
        continue there only when `followingRule` is True, and stop entirely
        when the walker leaves the outer square (square == -1)."""
        ## x0 and y0 are the initial steps
        self.x.append(self.initialStep[0])
        self.y.append(self.initialStep[1])
        ## x1, x2, ... and y1, y2, ... equal the previous value plus or
        ## minus 1, chosen uniformly at random for each axis.
        for step in range(1, self.micropore.numberMaxSteps+1):
            randomX = random.choice([-1, 1])
            randomY = random.choice([-1, 1])
            if randomX<0:
                valueX = self.x[step-1]-1
            else:
                valueX = self.x[step-1]+1
            if randomY<0:
                valueY = self.y[step-1]-1
            else:
                valueY = self.y[step-1]+1
            square = self.square(valueX, valueY)
            self.x.append(valueX)
            self.y.append(valueY)
            self.count+=1
            self.countStepsMicropore+=1
            if square==0:
                # Entered the macropore: trim the initial position, then
                # optionally keep walking in the disc.
                self.x = self.x[1:]
                self.y = self.y[1:]
                if followingRule==True:
                    self.walkingMacropore()
                return
            if (square==-1):
                # Left the outer square entirely: stop.
                self.x = self.x[1:]
                self.y = self.y[1:]
                return
        # Budget exhausted: drop the stored initial position.
        self.x = self.x[1:]
        self.y = self.y[1:]
    def walking(self, choose, followingRule):
        """Pick a starting position and dispatch to the walker for the region
        that position falls in."""
        self.createStepInicial(choose)
        square = self.square(self.initialStep[0], self.initialStep[1])
        if(square==0):
            return self.walkingMacropore()
        return self.walkingMicropore(followingRule)
######################################### Write Files ###############################################
class RandomWalkWalking():
    """Driver that runs an ensemble of random walks, logs every step to a
    text file, builds a T2 histogram and exports everything to Excel.

    FIX over the original: the five list parameters used mutable default
    arguments (``[]``), so every instance built with the defaults shared the
    *same* list objects and accumulated state across instances.  They now
    default to ``None`` and a fresh list is created per instance — callers
    that passed explicit lists are unaffected.
    """
    def __init__(self, numberWalks, porcetageMicropore, microporousSize, macroporousSize, numberMaxStepsMicropore, numberMaxStepsMacropore, followingRule, numberBins, minimumTime2, maximumTime2, timeTwoList=None, frequencyList=None, fileTxt="", fileExcel="", sheetExcel="", totalWalks=1, sumCountSteps=0, lastStepCount=None, lastStepX=None, lastStepY=None):
        self.numberWalks = numberWalks
        self.porcetageMicropore = porcetageMicropore
        self.macropore = "" # macropore Environment (built by createEnvironment)
        self.micropore = "" # micropore Environment (built by createEnvironment)
        self.microporousSize = microporousSize
        self.macroporousSize = macroporousSize
        self.numberMaxStepsMicropore = numberMaxStepsMicropore
        self.numberMaxStepsMacropore = numberMaxStepsMacropore
        self.followingRule = followingRule
        self.numberBins = numberBins
        self.minimumTime2 = minimumTime2
        self.maximumTime2 = maximumTime2
        # Mutable containers: fresh per instance unless supplied by the caller.
        self.timeTwoList = [] if timeTwoList is None else timeTwoList # T2 value of each histogram bin
        self.frequencyList = [] if frequencyList is None else frequencyList # count per histogram bin
        self.fileTxt = fileTxt
        self.fileExcel = fileExcel
        self.sheetExcel = sheetExcel
        self.totalWalks = totalWalks # running count of walks
        self.sumCountSteps = sumCountSteps # sum of step counts over all walks
        self.lastStepCount = [] if lastStepCount is None else lastStepCount # total steps of each walk
        self.lastStepX = [] if lastStepX is None else lastStepX # final x of each walk
        self.lastStepY = [] if lastStepY is None else lastStepY # final y of each walk
    def createEnvironment(self):
        """Build the micropore (square) and macropore (disc) environments."""
        self.micropore = Environment("micropore", self.microporousSize, self.numberMaxStepsMicropore)
        self.macropore = Environment("macropore", self.macroporousSize, self.numberMaxStepsMacropore)
    def createWalk(self):
        """Return a fresh RandomWalk bound to the two environments."""
        countStepsMacropore=0
        countStepsMicropore=0
        count=0
        x, y, initialStep = [], [], []
        walk = RandomWalk(countStepsMacropore, countStepsMicropore, self.micropore, self.macropore, count, x, y, initialStep)
        return walk
    def walkingOneRandomWalk(self, choose):
        """Build the environments and run one complete walk."""
        self.createEnvironment()
        walk = self.createWalk()
        walk.walking(choose, self.followingRule)
        return walk
    def walkinRandomWalkChoose(self, choose):
        """Run one walk, write its full trajectory to the log file and record
        its final position and step count for the histogram."""
        walk = self.walkingOneRandomWalk(choose)
        self.fileTxt.write("Walk: "+str(self.totalWalks)+" x: "+ str(walk.initialStep[0])+" y: "+ str(walk.initialStep[1])+" step initial environment: "+ ("macropore" if walk.macropore.inside(walk.initialStep[0], walk.initialStep[1]) else "micropore")+" square: "+ str(walk.square(walk.initialStep[0], walk.initialStep[1]))+"\n")
        self.lastStepCount.append(walk.count) #list with all total step quantities
        self.lastStepX.append(walk.x[walk.count-1]) #list with the value of x for the last step of each random walk
        self.lastStepY.append(walk.y[walk.count-1]) #list with the value of y for the last step of each random walk
        for step in range(walk.count):
            self.fileTxt.write("Walk: "+str(self.totalWalks)+" "+"x: "+ str(walk.x[step])+" y: "+ str(walk.y[step])+" step: "+str(step+1)+" environment: "+ ("macropore" if walk.macropore.inside(walk.x[step], walk.y[step]) else "micropore")+" square: "+ str(walk.square(walk.x[step], walk.y[step]))+"\n")
        self.fileTxt.write("-------------------------------------------------------------------------------------------\n")
        self.totalWalks+=1
        self.sumCountSteps+=walk.count
        return walk
    def startFile(self):
        """Open the per-run log file and write its header."""
        self.fileTxt = open('RandomWalk-R'+str(self.macroporousSize)+'-W'+str(self.microporousSize)+'-S'+str(self.numberMaxStepsMacropore+self.numberMaxStepsMicropore)+'-RW'+str(self.numberWalks)+'.txt', 'w')
        # BUG FIX: the header previously read the *global* macroporousSize
        # instead of the instance attribute, which breaks if the class is
        # used outside this script.
        self.fileTxt.write("RandomWalk - Raio: "+str(self.macroporousSize)+" - Wiidth: "+str(self.microporousSize)+ " - Steps: "+str(self.numberMaxStepsMacropore+self.numberMaxStepsMicropore)+" - Random Walks: "+str(self.numberWalks)+"\n")
        self.fileTxt.write("===========================================================================================\n\n")
    def endFile(self):
        """Write the run summary (mean steps, macroporosity) and close the log."""
        mediaSteps = self.sumCountSteps/self.numberWalks
        # Macroporosity = disc area / square area.
        macroporosity = (numpy.pi*(self.macroporousSize**2))/(self.microporousSize**2)
        self.fileTxt.write("===========================================================================================\n")
        self.fileTxt.write("The average step for "+str(self.numberWalks)+" random walkers is "+str(mediaSteps))
        self.fileTxt.write("The macroporosity is: "+"{:.2%}".format(macroporosity))
        self.fileTxt.close()
        print("The average step for "+str(self.numberWalks)+" random walkers is "+str(mediaSteps))
        print("The macroporosity is: "+"{:.2%}".format(macroporosity))
    def startfileExcel(self):
        """Create the output workbook and its single sheet."""
        self.fileExcel = Workbook()
        self.sheetExcel = self.fileExcel.active
        self.sheetExcel.title = 'RandomWalk-Bins'+str(self.numberBins)
    def createCellfileExcel(self, paramenters):
        """Write a list of (row, column, value) triples as centered cells."""
        for cell in paramenters:
            self.sheetExcel.cell(row=cell[0], column=cell[1], value=cell[2])
            self.sheetExcel.cell(row=cell[0], column=cell[1]).alignment=Alignment(horizontal="center", vertical="center")
    def buildingHistogram(self):
        """Bin the per-walk step counts into log-spaced T2 bins and plot them."""
        # Log-spaced bin edges between T2min and T2max.
        for bin in range(self.numberBins):
            timeTwoValue = self.minimumTime2*((self.maximumTime2/self.minimumTime2)**(bin/(self.numberBins-1)))
            self.timeTwoList.append(timeTwoValue)
        #Frequency
        self.frequencyList = numpy.zeros((self.numberBins,), dtype=int)
        for bin in range(len(self.timeTwoList)):
            for walk in range(len(self.lastStepX)):
                # NOTE(review): for bin == 0 the second test reads
                # timeTwoList[-1] (the last edge) via the negative index, so
                # walks below the first edge are counted once by the first
                # test — kept as-is to preserve the existing histograms.
                if (bin==0):
                    if (self.lastStepCount[walk]>0) and (self.lastStepCount[walk]<=self.timeTwoList[bin]):
                        self.frequencyList[bin]+=1
                if (self.lastStepCount[walk]>self.timeTwoList[bin-1]) and (self.lastStepCount[walk]<=self.timeTwoList[bin]):
                    self.frequencyList[bin]+=1
        #graphic
        pylab.plot(self.timeTwoList, self.frequencyList, 'o')
        pylab.title('Histogram of Random Walks',fontsize=15)
        pylab.xlabel('T2 (msec)', fontsize=12)
        scaleY = []
        maxFrequencys = max(self.frequencyList)
        division = int(maxFrequencys/15)/100
        # NOTE(review): round() returns a value — this result is discarded,
        # so the statement has no effect; likely intended
        # `division = round(division + 0.5, 1)`.  Left unchanged to keep the
        # existing axis scaling.
        round(division+0.5, 1)
        division = division*100
        maxEscalay = int(max(self.frequencyList)/division)*division+division
        for i in range(int(maxEscalay/division)+1):
            scaleY.append(i*division)
        pylab.xscale('log')
        pylab.yticks(scaleY)
        pylab.grid(True)
        pylab.savefig('RandomWalk-R'+str(self.macroporousSize)+'-W'+str(self.microporousSize)+'-S'+str(self.numberMaxStepsMacropore+self.numberMaxStepsMicropore)+'-RW'+str(self.numberWalks)+'.png',dpi=600)
        pylab.show()
    def writingExcel(self):
        """Dump per-walk results and the histogram (data + image) to .xlsx."""
        self.buildingHistogram()
        self.startfileExcel()
        paramenters = [[1, 1, "Walk"],[1, 2, "X"],[1, 3, "Y"],[1, 4, "Steps"], [1, 7, "T2min"], [1, 8, self.minimumTime2], [2, 7, "T2max"], [2, 8, self.maximumTime2], [3, 7, "numberBins"], [3, 8, self.numberBins], [1, 10, "I"], [1, 11, "T2"], [1, 12, "Freq."]]
        self.createCellfileExcel(paramenters)
        for walk in range(len(self.lastStepX)):
            paramenters = [[walk+2, 1, walk+1], [walk+2, 2, self.lastStepX[walk]], [walk+2, 3, self.lastStepY[walk]],[walk+2, 4, self.lastStepCount[walk]]]
            self.createCellfileExcel(paramenters)
        for i in range(len(self.frequencyList)):
            paramenters = [[i+2, 10, i+1], [i+2, 11, self.timeTwoList[i]], [i+2, 12, self.frequencyList[i]]]
            self.createCellfileExcel(paramenters)
        histogram=Image('RandomWalk-R'+str(self.macroporousSize)+'-W'+str(self.microporousSize)+'-S'+str(self.numberMaxStepsMacropore+self.numberMaxStepsMicropore)+'-RW'+str(self.numberWalks)+'.png')
        # Shrink the embedded image so it fits next to the data columns.
        histogram.width = histogram.width/7
        histogram.height =histogram.height/7
        self.sheetExcel.add_image(histogram, 'N2')
        self.fileExcel.save('RandomWalk-R'+str(self.macroporousSize)+'-W'+str(self.microporousSize)+'-S'+str(self.numberMaxStepsMacropore+self.numberMaxStepsMicropore)+'-RW'+str(self.numberWalks)+'.xlsx')
    def walkingRandomWalks(self, choose):
        """Run the whole ensemble: `porcetageMicropore` percent of the walkers
        start in the micropore (mode 3), the rest in the macropore (mode 2);
        for `choose == 1` every walker starts at the origin."""
        self.startFile()
        numberWalksMicropore = int((self.porcetageMicropore/100)*self.numberWalks)
        numberWalksMacropore = self.numberWalks-int((self.porcetageMicropore/100)*self.numberWalks)
        if(choose!=1):
            for walk in range(numberWalksMicropore):
                walk = self.walkinRandomWalkChoose(3)
            for walk in range(numberWalksMacropore):
                walk = self.walkinRandomWalkChoose(2)
            self.endFile()
            self.writingExcel()
        else:
            for walk in range(self.numberWalks):
                walk = self.walkinRandomWalkChoose(1)
            self.endFile()
            self.writingExcel()
# Build the walker ensemble from the interactive inputs above and run the
# full simulation (writes the .txt log, the histogram .png and the .xlsx summary).
groupRandomWalks = RandomWalkWalking(numberWalks, porcetageMicropore,microporousSize, macroporousSize,numberMaxStepsMicropore, numberMaxStepsMacropore, followingRule, numberBins, minimumTime2, maximumTime2)
groupRandomWalks.walkingRandomWalks(choose)
"pylab.title",
"pylab.show",
"openpyxl.Workbook",
"random.randint",
"numpy.zeros",
"pylab.grid",
"random.choice",
"pylab.xscale",
"pylab.yticks",
"openpyxl.styles.Alignment",
"pylab.xlabel",
"pylab.plot",
"numpy.sqrt"
] | [((15381, 15391), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (15389, 15391), False, 'from openpyxl import Workbook\n'), ((16089, 16131), 'numpy.zeros', 'numpy.zeros', (['(self.numberBins,)'], {'dtype': 'int'}), '((self.numberBins,), dtype=int)\n', (16100, 16131), False, 'import numpy\n'), ((16626, 16679), 'pylab.plot', 'pylab.plot', (['self.timeTwoList', 'self.frequencyList', '"""o"""'], {}), "(self.timeTwoList, self.frequencyList, 'o')\n", (16636, 16679), False, 'import pylab\n'), ((16689, 16742), 'pylab.title', 'pylab.title', (['"""Histogram of Random Walks"""'], {'fontsize': '(15)'}), "('Histogram of Random Walks', fontsize=15)\n", (16700, 16742), False, 'import pylab\n'), ((16751, 16789), 'pylab.xlabel', 'pylab.xlabel', (['"""T2 (msec)"""'], {'fontsize': '(12)'}), "('T2 (msec)', fontsize=12)\n", (16763, 16789), False, 'import pylab\n'), ((17147, 17166), 'pylab.xscale', 'pylab.xscale', (['"""log"""'], {}), "('log')\n", (17159, 17166), False, 'import pylab\n'), ((17175, 17195), 'pylab.yticks', 'pylab.yticks', (['scaleY'], {}), '(scaleY)\n', (17187, 17195), False, 'import pylab\n'), ((17204, 17220), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (17214, 17220), False, 'import pylab\n'), ((17436, 17448), 'pylab.show', 'pylab.show', ([], {}), '()\n', (17446, 17448), False, 'import pylab\n'), ((9158, 9180), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9171, 9180), False, 'import random\n'), ((9203, 9225), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (9216, 9225), False, 'import random\n'), ((15749, 15798), 'openpyxl.styles.Alignment', 'Alignment', ([], {'horizontal': '"""center"""', 'vertical': '"""center"""'}), "(horizontal='center', vertical='center')\n", (15758, 15798), False, 'from openpyxl.styles import Alignment\n'), ((6916, 6938), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (6929, 6938), False, 'import random\n'), ((6965, 6987), 'random.choice', 'random.choice', 
(['[-1, 1]'], {}), '([-1, 1])\n', (6978, 6987), False, 'import random\n'), ((7977, 7999), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (7990, 7999), False, 'import random\n'), ((8026, 8048), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (8039, 8048), False, 'import random\n'), ((4670, 4697), 'numpy.sqrt', 'numpy.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (4680, 4697), False, 'import numpy\n'), ((5748, 5783), 'random.randint', 'random.randint', (['(-1 * radius)', 'radius'], {}), '(-1 * radius, radius)\n', (5762, 5783), False, 'import random\n'), ((5782, 5817), 'random.randint', 'random.randint', (['(-1 * radius)', 'radius'], {}), '(-1 * radius, radius)\n', (5796, 5817), False, 'import random\n'), ((5929, 5964), 'random.randint', 'random.randint', (['(-1 * radius)', 'radius'], {}), '(-1 * radius, radius)\n', (5943, 5964), False, 'import random\n'), ((5963, 5998), 'random.randint', 'random.randint', (['(-1 * radius)', 'radius'], {}), '(-1 * radius, radius)\n', (5977, 5998), False, 'import random\n'), ((6101, 6134), 'random.randint', 'random.randint', (['(-1 * width)', 'width'], {}), '(-1 * width, width)\n', (6115, 6134), False, 'import random\n'), ((6134, 6167), 'random.randint', 'random.randint', (['(-1 * width)', 'width'], {}), '(-1 * width, width)\n', (6148, 6167), False, 'import random\n'), ((6279, 6312), 'random.randint', 'random.randint', (['(-1 * width)', 'width'], {}), '(-1 * width, width)\n', (6293, 6312), False, 'import random\n'), ((6312, 6345), 'random.randint', 'random.randint', (['(-1 * width)', 'width'], {}), '(-1 * width, width)\n', (6326, 6345), False, 'import random\n')] |
# -*- coding: utf-8 -*-
__all__ = ["InferShapeTester"]
import numpy as np
from aesara_theano_fallback import aesara as theano
class InferShapeTester:
    """Base class for tests that validate an Op's ``infer_shape`` method.

    Subclasses call :meth:`_compile_and_check` with symbolic inputs/outputs
    and matching numeric inputs; it verifies both that the compiled shape
    graph no longer contains the Op under test (i.e. shapes were inferred
    symbolically) and that the inferred shapes agree with the shapes of the
    actually computed outputs.
    """

    def setup_method(self):
        """Build the compilation mode used by all shape checks."""
        # Take into account any mode that may be defined in a child class
        # and it can be None
        mode = getattr(self, "mode", None)
        if mode is None:
            mode = theano.compile.get_default_mode()
        # This mode seems to be the minimal one including the shape_i
        # optimizations, if we don't want to enumerate them explicitly.
        self.mode = mode.including("canonicalize")

    def _compile_and_check(
        self,
        inputs,
        outputs,
        numeric_inputs,
        cls,
        excluding=None,
        warn=True,
        check_topo=True,
    ):
        """Compile ``outputs`` and their shapes, then cross-check them.

        Parameters
        ----------
        inputs, outputs : symbolic variables of the graph under test.
        numeric_inputs : concrete values fed to the compiled functions.
        cls : the Op class whose ``infer_shape`` is being exercised.
        excluding : optional optimizations to exclude from the mode.
        warn : scan inputs for duplicate dimension sizes (see note below).
        check_topo : assert the Op was optimized out of the shape graph.
        """
        mode = self.mode
        if excluding:
            mode = mode.excluding(*excluding)
        if warn:
            # NOTE(review): this scan detects inputs whose non-broadcastable
            # dimensions share a size (equal dims can mask infer_shape bugs),
            # but it only ``break``s without emitting an actual warning —
            # confirm the intended behavior.
            for var, inp in zip(inputs, numeric_inputs):
                if isinstance(inp, (int, float, list, tuple)):
                    inp = var.type.filter(inp)
                if not hasattr(inp, "shape"):
                    continue
                # remove broadcasted dims as it is sure they can't be
                # changed to prevent the same dim problem.
                if hasattr(var.type, "broadcastable"):
                    shp = [
                        inp.shape[i]
                        for i in range(inp.ndim)
                        if not var.type.broadcastable[i]
                    ]
                else:
                    shp = inp.shape
                if len(set(shp)) != len(shp):
                    break
        outputs_function = theano.function(inputs, outputs, mode=mode)
        shapes_function = theano.function(
            inputs, [o.shape for o in outputs], mode=mode
        )
        # theano.printing.debugprint(shapes_function)
        # Check that the Op is removed from the compiled function.
        if check_topo:
            topo_shape = shapes_function.maker.fgraph.toposort()
            assert not any(isinstance(t.op, cls) for t in topo_shape)
            topo_out = outputs_function.maker.fgraph.toposort()
            assert any(isinstance(t.op, cls) for t in topo_out)
        # Check that the shape produced agrees with the actual shape.
        numeric_outputs = outputs_function(*numeric_inputs)
        numeric_shapes = shapes_function(*numeric_inputs)
        for out, shape in zip(numeric_outputs, numeric_shapes):
            assert np.all(out.shape == shape), (out.shape, shape)
| [
"aesara_theano_fallback.aesara.function",
"numpy.all",
"aesara_theano_fallback.aesara.compile.get_default_mode"
] | [((1674, 1717), 'aesara_theano_fallback.aesara.function', 'theano.function', (['inputs', 'outputs'], {'mode': 'mode'}), '(inputs, outputs, mode=mode)\n', (1689, 1717), True, 'from aesara_theano_fallback import aesara as theano\n'), ((1744, 1806), 'aesara_theano_fallback.aesara.function', 'theano.function', (['inputs', '[o.shape for o in outputs]'], {'mode': 'mode'}), '(inputs, [o.shape for o in outputs], mode=mode)\n', (1759, 1806), True, 'from aesara_theano_fallback import aesara as theano\n'), ((372, 405), 'aesara_theano_fallback.aesara.compile.get_default_mode', 'theano.compile.get_default_mode', ([], {}), '()\n', (403, 405), True, 'from aesara_theano_fallback import aesara as theano\n'), ((2499, 2525), 'numpy.all', 'np.all', (['(out.shape == shape)'], {}), '(out.shape == shape)\n', (2505, 2525), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Visualize training history: plot train/validation loss per epoch and
# mark the epoch with the lowest validation loss with dashed guide lines.
fname = 'model_train.log'
print('Reading:', fname)
df = pd.read_csv(fname)

# Epochs in the log are zero-based; shift to 1-based for plotting.
epoch = df['epoch'].values + 1
loss = df['loss'].values
val_loss = df['val_loss'].values

print('epochs: %8d ... %d' % (epoch.min(), epoch.max()))
print('loss: %.6f ... %.6f' % (loss.min(), loss.max()))
print('val_loss: %.6f ... %.6f' % (val_loss.min(), val_loss.max()))

plt.plot(epoch, loss, label='train loss')
plt.plot(epoch, val_loss, label='validation loss')
plt.xlabel('epoch')
plt.ylabel('mean absolute error (MW m$^{-3}$)')
plt.legend()
plt.grid()

# Locate the best (lowest) validation loss and its epoch.
best = val_loss.argmin()
min_val_loss = val_loss[best]
min_val_epoch = epoch[best]
print('min_val_loss: %.6f' % min_val_loss)
print('min_val_epoch:', min_val_epoch)

# Dashed cross-hair pointing at the minimum of the validation curve.
(x_min, x_max) = plt.xlim()
(y_min, y_max) = plt.ylim()
plt.plot([x_min, min_val_epoch], [min_val_loss, min_val_loss], 'k--')
plt.plot([min_val_epoch, min_val_epoch], [0., min_val_loss], 'k--')
plt.xlim(0, x_max)
plt.ylim(0., y_max)
plt.tight_layout()

fname = 'plot_train.png'
print('Writing:', fname)
plt.savefig(fname)
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.argmin",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotl... | [((129, 147), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (140, 147), True, 'import pandas as pd\n'), ((439, 480), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'loss'], {'label': '"""train loss"""'}), "(epoch, loss, label='train loss')\n", (447, 480), True, 'import matplotlib.pyplot as plt\n'), ((481, 531), 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'val_loss'], {'label': '"""validation loss"""'}), "(epoch, val_loss, label='validation loss')\n", (489, 531), True, 'import matplotlib.pyplot as plt\n'), ((533, 552), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (543, 552), True, 'import matplotlib.pyplot as plt\n'), ((553, 600), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean absolute error (MW m$^{-3}$)"""'], {}), "('mean absolute error (MW m$^{-3}$)')\n", (563, 600), True, 'import matplotlib.pyplot as plt\n'), ((602, 614), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (612, 614), True, 'import matplotlib.pyplot as plt\n'), ((615, 625), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (623, 625), True, 'import matplotlib.pyplot as plt\n'), ((631, 650), 'numpy.argmin', 'np.argmin', (['val_loss'], {}), '(val_loss)\n', (640, 650), True, 'import numpy as np\n'), ((804, 814), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (812, 814), True, 'import matplotlib.pyplot as plt\n'), ((832, 842), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (840, 842), True, 'import matplotlib.pyplot as plt\n'), ((844, 913), 'matplotlib.pyplot.plot', 'plt.plot', (['[x_min, min_val_epoch]', '[min_val_loss, min_val_loss]', '"""k--"""'], {}), "([x_min, min_val_epoch], [min_val_loss, min_val_loss], 'k--')\n", (852, 913), True, 'import matplotlib.pyplot as plt\n'), ((914, 982), 'matplotlib.pyplot.plot', 'plt.plot', (['[min_val_epoch, min_val_epoch]', '[0.0, min_val_loss]', '"""k--"""'], {}), "([min_val_epoch, min_val_epoch], [0.0, min_val_loss], 'k--')\n", (922, 982), True, 
'import matplotlib.pyplot as plt\n'), ((983, 1001), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'x_max'], {}), '(0, x_max)\n', (991, 1001), True, 'import matplotlib.pyplot as plt\n'), ((1002, 1022), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', 'y_max'], {}), '(0.0, y_max)\n', (1010, 1022), True, 'import matplotlib.pyplot as plt\n'), ((1023, 1041), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1039, 1041), True, 'import matplotlib.pyplot as plt\n'), ((1093, 1111), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (1104, 1111), True, 'import matplotlib.pyplot as plt\n'), ((271, 284), 'numpy.min', 'np.min', (['epoch'], {}), '(epoch)\n', (277, 284), True, 'import numpy as np\n'), ((286, 299), 'numpy.max', 'np.max', (['epoch'], {}), '(epoch)\n', (292, 299), True, 'import numpy as np\n'), ((337, 349), 'numpy.min', 'np.min', (['loss'], {}), '(loss)\n', (343, 349), True, 'import numpy as np\n'), ((351, 363), 'numpy.max', 'np.max', (['loss'], {}), '(loss)\n', (357, 363), True, 'import numpy as np\n'), ((401, 417), 'numpy.min', 'np.min', (['val_loss'], {}), '(val_loss)\n', (407, 417), True, 'import numpy as np\n'), ((419, 435), 'numpy.max', 'np.max', (['val_loss'], {}), '(val_loss)\n', (425, 435), True, 'import numpy as np\n')] |
#!/usr/bin/python
# <NAME>, HKUST, 2019.
# Copyright reserved.
# This file is an example to parse the feature and matching file,
# in accord with our internal format.
from __future__ import print_function
import os
import sys
import glob
import numpy as np
import math
from struct import unpack
from PIL import Image, ImageDraw
# REPLACE these paths with yours.
# sift_list_path: text file listing one .sift feature-file path per line.
# match_folder: directory holding the corresponding binary .mat match files.
sift_list_path = '/home/tianwei/Data/kitti/odometry/dataset/odometry/sequences/00/sift_list.txt'
match_folder = '/home/tianwei/Data/kitti/odometry/dataset/odometry/sequences/00/match'
def read_feature_repo(file_path):
    """Read a feature file (*.sift).

    The binary layout is a 20-byte header of five int32s
    (magic, version, num_features, loc_dim, des_dim), followed by
    ``num_features * loc_dim`` float32 keypoints and then the descriptors.

    Returns:
        (keypts, desc) -- arrays of shape (num_features, loc_dim) and
        (num_features, des_dim) respectively.
    """
    with open(file_path, 'rb') as fin:
        data = fin.read()
    head_length = 20
    head = data[0:head_length]
    feature_name, _, num_features, loc_dim, des_dim = unpack('5i', head)
    # Keypoints are always stored as float32 (4 bytes each).
    keypts_length = loc_dim * num_features * 4
    if feature_name == ord('S') + (ord('I') << 8) + (ord('F') << 16) + (ord('T') << 24):
        print(Notify.INFO, 'Reading SIFT file',
              file_path, '#', num_features, Notify.ENDC)
        # SIFT descriptors are stored as unsigned bytes.
        desc_length = des_dim * num_features
        desc_type = 'B'
    elif feature_name == 21384864:  # L2Net
        print(Notify.INFO, 'Reading L2NET file',
              file_path, '#', num_features, Notify.ENDC)
        # BUG FIX: this branch previously left desc_length/desc_type unset,
        # raising a NameError below. L2Net descriptors are 4-byte floats.
        desc_length = des_dim * num_features * 4
        desc_type = 'f'
    else:
        print(Notify.FAIL, 'Unknown feature type.', Notify.ENDC)
        # Fall back to float32 descriptors for unknown formats.
        desc_length = des_dim * num_features * 4
        desc_type = 'f'
    keypts_data = data[head_length: head_length + keypts_length]
    keypts = np.array(unpack('f' * loc_dim * num_features, keypts_data))
    keypts = np.reshape(keypts, (num_features, loc_dim))
    desc_data = data[head_length +
                     keypts_length: head_length + keypts_length + desc_length]
    desc = np.array(unpack(
        desc_type * des_dim * num_features, desc_data))
    desc = np.reshape(desc, (num_features, des_dim))
    return keypts, desc
def read_match_repo(mat_file):
    """Read a .mat file and return its match records.

    Arguments:
        mat_file {str} -- .mat file

    Returns:
        A list of tuples with each of format (second_sift_name (without .sift
        suffix), match_num (putative, hinlier, finlier), homograph matrix,
        fundamental matrix, match pairs (list of (feat1, feat2, flag)))
    """
    match_ret = []
    with open(mat_file, 'rb') as fin:
        data = fin.read()
    if len(data) == 0:
        return match_ret
    file_end = len(data)
    end = 0
    while True:
        # Length of the second image's filename (int32).
        length_bytes = 4
        length = unpack('i', data[end:end + length_bytes])[0]
        end += length_bytes
        # Filename itself. BUG FIX: the original ''.join over the tuple of
        # 1-byte bytes objects from unpack('c'*n, ...) raises TypeError on
        # Python 3; decode the raw slice instead.
        filename = data[end:end + length].decode('ascii')
        sift_name2 = os.path.splitext(filename)[0]
        end += length
        # Match counts: (putative, hinlier, finlier).
        match_num_bytes = 4 * 3
        match_num = unpack('3i', data[end:end + match_num_bytes])
        end += match_num_bytes
        # Homography (3x3) followed by fundamental matrix (3x3), as doubles.
        mat_bytes = 8 * 18
        mat = unpack('18d', data[end:end + mat_bytes])
        hmat = np.matrix([mat[0:3], mat[3:6], mat[6:9]], dtype=np.float32)
        fmat = np.matrix([mat[9:12], mat[12:15], mat[15:18]], dtype=np.float32)
        end += mat_bytes
        # Putative matches: int32 triples of (feat1, feat2, inlier_flag).
        struct_bytes = 12 * match_num[0]
        struct = unpack(match_num[0] * '3i', data[end:end + struct_bytes])
        struct = np.reshape(struct, (-1, 3))
        end += struct_bytes
        match_ret.append((sift_name2, match_num, hmat, fmat, struct))
        if end == file_end:
            break
    return match_ret
def get_inlier_image_coords(sift_keys1, sift_keys2, feature_matches, type='f'):
    """Collect inlier matches as image coordinates.

    Arguments:
        sift_keys1 {list of keys (x, y, color, scale, orientation)} -- first sift keys
        sift_keys2 {list of keys} -- second sift keys
        feature_matches {(first, second, flag)} -- sift key index pairs and flags

    Keyword Arguments:
        type {str} -- 'f' keeps fundamental-matrix inliers, 'h' homography
            inliers (default: {'f'})

    Returns:
        float32 array -- one (x1, y1, x2, y2) row per inlier
    """
    if type == 'f':
        wanted_flag = 2
    elif type == 'h':
        wanted_flag = 1
    else:
        print('Unknown inlier type, should be "f" or "h"')
        exit(-1)
    image_matches = []
    # Flag 3 marks matches that are inliers under both models.
    for row in range(feature_matches.shape[0]):
        flag = feature_matches[row, 2]
        if flag == wanted_flag or flag == 3:
            idx1 = feature_matches[row, 0]
            idx2 = feature_matches[row, 1]
            image_matches.append([sift_keys1[idx1][0], sift_keys1[idx1][1],
                                  sift_keys2[idx2][0], sift_keys2[idx2][1]])
    return np.array(image_matches, dtype=np.float32)
def compute_fmat_error(f, image_matches, homogeneous=False):
    """Mean absolute point-to-epipolar-line distance for matches under f.

    Arguments:
        f -- 3x3 fundamental matrix (may be an np.matrix)
        image_matches -- rows of (x1, y1, x2, y2)
        homogeneous -- True if coordinates already carry the unit third component

    Returns:
        average distance from each second point to the epipolar line of its
        matching first point
    """
    pts1 = image_matches[:, :2]
    pts2 = image_matches[:, 2:4]
    assert pts1.shape == pts2.shape
    if not homogeneous:
        # Append a unit third coordinate to work in homogeneous form.
        pad = np.ones(shape=[pts1.shape[0], 1], dtype=pts1.dtype)
        pts1 = np.concatenate((pts1, pad), axis=1)
        pts2 = np.concatenate((pts2, pad), axis=1)
    # Epipolar line of each first point: l = F * x1.
    epi_lines = np.matmul(f, pts1.transpose())
    # |x2 . l| per match (np.multiply keeps this elementwise for np.matrix).
    numerator = np.abs(np.sum(np.multiply(epi_lines.transpose(), pts2), axis=1))
    # Line normalization sqrt(a^2 + b^2); epsilon guards against div-by-zero.
    denominator = np.sqrt(np.multiply(
        epi_lines[0, :], epi_lines[0, :]) + np.multiply(epi_lines[1, :], epi_lines[1, :])) + 1e-6
    return np.mean(np.divide(numerator, denominator.transpose()))
if __name__ == '__main__':
    # Driver: load every .sift feature file, then sanity-check each match
    # file by recomputing the mean epipolar error of its F-matrix inliers.
    sift_list = []
    with open(sift_list_path) as f:
        lines = f.readlines()
        for line in lines:
            sift_list.append(line.strip())
    match_files = glob.glob(os.path.join(match_folder, '*.mat'))
    # Sort both lists so files are processed in a deterministic order.
    sift_list.sort()
    match_files.sort()
    # read all sift at once
    sift_file_map = {}
    count = 0
    for sift_file in sift_list:
        # Map basename (without extension) -> (ordinal index, keypoints).
        sift_name = os.path.splitext(os.path.split(sift_file)[1])[0]
        # keypoint: (x, y, color, scale, orientation)
        keypts, _ = read_feature_repo(sift_file)
        sift_file_map[sift_name] = (count, keypts)
        count = count+1
    print("Read all sift files")
    for one_mat_file in match_files:
        print("Read", one_mat_file)
        match_ret = read_match_repo(one_mat_file)
        # Each .mat file is named after the first image of its pairs.
        sift_name1 = os.path.splitext(os.path.split(one_mat_file)[1])[0]
        for i in range(len(match_ret)):
            # Record layout: (second name, counts, H, F, match triples).
            sift_name2 = match_ret[i][0]
            match_num = match_ret[i][1]
            hmat = match_ret[i][2]
            fmat = match_ret[i][3]
            match_pairs = match_ret[i][4]
            image_coords = get_inlier_image_coords(
                sift_file_map[sift_name1][1], sift_file_map[sift_name2][1], match_pairs, 'f')
            # match_num[2] is the recorded number of F-matrix inliers.
            assert len(image_coords) == match_num[2]
            ave_error = compute_fmat_error(fmat, image_coords, homogeneous=False)
| [
"numpy.matrix",
"numpy.multiply",
"struct.unpack",
"numpy.ones",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"os.path.split",
"os.path.join",
"numpy.concatenate"
] | [((794, 812), 'struct.unpack', 'unpack', (['"""5i"""', 'head'], {}), "('5i', head)\n", (800, 812), False, 'from struct import unpack\n'), ((1574, 1617), 'numpy.reshape', 'np.reshape', (['keypts', '(num_features, loc_dim)'], {}), '(keypts, (num_features, loc_dim))\n', (1584, 1617), True, 'import numpy as np\n'), ((1828, 1869), 'numpy.reshape', 'np.reshape', (['desc', '(num_features, des_dim)'], {}), '(desc, (num_features, des_dim))\n', (1838, 1869), True, 'import numpy as np\n'), ((5187, 5228), 'numpy.array', 'np.array', (['image_matches'], {'dtype': 'np.float32'}), '(image_matches, dtype=np.float32)\n', (5195, 5228), True, 'import numpy as np\n'), ((5961, 5978), 'numpy.mean', 'np.mean', (['dist_p2l'], {}), '(dist_p2l)\n', (5968, 5978), True, 'import numpy as np\n'), ((1510, 1559), 'struct.unpack', 'unpack', (["('f' * loc_dim * num_features)", 'keypts_data'], {}), "('f' * loc_dim * num_features, keypts_data)\n", (1516, 1559), False, 'from struct import unpack\n'), ((1753, 1806), 'struct.unpack', 'unpack', (['(desc_type * des_dim * num_features)', 'desc_data'], {}), '(desc_type * des_dim * num_features, desc_data)\n', (1759, 1806), False, 'from struct import unpack\n'), ((5444, 5501), 'numpy.ones', 'np.ones', ([], {'shape': '[points1.shape[0], 1]', 'dtype': 'points1.dtype'}), '(shape=[points1.shape[0], 1], dtype=points1.dtype)\n', (5451, 5501), True, 'import numpy as np\n'), ((5519, 5558), 'numpy.concatenate', 'np.concatenate', (['(points1, ones)'], {'axis': '(1)'}), '((points1, ones), axis=1)\n', (5533, 5558), True, 'import numpy as np\n'), ((5577, 5616), 'numpy.concatenate', 'np.concatenate', (['(points2, ones)'], {'axis': '(1)'}), '((points2, ones), axis=1)\n', (5591, 5616), True, 'import numpy as np\n'), ((6217, 6252), 'os.path.join', 'os.path.join', (['match_folder', '"""*.mat"""'], {}), "(match_folder, '*.mat')\n", (6229, 6252), False, 'import os\n'), ((2808, 2838), 'struct.unpack', 'unpack', (["('c' * length)", 'filename'], {}), "('c' * length, 
filename)\n", (2814, 2838), False, 'from struct import unpack\n'), ((3113, 3136), 'struct.unpack', 'unpack', (['"""3i"""', 'match_num'], {}), "('3i', match_num)\n", (3119, 3136), False, 'from struct import unpack\n'), ((3328, 3346), 'struct.unpack', 'unpack', (['"""18d"""', 'mat'], {}), "('18d', mat)\n", (3334, 3346), False, 'from struct import unpack\n'), ((3420, 3481), 'numpy.matrix', 'np.matrix', (['[hmat[:3], hmat[3:6], hmat[6:9]]'], {'dtype': 'np.float32'}), '([hmat[:3], hmat[3:6], hmat[6:9]], dtype=np.float32)\n', (3429, 3481), True, 'import numpy as np\n'), ((3499, 3560), 'numpy.matrix', 'np.matrix', (['[fmat[:3], fmat[3:6], fmat[6:9]]'], {'dtype': 'np.float32'}), '([fmat[:3], fmat[3:6], fmat[6:9]], dtype=np.float32)\n', (3508, 3560), True, 'import numpy as np\n'), ((3762, 3797), 'struct.unpack', 'unpack', (["(match_num[0] * '3i')", 'struct'], {}), "(match_num[0] * '3i', struct)\n", (3768, 3797), False, 'from struct import unpack\n'), ((3819, 3846), 'numpy.reshape', 'np.reshape', (['struct', '(-1, 3)'], {}), '(struct, (-1, 3))\n', (3829, 3846), True, 'import numpy as np\n'), ((2613, 2632), 'struct.unpack', 'unpack', (['"""i"""', 'length'], {}), "('i', length)\n", (2619, 2632), False, 'from struct import unpack\n'), ((5773, 5818), 'numpy.multiply', 'np.multiply', (['epi_lines[0, :]', 'epi_lines[0, :]'], {}), '(epi_lines[0, :], epi_lines[0, :])\n', (5784, 5818), True, 'import numpy as np\n'), ((5830, 5875), 'numpy.multiply', 'np.multiply', (['epi_lines[1, :]', 'epi_lines[1, :]'], {}), '(epi_lines[1, :], epi_lines[1, :])\n', (5841, 5875), True, 'import numpy as np\n'), ((6433, 6457), 'os.path.split', 'os.path.split', (['sift_file'], {}), '(sift_file)\n', (6446, 6457), False, 'import os\n'), ((6839, 6866), 'os.path.split', 'os.path.split', (['one_mat_file'], {}), '(one_mat_file)\n', (6852, 6866), False, 'import os\n')] |
import matplotlib.pyplot as plt
from numpy import linspace
class EvaluateProteinInference():
    """Helpers for inspecting protein-inference results."""

    def __init__(self):
        return

    def plot_n_over_fdr(self, protein_table):
        """Plot how many proteins are inferred as the FDR threshold grows.

        Expects ``protein_table`` to expose an ``FDR`` column of per-protein
        false-discovery rates (e.g. a pandas DataFrame).
        """
        thresholds = linspace(0, 1, 100)
        # Count proteins passing each candidate FDR cutoff.
        counts = [sum(protein_table.FDR <= threshold) for threshold in thresholds]
        plt.plot(thresholds, counts)
        plt.xlabel("FDR")
        plt.ylabel("Number of Proteins Inferred")
        plt.title("Inference per Discovery Rate")
        return plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((215, 234), 'numpy.linspace', 'linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (223, 234), False, 'from numpy import linspace\n'), ((373, 407), 'matplotlib.pyplot.plot', 'plt.plot', (['fdr_values', 'n_identified'], {}), '(fdr_values, n_identified)\n', (381, 407), True, 'import matplotlib.pyplot as plt\n'), ((417, 434), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""FDR"""'], {}), "('FDR')\n", (427, 434), True, 'import matplotlib.pyplot as plt\n'), ((444, 485), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Proteins Inferred"""'], {}), "('Number of Proteins Inferred')\n", (454, 485), True, 'import matplotlib.pyplot as plt\n'), ((495, 536), 'matplotlib.pyplot.title', 'plt.title', (['"""Inference per Discovery Rate"""'], {}), "('Inference per Discovery Rate')\n", (504, 536), True, 'import matplotlib.pyplot as plt\n'), ((555, 565), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (563, 565), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import theano as theano
import theano.tensor as T
from Evaluate import *
class lstm:
    """LSTM next-POI recommender with jointly learned user/POI embeddings.

    Trained with negative sampling: for each observed (input POI, target
    POI) step, the target's score is pushed above the scores of
    ``negative`` randomly drawn POIs. All parameters are Theano shared
    variables; the symbolic graph is assembled once in ``__theano_build__``.
    """

    def __init__(self, user_dim=20, poi_dim=20, hidden_dim=20, negative=10, top_K=20, regu_para=0.01,
                 num_user=5000, num_poi=100000):
        # Assign instance variables
        self.user_dim = user_dim
        self.poi_dim = poi_dim
        self.hidden_dim = hidden_dim
        self.top_K = top_K
        # the number of negative samples
        self.negative = negative
        self.regu_para = regu_para
        self.num_user = num_user
        self.num_poi = num_poi
        # Randomly initialize the network parameters.
        # W* matrices act on the POI embedding, U* matrices act on the
        # previous hidden state (i=input, f=forget, o=output, c=candidate).
        Wi = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / poi_dim), (hidden_dim, poi_dim))
        Ui = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / hidden_dim), (hidden_dim, hidden_dim))
        Wf = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / poi_dim), (hidden_dim, poi_dim))
        Uf = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / hidden_dim), (hidden_dim, hidden_dim))
        Wo = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / poi_dim), (hidden_dim, poi_dim))
        Uo = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / hidden_dim), (hidden_dim, hidden_dim))
        Wc = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / poi_dim), (hidden_dim, poi_dim))
        Uc = np.random.uniform(-np.sqrt(1.0 / hidden_dim), np.sqrt(1.0 / hidden_dim), (hidden_dim, hidden_dim))
        User_vector = np.random.uniform(-np.sqrt(1.0 / user_dim), np.sqrt(1.0 / num_user),
                                        (user_dim, num_user))
        Poi_vector = np.random.uniform(-np.sqrt(1.0 / poi_dim), np.sqrt(1.0 / num_poi), (poi_dim, num_poi))
        # Theano: create shared variables
        self.Wi = theano.shared(name='Wi', value=Wi.astype(theano.config.floatX))
        self.Ui = theano.shared(name='Ui', value=Ui.astype(theano.config.floatX))
        self.Wf = theano.shared(name='Wf', value=Wf.astype(theano.config.floatX))
        self.Uf = theano.shared(name='Uf', value=Uf.astype(theano.config.floatX))
        self.Wo = theano.shared(name='Wo', value=Wo.astype(theano.config.floatX))
        self.Uo = theano.shared(name='Uo', value=Uo.astype(theano.config.floatX))
        self.Wc = theano.shared(name='Wc', value=Wc.astype(theano.config.floatX))
        self.Uc = theano.shared(name='Uc', value=Uc.astype(theano.config.floatX))
        self.User_vector = theano.shared(name='User_vector', value=User_vector.astype(theano.config.floatX))
        self.Poi_vector = theano.shared(name='Poi_vector', value=Poi_vector.astype(theano.config.floatX))
        # we store the Theano graph here
        # self.theano = {}
        self.__theano_build__()

    def __theano_build__(self):
        """Assemble the symbolic training/prediction graph and compile
        ``forward_propagation``, ``cal_loss_function`` and ``sdg_step``."""
        Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector = self.Wi, self.Ui, self.Wf, self.Uf, \
            self.Wo, self.Uo, self.Wc, self.Uc, self.User_vector, self.Poi_vector
        # x: input POI id sequence, y: target POI id sequence, u: user id.
        x = T.ivector('x')
        y = T.ivector('y')
        u = T.iscalar('u')

        def forward_step(x_t, y_t, u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector):
            # list: the first one poi is the correct poi, and the rest are the negative ones
            # NOTE(review): negatives are sampled once at graph-construction
            # time (numpy, not a symbolic RNG), so the same negatives are
            # reused on every call — confirm this is intended.
            sampled = np.random.randint(low=0, high=self.num_poi, size=self.negative)
            neg_sample = theano.shared(name='neg_sample', value=sampled.astype(theano.config.floatX))

            def negative_step(poi, c_t_prev, h_t_prev, x_t, u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector):
                # input poi embedding
                x_e = Poi_vector[:, x_t]
                # target negative poi embedding
                x_c = Poi_vector[:, poi]
                # user-embedding layer
                u_e = User_vector[:, u]
                # Standard LSTM gates (NOTE(review): the candidate _c_t uses
                # sigmoid rather than the conventional tanh — confirm).
                i_t = T.nnet.sigmoid(Wi.dot(x_e) + Ui.dot(h_t_prev))
                f_t = T.nnet.sigmoid(Wf.dot(x_e) + Uf.dot(h_t_prev))
                o_t = T.nnet.sigmoid(Wo.dot(x_e) + Uo.dot(h_t_prev))
                _c_t = T.nnet.sigmoid(Wc.dot(x_e) + Uc.dot(h_t_prev))
                c_t = f_t * c_t_prev + i_t * _c_t
                h_t = o_t * T.tanh(c_t)
                # Score of candidate POI: (hidden state + user embedding) . poi
                out_t = (h_t + u_e).dot(x_c)
                return [out_t, c_t, h_t]
            [negative_out, c1, h1], updates1 = theano.scan(fn=negative_step,
                                                           sequences=neg_sample,
                                                           outputs_info=[None,
                                                                         dict(initial=T.zeros(self.hidden_dim)),
                                                                         dict(initial=T.zeros(self.hidden_dim))],
                                                           non_sequences=[x_t, u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc,
                                                                          User_vector, Poi_vector])
            [correct_out, c2, h2], updates2 = theano.scan(fn=negative_step,
                                                          sequences=[y_t],
                                                          outputs_info=[None,
                                                                        dict(initial=T.zeros(self.hidden_dim)),
                                                                        dict(initial=T.zeros(self.hidden_dim))],
                                                          non_sequences=[x_t, u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc,
                                                                         User_vector, Poi_vector])
            # Ranking-style loss term: target score vs. sum of negative scores.
            return len(sampled) * correct_out - T.sum(negative_out)
        [o], updates = theano.scan(forward_step,
                                    sequences=[x, y],
                                    outputs_info=None,
                                    non_sequences=[u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector])
        # the output o is a vector in the size of time steps of the user
        neg_loss = T.sum(o)
        # L2 regularization over every parameter matrix.
        matrix_norm = Wi.norm(2) + Ui.norm(2) + Wf.norm(2) + Uf.norm(2) + Wo.norm(2) + Uo.norm(2) + Wc.norm(2) \
                      + Uc.norm(2) + User_vector.norm(2) + Poi_vector.norm(2)
        final_loss = neg_loss + self.regu_para / 2 * matrix_norm
        # final_loss = T.sum(T.nnet.categorical_crossentropy(out, y)) + self.regu_para / 2 * matrix_norm
        # Gradients
        dWi = T.grad(final_loss, Wi)
        dUi = T.grad(final_loss, Ui)
        dWf = T.grad(final_loss, Wf)
        dUf = T.grad(final_loss, Uf)
        dWo = T.grad(final_loss, Wo)
        dUo = T.grad(final_loss, Uo)
        dWc = T.grad(final_loss, Wc)
        dUc = T.grad(final_loss, Uc)
        dUser_vector = T.grad(final_loss, User_vector)
        dPoi_vector = T.grad(final_loss, Poi_vector)

        def forward_prop(x_t, c_t_prev, h_t_prev, u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector):
            # One LSTM step at inference time: scores *all* POIs at once.
            # poi-embedding layer
            x_e = Poi_vector[:, x_t]
            # user-embedding layer
            u_e = User_vector[:, u]
            i_t = T.nnet.sigmoid(Wi.dot(x_e) + Ui.dot(h_t_prev))
            f_t = T.nnet.sigmoid(Wf.dot(x_e) + Uf.dot(h_t_prev))
            o_t = T.nnet.sigmoid(Wo.dot(x_e) + Uo.dot(h_t_prev))
            _c_t = T.nnet.sigmoid(Wc.dot(x_e) + Uc.dot(h_t_prev))
            c_t = f_t * c_t_prev + i_t * _c_t
            h_t = o_t * T.tanh(c_t)
            out_t = (h_t + u_e).dot(Poi_vector)
            return [out_t, c_t, h_t]
        [out, c, h], updates = theano.scan(forward_prop,
                                           sequences=x,
                                           outputs_info=[None,
                                                         dict(initial=T.zeros(self.hidden_dim)),
                                                         dict(initial=T.zeros(self.hidden_dim))],
                                           non_sequences=[u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector])
        # Assign functions
        self.forward_propagation = theano.function([x, u], out)
        self.cal_loss_function = theano.function([x, u], final_loss)
        # self.bptt = theano.function([x, y, u], [dWi, dUi, dWf, dUf, dWo, dUo, dWc, dUc, dUser_vector, dPoi_vector])
        # SGD
        learning_rate = T.scalar('learning_rate')
        self.sdg_step = theano.function([x, u, learning_rate], [],
                                        updates=[(self.Wi, self.Wi - learning_rate * dWi),
                                                 (self.Ui, self.Ui - learning_rate * dUi),
                                                 (self.Wf, self.Wf - learning_rate * dWf),
                                                 (self.Uf, self.Uf - learning_rate * dUf),
                                                 (self.Wo, self.Wo - learning_rate * dWo),
                                                 (self.Uo, self.Uo - learning_rate * dUo),
                                                 (self.Wc, self.Wc - learning_rate * dWc),
                                                 (self.Uc, self.Uc - learning_rate * dUc),
                                                 (self.User_vector, self.User_vector - learning_rate * dUser_vector),
                                                 (self.Poi_vector, self.Poi_vector - learning_rate * dPoi_vector)])

    def predict(self, x, u):
        """Return the top-K POI indices for every step of sequence ``x`` of user ``u``."""
        # perform forward propagation and return the probability of each poi
        out = self.forward_propagation(x, u)
        predict = np.zeros((len(x), self.top_K))
        for i in np.arange(out.shape[0]):
            # get_topK comes from `from Evaluate import *` — presumably the
            # indices of the K highest scores; confirm against Evaluate.py.
            predict[i] = get_topK(out[i], self.top_K)
        # predict is a matrix of shape len(x)*top_K
        return predict
| [
"theano.tensor.tanh",
"theano.tensor.iscalar",
"theano.tensor.sum",
"theano.function",
"theano.tensor.ivector",
"theano.scan",
"theano.tensor.zeros",
"theano.tensor.grad",
"numpy.random.randint",
"numpy.arange",
"theano.tensor.scalar",
"numpy.sqrt"
] | [((3081, 3095), 'theano.tensor.ivector', 'T.ivector', (['"""x"""'], {}), "('x')\n", (3090, 3095), True, 'import theano.tensor as T\n'), ((3108, 3122), 'theano.tensor.ivector', 'T.ivector', (['"""y"""'], {}), "('y')\n", (3117, 3122), True, 'import theano.tensor as T\n'), ((3135, 3149), 'theano.tensor.iscalar', 'T.iscalar', (['"""u"""'], {}), "('u')\n", (3144, 3149), True, 'import theano.tensor as T\n'), ((5787, 5929), 'theano.scan', 'theano.scan', (['forward_step'], {'sequences': '[x, y]', 'outputs_info': 'None', 'non_sequences': '[u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector]'}), '(forward_step, sequences=[x, y], outputs_info=None,\n non_sequences=[u, Wi, Ui, Wf, Uf, Wo, Uo, Wc, Uc, User_vector, Poi_vector])\n', (5798, 5929), True, 'import theano as theano\n'), ((6124, 6132), 'theano.tensor.sum', 'T.sum', (['o'], {}), '(o)\n', (6129, 6132), True, 'import theano.tensor as T\n'), ((6531, 6553), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Wi'], {}), '(final_loss, Wi)\n', (6537, 6553), True, 'import theano.tensor as T\n'), ((6568, 6590), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Ui'], {}), '(final_loss, Ui)\n', (6574, 6590), True, 'import theano.tensor as T\n'), ((6605, 6627), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Wf'], {}), '(final_loss, Wf)\n', (6611, 6627), True, 'import theano.tensor as T\n'), ((6642, 6664), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Uf'], {}), '(final_loss, Uf)\n', (6648, 6664), True, 'import theano.tensor as T\n'), ((6679, 6701), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Wo'], {}), '(final_loss, Wo)\n', (6685, 6701), True, 'import theano.tensor as T\n'), ((6716, 6738), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Uo'], {}), '(final_loss, Uo)\n', (6722, 6738), True, 'import theano.tensor as T\n'), ((6753, 6775), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Wc'], {}), '(final_loss, Wc)\n', (6759, 6775), True, 'import theano.tensor as T\n'), ((6790, 6812), 'theano.tensor.grad', 'T.grad', 
(['final_loss', 'Uc'], {}), '(final_loss, Uc)\n', (6796, 6812), True, 'import theano.tensor as T\n'), ((6836, 6867), 'theano.tensor.grad', 'T.grad', (['final_loss', 'User_vector'], {}), '(final_loss, User_vector)\n', (6842, 6867), True, 'import theano.tensor as T\n'), ((6890, 6920), 'theano.tensor.grad', 'T.grad', (['final_loss', 'Poi_vector'], {}), '(final_loss, Poi_vector)\n', (6896, 6920), True, 'import theano.tensor as T\n'), ((8157, 8185), 'theano.function', 'theano.function', (['[x, u]', 'out'], {}), '([x, u], out)\n', (8172, 8185), True, 'import theano as theano\n'), ((8219, 8254), 'theano.function', 'theano.function', (['[x, u]', 'final_loss'], {}), '([x, u], final_loss)\n', (8234, 8254), True, 'import theano as theano\n'), ((8412, 8437), 'theano.tensor.scalar', 'T.scalar', (['"""learning_rate"""'], {}), "('learning_rate')\n", (8420, 8437), True, 'import theano.tensor as T\n'), ((8462, 9018), 'theano.function', 'theano.function', (['[x, u, learning_rate]', '[]'], {'updates': '[(self.Wi, self.Wi - learning_rate * dWi), (self.Ui, self.Ui - \n learning_rate * dUi), (self.Wf, self.Wf - learning_rate * dWf), (self.\n Uf, self.Uf - learning_rate * dUf), (self.Wo, self.Wo - learning_rate *\n dWo), (self.Uo, self.Uo - learning_rate * dUo), (self.Wc, self.Wc - \n learning_rate * dWc), (self.Uc, self.Uc - learning_rate * dUc), (self.\n User_vector, self.User_vector - learning_rate * dUser_vector), (self.\n Poi_vector, self.Poi_vector - learning_rate * dPoi_vector)]'}), '([x, u, learning_rate], [], updates=[(self.Wi, self.Wi - \n learning_rate * dWi), (self.Ui, self.Ui - learning_rate * dUi), (self.\n Wf, self.Wf - learning_rate * dWf), (self.Uf, self.Uf - learning_rate *\n dUf), (self.Wo, self.Wo - learning_rate * dWo), (self.Uo, self.Uo - \n learning_rate * dUo), (self.Wc, self.Wc - learning_rate * dWc), (self.\n Uc, self.Uc - learning_rate * dUc), (self.User_vector, self.User_vector -\n learning_rate * dUser_vector), (self.Poi_vector, self.Poi_vector - \n 
learning_rate * dPoi_vector)])\n', (8477, 9018), True, 'import theano as theano\n'), ((9685, 9708), 'numpy.arange', 'np.arange', (['out.shape[0]'], {}), '(out.shape[0])\n', (9694, 9708), True, 'import numpy as np\n'), ((707, 729), 'numpy.sqrt', 'np.sqrt', (['(1.0 / poi_dim)'], {}), '(1.0 / poi_dim)\n', (714, 729), True, 'import numpy as np\n'), ((813, 838), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (820, 838), True, 'import numpy as np\n'), ((925, 947), 'numpy.sqrt', 'np.sqrt', (['(1.0 / poi_dim)'], {}), '(1.0 / poi_dim)\n', (932, 947), True, 'import numpy as np\n'), ((1031, 1056), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1038, 1056), True, 'import numpy as np\n'), ((1143, 1165), 'numpy.sqrt', 'np.sqrt', (['(1.0 / poi_dim)'], {}), '(1.0 / poi_dim)\n', (1150, 1165), True, 'import numpy as np\n'), ((1249, 1274), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1256, 1274), True, 'import numpy as np\n'), ((1361, 1383), 'numpy.sqrt', 'np.sqrt', (['(1.0 / poi_dim)'], {}), '(1.0 / poi_dim)\n', (1368, 1383), True, 'import numpy as np\n'), ((1467, 1492), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1474, 1492), True, 'import numpy as np\n'), ((1586, 1609), 'numpy.sqrt', 'np.sqrt', (['(1.0 / num_user)'], {}), '(1.0 / num_user)\n', (1593, 1609), True, 'import numpy as np\n'), ((1737, 1759), 'numpy.sqrt', 'np.sqrt', (['(1.0 / num_poi)'], {}), '(1.0 / num_poi)\n', (1744, 1759), True, 'import numpy as np\n'), ((3362, 3425), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'self.num_poi', 'size': 'self.negative'}), '(low=0, high=self.num_poi, size=self.negative)\n', (3379, 3425), True, 'import numpy as np\n'), ((680, 705), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (687, 705), True, 'import numpy as np\n'), ((786, 811), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 
/ hidden_dim)\n', (793, 811), True, 'import numpy as np\n'), ((898, 923), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (905, 923), True, 'import numpy as np\n'), ((1004, 1029), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1011, 1029), True, 'import numpy as np\n'), ((1116, 1141), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1123, 1141), True, 'import numpy as np\n'), ((1222, 1247), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1229, 1247), True, 'import numpy as np\n'), ((1334, 1359), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1341, 1359), True, 'import numpy as np\n'), ((1440, 1465), 'numpy.sqrt', 'np.sqrt', (['(1.0 / hidden_dim)'], {}), '(1.0 / hidden_dim)\n', (1447, 1465), True, 'import numpy as np\n'), ((1561, 1584), 'numpy.sqrt', 'np.sqrt', (['(1.0 / user_dim)'], {}), '(1.0 / user_dim)\n', (1568, 1584), True, 'import numpy as np\n'), ((1713, 1735), 'numpy.sqrt', 'np.sqrt', (['(1.0 / poi_dim)'], {}), '(1.0 / poi_dim)\n', (1720, 1735), True, 'import numpy as np\n'), ((5743, 5762), 'theano.tensor.sum', 'T.sum', (['negative_out'], {}), '(negative_out)\n', (5748, 5762), True, 'import theano.tensor as T\n'), ((7506, 7517), 'theano.tensor.tanh', 'T.tanh', (['c_t'], {}), '(c_t)\n', (7512, 7517), True, 'import theano.tensor as T\n'), ((4252, 4263), 'theano.tensor.tanh', 'T.tanh', (['c_t'], {}), '(c_t)\n', (4258, 4263), True, 'import theano.tensor as T\n'), ((7850, 7874), 'theano.tensor.zeros', 'T.zeros', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (7857, 7874), True, 'import theano.tensor as T\n'), ((7947, 7971), 'theano.tensor.zeros', 'T.zeros', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (7954, 7971), True, 'import theano.tensor as T\n'), ((4674, 4698), 'theano.tensor.zeros', 'T.zeros', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (4681, 4698), True, 'import theano.tensor as 
T\n'), ((4787, 4811), 'theano.tensor.zeros', 'T.zeros', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (4794, 4811), True, 'import theano.tensor as T\n'), ((5343, 5367), 'theano.tensor.zeros', 'T.zeros', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (5350, 5367), True, 'import theano.tensor as T\n'), ((5455, 5479), 'theano.tensor.zeros', 'T.zeros', (['self.hidden_dim'], {}), '(self.hidden_dim)\n', (5462, 5479), True, 'import theano.tensor as T\n')] |
# -*- coding: utf-8 -*-
import types
from abc import ABC, abstractmethod
from collections import OrderedDict
from collections.abc import Iterable, Sequence
from contextlib import contextmanager
from inspect import iscode
import iris
import joblib
import numpy as np
import pandas as pd
import xxhash
from iris.time import PartialDateTime
from ..utils import traverse_nested_dict
class CodeObj:
    """Return a (somewhat) flattened, hashable version of func.__code__.

    For a function `func`, use like so:

        code_obj = CodeObj(func.__code__).hashable()

    Note that closure variables are not supported.
    """

    # Hard cap on how many nested code objects may be expanded recursively.
    expansion_limit = 1000

    def __init__(self, code, __expansion_count=0):
        assert iscode(code), "Must pass in a code object (function.__code__)."
        self.code = code
        self.__expansion_count = __expansion_count

    def hashable(self):
        """Return a hashable tuple describing the wrapped code object."""
        if self.__expansion_count > self.expansion_limit:
            raise RuntimeError(
                "Maximum number of code object expansions exceeded ({} > {}).".format(
                    self.__expansion_count, self.expansion_limit
                )
            )
        # Collect the describing co_* attributes, but leave out the line
        # number and file name of the definition so that unrelated edits to
        # the file do not invalidate cached results. Comment-only changes are
        # invisible here; docstring changes still alter the result.
        self.code_dict = OrderedDict(
            (name, getattr(self.code, name))
            for name in dir(self.code)
            if "co_" in name
            and "co_firstlineno" not in name
            and "co_filename" not in name
        )
        # Recursively flatten any nested code object (e.g. comprehensions or
        # inner functions) appearing among the constants.
        flattened_consts = []
        for const in self.code_dict["co_consts"]:
            if iscode(const):
                self.__expansion_count += 1
                const = type(self)(const, self.__expansion_count).hashable()
            flattened_consts.append(const)
        self.code_dict["co_consts"] = tuple(flattened_consts)
        return tuple(self.code_dict.values())
class Hasher(ABC):
    """Abstract interface for type-specific hash helpers."""

    @staticmethod
    @abstractmethod
    def test_argument(arg):
        """Return `True` if this Hasher knows how to handle `arg`."""

    @staticmethod
    @abstractmethod
    def hash(x):
        """Return the computed hash value for `x`."""
class MAHasher(Hasher):
    """Hasher for :class:`numpy.ma.MaskedArray` objects."""

    @staticmethod
    def test_argument(arg):
        # Only plain masked arrays are handled here.
        return isinstance(arg, np.ma.core.MaskedArray)

    @staticmethod
    def hash(x):
        """Hash a MaskedArray by digesting its data and its mask separately."""
        data_digest = xxhash.xxh64_hexdigest(x.data)
        mask_digest = xxhash.xxh64_hexdigest(x.mask)
        return data_digest + mask_digest


_ma_hasher = MAHasher()
class CubeHasher(Hasher):
    """Hasher for :class:`iris.cube.Cube` objects."""

    @staticmethod
    def test_argument(arg):
        return isinstance(arg, iris.cube.Cube)

    @staticmethod
    def hash(cube):
        """Hash a cube from its data, its coordinates, and its metadata."""
        parts = []
        # Masked data must go through the mask-aware hasher; anything else is
        # digested directly.
        if isinstance(cube.data, np.ma.core.MaskedArray):
            parts.append(_ma_hasher.hash(cube.data))
        else:
            parts.append(xxhash.xxh64_hexdigest(cube.data))
        parts.extend(joblib.hashing.hash(coord) for coord in cube.coords())
        parts.append(joblib.hashing.hash(cube.metadata))
        return "".join(parts)


_cube_hasher = CubeHasher()
class DatasetHasher(Hasher):
    """Hasher for package Dataset objects."""

    @staticmethod
    def test_argument(arg):
        from ..data import Dataset

        return isinstance(arg, Dataset)

    @staticmethod
    def hash(dataset):
        """Hash a Dataset by chaining the hashes of all its cubes.

        Note: This realises any lazy data.
        """
        return "".join(_cube_hasher.hash(cube) for cube in dataset)


_dataset_hasher = DatasetHasher()
class DatasetsHasher(Hasher):
    """Hasher for package Datasets collections."""

    @staticmethod
    def test_argument(arg):
        from ..data import Datasets

        return isinstance(arg, Datasets)

    @staticmethod
    def hash(datasets):
        """Concatenate the hash of every member Dataset."""
        return "".join(_dataset_hasher.hash(dataset) for dataset in datasets)
class DFHasher(Hasher):
    """Hasher for :class:`pandas.DataFrame` objects."""

    @staticmethod
    def test_argument(arg):
        return isinstance(arg, pd.DataFrame)

    @staticmethod
    def hash(df):
        """Hash a DataFrame from its values, index, and column labels.

        Anything else attached to the frame is ignored.
        """
        pieces = [
            xxhash.xxh64_hexdigest(np.ascontiguousarray(df.values)),
            joblib.hashing.hash(df.index),
            joblib.hashing.hash(df.columns),
        ]
        return "".join(pieces)
class NestedMADictHasher(Hasher):
    """Hashing of (nested) dict with MaskedArray.
    Supports flat, non-nested iterables of MaskedArray as the values.
    Note that this does not consider other custom types which are known now or added
    later on.
    """
    @staticmethod
    def test_argument(arg):
        # Applicable only when `arg` is a dict and at least one (possibly
        # nested) value is a MaskedArray, either directly or inside a
        # non-ndarray Iterable value.
        # NOTE(review): `traverse_nested_dict` presumably yields
        # (flattened_key, leaf_value) pairs — confirm in ..utils.
        if not isinstance(arg, dict):
            return False
        for flat_key, val in traverse_nested_dict(arg):
            # If any of the values is a MaskedArray, this hasher is applicable.
            if _ma_hasher.test_argument(val):
                return True
            if not isinstance(val, np.ndarray) and isinstance(val, Iterable):
                # If `val` is not a MaskedArray or ndarray but an Iterable,
                # calculate hash values for every item in the Iterable with
                # support for MaskedArray. This is only necessary if at least one of
                # the items in the Iterable is actually a MaskedArray.
                for item in val:
                    if _ma_hasher.test_argument(item):
                        return True
        return False
    @staticmethod
    def hash(d):
        """Compute hash for the nested dict containing MaskedArray."""
        # Build a parallel dict of per-leaf hash values (keyed by flattened
        # key), then hash that dict as a whole.  The branching below must
        # stay in sync with `test_argument` above.
        hash_dict = {}
        for flat_key, val in traverse_nested_dict(d):
            if _ma_hasher.test_argument(val):
                hash_dict[flat_key] = _ma_hasher.hash(val)
            else:
                if not isinstance(val, np.ndarray) and isinstance(val, Iterable):
                    # If `val` is not a MaskedArray or ndarray but an Iterable,
                    # calculate hash values for every item in the Iterable with
                    # support for MaskedArray.
                    hash_dict[flat_key] = []
                    for item in val:
                        if _ma_hasher.test_argument(item):
                            hash_dict[flat_key].append(_ma_hasher.hash(item))
                        else:
                            hash_dict[flat_key].append(joblib.hashing.hash(item))
                else:
                    # Plain ndarray or scalar value — hash it directly.
                    hash_dict[flat_key] = joblib.hashing.hash(val)
        # Compute the combined hash value.
        return joblib.hashing.hash(hash_dict)
class PartialDateTimeHasher(Hasher):
    """Hashing of (iterables of) PartialDateTime."""

    @staticmethod
    def test_argument(arg):
        if isinstance(arg, PartialDateTime):
            return True
        return isinstance(arg, Sequence) and all(
            isinstance(item, PartialDateTime) for item in arg
        )

    @staticmethod
    def hash(arg):
        if isinstance(arg, PartialDateTime):
            return PartialDateTimeHasher.calculate_hash(arg)
        if isinstance(arg, Sequence) and all(
            isinstance(item, PartialDateTime) for item in arg
        ):
            # Hash the list of per-element hashes.
            return joblib.hashing.hash(
                [PartialDateTimeHasher.calculate_hash(item) for item in arg]
            )
        # Mirrors the original fall-through: unsupported input yields None.

    @staticmethod
    def calculate_hash(dt):
        """Hash a single PartialDateTime from its slot values."""
        slot_values = [getattr(dt, name) for name in dt.__slots__]
        return joblib.hashing.hash(slot_values)
@contextmanager
def adjust_n_jobs(arg):
    """
    Temporarily set ``arg.n_jobs = None`` for the duration of the block.

    Neutralising ``n_jobs`` yields uniform hash values regardless of the
    machine-specific parallelism setting.  Objects without an ``n_jobs``
    attribute are left untouched.

    :param arg: object whose ``n_jobs`` attribute (if any) is neutralised
    """
    if hasattr(arg, "n_jobs"):
        # Temporarily set `n_jobs=None` in order to obtain uniform hash
        # values throughout.
        orig_n_jobs = arg.n_jobs
        arg.n_jobs = None
        try:
            yield
        finally:
            # Restore the original value even if the guarded block raised —
            # previously an exception left `n_jobs` permanently set to None.
            arg.n_jobs = orig_n_jobs
    else:
        # Do nothing.
        yield
@contextmanager
def adjust_instance_n_jobs(arg):
    """For a bound method, neutralise ``n_jobs`` on the owning instance."""
    if not isinstance(arg, types.MethodType):
        # Not a bound method — nothing to adjust.
        yield
    else:
        with adjust_n_jobs(arg.__self__):
            yield
# Hashers run outside of (before) any context managers.  Currently none.
_default_initial_hashers = []
# Context managers that temporarily change objects to enable consistent hashing
# (e.g. neutralising `n_jobs`, which varies between machines).
_default_context_managers = [adjust_n_jobs, adjust_instance_n_jobs]
# Hashers run while the context managers above are active.
# NOTE(review): ordering presumably matters — the first hasher whose
# `test_argument` accepts the object is likely used; confirm with the caller.
_default_guarded_hashers = [
    MAHasher(),
    DatasetsHasher(),
    DatasetHasher(),
    CubeHasher(),
    DFHasher(),
    NestedMADictHasher(),
    PartialDateTimeHasher(),
]
| [
"inspect.iscode",
"xxhash.xxh64_hexdigest",
"numpy.ascontiguousarray",
"joblib.hashing.hash"
] | [((721, 733), 'inspect.iscode', 'iscode', (['code'], {}), '(code)\n', (727, 733), False, 'from inspect import iscode\n'), ((3412, 3446), 'joblib.hashing.hash', 'joblib.hashing.hash', (['cube.metadata'], {}), '(cube.metadata)\n', (3431, 3446), False, 'import joblib\n'), ((4736, 4765), 'joblib.hashing.hash', 'joblib.hashing.hash', (['df.index'], {}), '(df.index)\n', (4755, 4765), False, 'import joblib\n'), ((4790, 4821), 'joblib.hashing.hash', 'joblib.hashing.hash', (['df.columns'], {}), '(df.columns)\n', (4809, 4821), False, 'import joblib\n'), ((7043, 7073), 'joblib.hashing.hash', 'joblib.hashing.hash', (['hash_dict'], {}), '(hash_dict)\n', (7062, 7073), False, 'import joblib\n'), ((2046, 2059), 'inspect.iscode', 'iscode', (['value'], {}), '(value)\n', (2052, 2059), False, 'from inspect import iscode\n'), ((2846, 2876), 'xxhash.xxh64_hexdigest', 'xxhash.xxh64_hexdigest', (['x.data'], {}), '(x.data)\n', (2868, 2876), False, 'import xxhash\n'), ((2879, 2909), 'xxhash.xxh64_hexdigest', 'xxhash.xxh64_hexdigest', (['x.mask'], {}), '(x.mask)\n', (2901, 2909), False, 'import xxhash\n'), ((3268, 3301), 'xxhash.xxh64_hexdigest', 'xxhash.xxh64_hexdigest', (['cube.data'], {}), '(cube.data)\n', (3290, 3301), False, 'import xxhash\n'), ((3363, 3389), 'joblib.hashing.hash', 'joblib.hashing.hash', (['coord'], {}), '(coord)\n', (3382, 3389), False, 'import joblib\n'), ((4679, 4710), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['df.values'], {}), '(df.values)\n', (4699, 4710), True, 'import numpy as np\n'), ((6960, 6984), 'joblib.hashing.hash', 'joblib.hashing.hash', (['val'], {}), '(val)\n', (6979, 6984), False, 'import joblib\n'), ((6869, 6894), 'joblib.hashing.hash', 'joblib.hashing.hash', (['item'], {}), '(item)\n', (6888, 6894), False, 'import joblib\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
Script de Python para la visualización de la simulación.
"""
import numpy as np
#import seaborn as sns
import matplotlib.pyplot as plt
import scipy as sc
import matplotlib.ticker as ticker
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
#plt.rcParams['image.cmap'] = 'PuBu'
#plt.rcParams['image.cmap'] = 'YlGnBu'
plt.rcParams['image.cmap'] = 'plasma'
rcParams.update({'font.size': 11})
# NOTE(review): the colormap is set twice with the same value; this second
# assignment is redundant.
plt.rcParams['image.cmap'] = 'plasma'
fsize = 16  # font size used for all axis labels and titles
# Sentinel values stored in constants.dat (entry 7) identifying the run type.
BULLET = -147
JEANS = -137
GAUSS = -127
dat = np.loadtxt("./datFiles/grid0.dat").T
#density = np.loadtxt("density.dat")
# Simulation constants (column 1 of constants.dat): domain limits, grid
# sizes, snapshot count, run type, tau, timestep, ...
constantes = np.loadtxt("./datFiles/constants.dat", usecols = 1)
dt = constantes[9]  # time between saved snapshots (used in figure titles)
TAU = int(constantes[8])  # relaxation parameter; shown as tau in titles
#inF = np.loadtxt("inF.dat")
#outF = np.loadtxt("outF0.dat")
#outF1 = np.loadtxt("outF1.dat")
#oI = np.loadtxt("oI.dat")
#oR = np.loadtxt("oR.dat")
#acce = np.loadtxt("acce.dat")
def fmt(x, pos):
    """Matplotlib tick formatter: render *x* as LaTeX 'a x 10^b'."""
    mantissa, exponent = '{:.1e}'.format(x).split('e')
    return r'${} \times 10^{{{}}}$'.format(mantissa, int(exponent))
# Spatial grid used for the 1-D density plots below.
x = np.linspace(constantes[0], constantes[1], int(constantes[4]))
#
figu = plt.gcf()
#figu.set_size_inches(18.5, 10.5)
#figu.set_dpi(300)
dpII = 200  # DPI for every saved figure
# Conversion factors from simulation units to physical units.
velUnit = 621 #m/s
estUnit = 35 #kpc
potUnit = 385962691092 #J/kg
acceUnit = 3.5737451e-13 #km/s²
# For every saved snapshot: render the phase-space density image, then the
# 1-D density profile, saving both as PNG frames under ./images/.
for i in range(int(constantes[6])):
    # Phase-space grid for snapshot i (transposed so velocity is vertical).
    dat = np.loadtxt("./datFiles/grid{:d}.dat".format(i)).T
    #dat = dat#/np.max(dat)/7
    plt.imshow(dat, extent=[constantes[0],constantes[1],constantes[2],constantes[3]], aspect='auto') # imshow is much faster (than contourf)
    # plt.contourf(np.flip(dat,axis=0), extent=[constantes[0],constantes[1],constantes[2],constantes[3]], levels = 8) #Es mucho más rápido imshow
    # Relabel ticks in physical units via the velUnit/estUnit factors.
    plt.yticks(plt.yticks()[0], [str(np.round(t*velUnit)) for t in plt.yticks()[0]])
    plt.ylabel("Velocity [km/s]",fontsize=fsize)
    # plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
    plt.xticks(plt.xticks()[0], ["{:.2f}".format(t*estUnit) for t in plt.xticks()[0]])
    plt.xlabel("Position [kpc]",fontsize=fsize)
    # Per-run-type title and axis limits.
    if(constantes[7] == JEANS):
        # plt.title("Jeans Instability $\\tau =$ {:d}".format(TAU),fontsize=fsize)
        plt.title("Phase Space Density $t =$ {:.2f} T".format(i*dt),fontsize=fsize) # divided by 2 since T = 2.
        # plt.ylim(constantes[0],constantes[1])
        # plt.xlim(constantes[2],constantes[3])
        #plt.clim(0,37e5) #Jeans
    elif(constantes[7] == GAUSS):
        #plt.title("Gaussian Initialization $\\tau =$ {:d}".format(TAU),fontsize=fsize)
        # plt.suptitle("$\\tau$ = {:d}".format(TAU),fontsize=fsize)
        # if(TAU == 0):
        # plt.suptitle("$\\tau$ = $\\infty$",fontsize=fsize)
        plt.title("Phase Space Density $t =$ {:.2f}".format(i*dt),fontsize=fsize)
        #plt.clim(0,30e5) #Gauss
        plt.xlim(constantes[2]/2,constantes[3]/2)
        plt.ylim(constantes[0]/1.5,constantes[1]/1.5)
    elif(constantes[7] == BULLET):
        #plt.title("Gaussian Initialization $\\tau =$ {:d}".format(TAU),fontsize=fsize)
        # plt.suptitle("$\\tau$ = {:d}".format(TAU),fontsize=fsize)
        # if(TAU == 0):
        # plt.suptitle("$\\tau$ = $\\infty$",fontsize=fsize)
        plt.ylim(constantes[2]/2,constantes[3]/2)
        plt.xlim(constantes[0],constantes[1])
        plt.title("Phase Space Density $t =$ {:.2f} ut".format(i*dt),fontsize=fsize)
    #plt.clim(0,27e5)
    cbar = plt.colorbar(format=ticker.FuncFormatter(fmt))
    cbar.set_label("Mass density [$M_{\odot}$ / kpc $\\frac{km}{s}$]",fontsize=fsize)
    plt.savefig("./images/phase{:d}.png".format(i), dpi = dpII)
    plt.clf()
    # Second figure: 1-D linear density profile for the same snapshot.
    dens = np.loadtxt("./datFiles/density{:d}.dat".format(i))
    plt.plot(x,dens)
    plt.xticks(plt.xticks()[0], ["{:.2f}".format(t*estUnit) for t in plt.xticks()[0]])
    plt.xlabel("Position [kpc]",fontsize=fsize)
    plt.ylabel("Linear Density [$M_{\odot}$ / kpc]",fontsize=fsize)
    #plt.title("Density $\\tau =$ {:d}".format(TAU),fontsize=fsize)
    #plt.title("Density $t =$ {:.2f} ut".format(i*dt),fontsize=fsize)
    if(TAU == 0):
        plt.title("Density at {:.2f}, $\\tau \\rightarrow \\infty$".format(i*dt),fontsize=fsize)
    #plt.ylim(-0.75e9,6.85e10)#Gauss
    # plt.ylim(6e10,7e10)#Jeans
    plt.ylim(constantes[10]-0.05,constantes[10]+0.05)#Jeans
    #plt.xlim(constantes[0]/2,constantes[1]/2)
    plt.xlim(constantes[0],constantes[1])
    plt.savefig("./images/density{:d}.png".format(i), dpi = dpII)
    plt.clf()
# potential = np.loadtxt("./datFiles/potential{:d}.dat".format(i))
# plt.plot(x,potential)
# plt.ylabel("Potential [J /kg]",fontsize=fsize)
# plt.xlim(-1.1,1.1)
# plt.title("Potential $t =$ {:.2f} ut".format(i*dt),fontsize=fsize)
# #plt.title("Potential $\\tau =$ {:d}".format(TAU),fontsize=fsize)
# plt.ylim(-1.5e11,1.1e11)#Gauss
# #plt.ylim(-1.6e11,1.1e11)#Jeans
# plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
# # plt.yticks(plt.yticks()[0], [fmt(t*potUnit,1) for t in plt.yticks()[0]])
# plt.xlabel("Position [kpc]",fontsize=fsize)
# plt.savefig("./images/potential{:d}.png".format(i), dpi = dpII)
# plt.clf()
#
#
# acce = np.loadtxt("./datFiles/acce{:d}.dat".format(i))
# plt.plot(x,acce)
# plt.ylabel("Acceleration [kpc / Gy$^2$]",fontsize=fsize)
# #plt.title("Acceleration $\\tau =-\\infty$",fontsize=fsize)
# plt.ylim(-0.009,0.009)#Gauss
## plt.ylim(-0.009,0.009)#Jeans
# plt.xlim(-1.1,1.1)
# plt.xticks(plt.xticks()[0], [str(t*estUnit) for t in plt.xticks()[0]])
# #plt.yticks(plt.yticks()[0], [fmt(t*acceUnit,1) for t in plt.yticks()[0]])
## plt.ylim(np.min(acce)*1.1,np.max(acce)*1.1)
# plt.title("Acceleration $t =$ {:.2f} ut".format(i*dt),fontsize=fsize)
# plt.xlabel("Position [kpc]",fontsize=fsize)
#
# plt.savefig("./images/acce{:d}.png".format(i), dpi = dpII)
# plt.clf()
# Create (or truncate) an empty 'plots' marker file signalling completion.
with open('plots', 'w+'):
    pass
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.imshow",
"matplotlib.rcParams.update",
"matplotlib.pyplot.yticks",
"matplotlib.ticker.FuncFormatter",
"numpy.loadtxt",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",... | [((268, 312), 'matplotlib.rcParams.update', 'rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (283, 312), False, 'from matplotlib import rcParams\n'), ((428, 462), 'matplotlib.rcParams.update', 'rcParams.update', (["{'font.size': 11}"], {}), "({'font.size': 11})\n", (443, 462), False, 'from matplotlib import rcParams\n'), ((648, 697), 'numpy.loadtxt', 'np.loadtxt', (['"""./datFiles/constants.dat"""'], {'usecols': '(1)'}), "('./datFiles/constants.dat', usecols=1)\n", (658, 697), True, 'import numpy as np\n'), ((1132, 1141), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1139, 1141), True, 'import matplotlib.pyplot as plt\n'), ((561, 595), 'numpy.loadtxt', 'np.loadtxt', (['"""./datFiles/grid0.dat"""'], {}), "('./datFiles/grid0.dat')\n", (571, 595), True, 'import numpy as np\n'), ((1436, 1539), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dat'], {'extent': '[constantes[0], constantes[1], constantes[2], constantes[3]]', 'aspect': '"""auto"""'}), "(dat, extent=[constantes[0], constantes[1], constantes[2],\n constantes[3]], aspect='auto')\n", (1446, 1539), True, 'import matplotlib.pyplot as plt\n'), ((1800, 1845), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Velocity [km/s]"""'], {'fontsize': 'fsize'}), "('Velocity [km/s]', fontsize=fsize)\n", (1810, 1845), True, 'import matplotlib.pyplot as plt\n'), ((2012, 2056), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position [kpc]"""'], {'fontsize': 'fsize'}), "('Position [kpc]', fontsize=fsize)\n", (2022, 2056), True, 'import matplotlib.pyplot as plt\n'), ((3605, 3614), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3612, 3614), True, 'import matplotlib.pyplot as plt\n'), ((3695, 3712), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'dens'], {}), '(x, dens)\n', (3703, 3712), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3847), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position [kpc]"""'], {'fontsize': 'fsize'}), 
"('Position [kpc]', fontsize=fsize)\n", (3813, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3851, 3916), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Linear Density [$M_{\\\\odot}$ / kpc]"""'], {'fontsize': 'fsize'}), "('Linear Density [$M_{\\\\odot}$ / kpc]', fontsize=fsize)\n", (3861, 3916), True, 'import matplotlib.pyplot as plt\n'), ((4240, 4294), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(constantes[10] - 0.05)', '(constantes[10] + 0.05)'], {}), '(constantes[10] - 0.05, constantes[10] + 0.05)\n', (4248, 4294), True, 'import matplotlib.pyplot as plt\n'), ((4347, 4385), 'matplotlib.pyplot.xlim', 'plt.xlim', (['constantes[0]', 'constantes[1]'], {}), '(constantes[0], constantes[1])\n', (4355, 4385), True, 'import matplotlib.pyplot as plt\n'), ((4455, 4464), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4462, 4464), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1737), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (1735, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1936, 1948), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (1946, 1948), True, 'import matplotlib.pyplot as plt\n'), ((2806, 2852), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(constantes[2] / 2)', '(constantes[3] / 2)'], {}), '(constantes[2] / 2, constantes[3] / 2)\n', (2814, 2852), True, 'import matplotlib.pyplot as plt\n'), ((2856, 2906), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(constantes[0] / 1.5)', '(constantes[1] / 1.5)'], {}), '(constantes[0] / 1.5, constantes[1] / 1.5)\n', (2864, 2906), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3443), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['fmt'], {}), '(fmt)\n', (3438, 3443), True, 'import matplotlib.ticker as ticker\n'), ((3727, 3739), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (3737, 3739), True, 'import matplotlib.pyplot as plt\n'), ((1747, 1768), 'numpy.round', 'np.round', (['(t * velUnit)'], {}), '(t * velUnit)\n', (1755, 1768), True, 'import numpy as 
np\n'), ((3187, 3233), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(constantes[2] / 2)', '(constantes[3] / 2)'], {}), '(constantes[2] / 2, constantes[3] / 2)\n', (3195, 3233), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3275), 'matplotlib.pyplot.xlim', 'plt.xlim', (['constantes[0]', 'constantes[1]'], {}), '(constantes[0], constantes[1])\n', (3245, 3275), True, 'import matplotlib.pyplot as plt\n'), ((1777, 1789), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (1787, 1789), True, 'import matplotlib.pyplot as plt\n'), ((1990, 2002), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (2000, 2002), True, 'import matplotlib.pyplot as plt\n'), ((3781, 3793), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (3791, 3793), True, 'import matplotlib.pyplot as plt\n')] |
from typing import NamedTuple
from matplotlib import pyplot as plt
import argparse
import re
import os
import numpy as np
# Parse loss from a file, assuming the style of checkpoints/loss_log.txt but with only a single run in it with no headers
# Parse loss from a file, assuming the style of checkpoints/loss_log.txt but
# with only a single run in it with no headers.
def parse_loss(file):
    """Parse a CycleGAN-style loss log into a dict of numpy arrays.

    Each log line must match the fixed format
    ``(epoch: E, iters: I, time: T, data: D) D_A: ... idt_B: ...``.

    :param file: path of the log file to parse
    :return: dict mapping field name ('epoch', 'iters', ..., 'idt_B') to a
        float numpy array with one entry per parsed line
    """
    # regex pattern
    p = r'\(epoch: (\d+), iters: (\d+), time: (\S+), data: (\S+)\) D_A: (\S+) G_A: (\S+) cycle_A: (\S+) idt_A: (\S+) D_B: (\S+) G_B: (\S+) cycle_B: (\S+) idt_B: (\S+)\s*'
    with open(file) as f:
        # get a list of lists of all parsed out fields
        m = re.findall(p, f.read())
    # transpose the list of lists to get them by column.
    # `np.float` was removed in NumPy 1.24; the builtin `float` is the
    # correct, equivalent dtype here.
    m = np.array(m).T.astype(float)
    # take each list and put them into a dict with the name as the key
    labels = ['epoch', 'iters', 'time', 'data', 'D_A', 'G_A',
              'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
    lossDict = {k: v for k, v in zip(labels, m)}
    return lossDict
# Take Dictionary of Losses from parse_loss and save graph files
# Take Dictionary of Losses from parse_loss and save graph files
def graph_loss(ld, args):
    """Plot every loss series in *ld* against (fractional) epoch and save it.

    :param ld: dict of numpy arrays as produced by ``parse_loss``
    :param args: parsed CLI namespace providing ``type`` and ``outputFile``
    """
    # Fractional epoch: epoch number plus normalized iteration offset.
    xs = ld['epoch'] + ld['iters'] / np.max(ld['iters'])
    plt.figure()
    plt.title(f'{args.type} Graph')
    plt.xlabel(f'Epochs')
    plt.ylabel(f'{args.type}ing Loss')
    # Graph each loss field we care about.
    for series in ('D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B'):
        plt.plot(xs, ld[series], label=series, linewidth=.25)
    plt.grid()
    plt.legend()
    if not os.path.isdir('graphs'):
        os.mkdir('graphs')
    plt.savefig(f'graphs/{args.outputFile}.png')
if __name__ == '__main__':
    # CLI entry point: parse a single-run loss log and save its graph.
    arg_parser = argparse.ArgumentParser(
        description='Parse a single run from checkpoints/loss_log.txt')
    arg_parser.add_argument('file', type=str, help='input file')
    arg_parser.add_argument('--type', default='Train',
                            help='train or test(graph labeling purposes only')
    arg_parser.add_argument('--outputFile', default='train_graph',
                            type=str, help='output file name(no extension)')
    cli_args = arg_parser.parse_args()
    graph_loss(parse_loss(cli_args.file), cli_args)
| [
"matplotlib.pyplot.title",
"os.mkdir",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"os.path.isdir",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplo... | [((1179, 1191), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1189, 1191), True, 'from matplotlib import pyplot as plt\n'), ((1196, 1227), 'matplotlib.pyplot.title', 'plt.title', (['f"""{args.type} Graph"""'], {}), "(f'{args.type} Graph')\n", (1205, 1227), True, 'from matplotlib import pyplot as plt\n'), ((1232, 1253), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Epochs"""'], {}), "(f'Epochs')\n", (1242, 1253), True, 'from matplotlib import pyplot as plt\n'), ((1258, 1292), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{args.type}ing Loss"""'], {}), "(f'{args.type}ing Loss')\n", (1268, 1292), True, 'from matplotlib import pyplot as plt\n'), ((1464, 1474), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1472, 1474), True, 'from matplotlib import pyplot as plt\n'), ((1479, 1491), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1489, 1491), True, 'from matplotlib import pyplot as plt\n'), ((1559, 1603), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""graphs/{args.outputFile}.png"""'], {}), "(f'graphs/{args.outputFile}.png')\n", (1570, 1603), True, 'from matplotlib import pyplot as plt\n'), ((1647, 1739), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse a single run from checkpoints/loss_log.txt"""'}), "(description=\n 'Parse a single run from checkpoints/loss_log.txt')\n", (1670, 1739), False, 'import argparse\n'), ((1416, 1459), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ld[y]'], {'label': 'y', 'linewidth': '(0.25)'}), '(x, ld[y], label=y, linewidth=0.25)\n', (1424, 1459), True, 'from matplotlib import pyplot as plt\n'), ((1503, 1526), 'os.path.isdir', 'os.path.isdir', (['"""graphs"""'], {}), "('graphs')\n", (1516, 1526), False, 'import os\n'), ((1536, 1554), 'os.mkdir', 'os.mkdir', (['"""graphs"""'], {}), "('graphs')\n", (1544, 1554), False, 'import os\n'), ((1154, 1173), 'numpy.max', 'np.max', (["ld['iters']"], {}), "(ld['iters'])\n", (1160, 1173), True, 
'import numpy as np\n'), ((649, 660), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (657, 660), True, 'import numpy as np\n')] |
import numpy as np
import socket
from base64 import b64encode as b64e, b64decode as b64d
from zlib import compress, decompress
q = 2**11
n = 280
n_bar = 4
m_bar = 4
def get_E_from_ABSq(A, B, S, q):
    """Recover the error term B - A*S, reduced mod q into the range [-1, q-2]."""
    residue = np.mod(B - A * S, q)
    return np.mod(residue + 1, q) - 1
def tryUCkb(U, C, kb):
    # Intended to check whether self.__decode(np.mod(C - np.dot(U, self.__S_a), self.q)) == kb
    # NOTE(review): unimplemented stub — the comment above references `self`
    # although this is a plain function; presumably copied from a class draft.
    pass
## Test section: build a random LWE key pair locally to exercise the primitives.
S_a = np.matrix(np.random.randint(-1, 2, size = (280, 4))).astype('int64')  # secret matrix, entries in {-1, 0, 1}
A = np.matrix(np.random.randint( 0, 2**11, size = (280, 280))).astype('int64')  # public random matrix mod q
E_a = np.matrix(np.random.randint(-1, 2, size = (280, 4))).astype('int64')  # small error term
B = np.mod(A * S_a + E_a, 2**11).astype('int64')  # public key: B = A*S_a + E_a (mod q)
U = np.matrix(np.random.randint(0, q, size = (4, 280))).astype('int64')
C = np.matrix(np.random.randint(0, 1, size = (4, 4))).astype('int64')  # NOTE(review): randint(0, 1) is always 0
b = np.matrix(np.random.randint(-1, 2, size = (4, 4))).astype('int64')
def __decode(mat):
    """Decode ciphertext coefficients: recenter mod q, then round to q/4 steps."""
    def _recenter(v):
        # Lift representatives from [0, q) into the centered range.
        return v - q if v > q // 2 else v

    def _round_quarter(v):
        return round(v / (q / 4))

    centered = np.vectorize(_recenter)(mat)
    return np.vectorize(_round_quarter)(centered)
def __decaps(U, C):
    """Decapsulate (U, C) with the private matrix S_a to recover the key."""
    noisy = np.mod(C - np.dot(U, S_a), q)
    return __decode(noisy)
## Network section
class Netcat:
    """ Python 'netcat like' module """

    def __init__(self, ip, port):
        self.buff = b""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect((ip, port))

    def read(self, length = 1024):
        """Read up to *length* bytes off the socket."""
        return self.socket.recv(length)

    def read_until(self, data):
        """Buffer input until *data* appears; return through its last byte."""
        while self.buff.find(data) == -1:
            self.buff += self.socket.recv(1024)
        end = self.buff.find(data) + len(data)
        chunk, self.buff = self.buff[:end], self.buff[end:]
        return chunk

    def write(self, data):
        self.socket.send(data)

    def close(self):
        self.socket.close()
nc = Netcat('challenges1.france-cybersecurity-challenge.fr', 2002)


def recupererAB():
    """Fetch the public key matrices (A, B) from the server banner."""
    banner = nc.read_until(b"Possible actions:").decode().split("\n")
    A_b64 = banner[1][4:]
    B_b64 = banner[2][4:]
    # Payloads are zlib-compressed, base64-encoded int64 buffers.
    A = np.reshape(np.frombuffer(decompress(b64d(A_b64)), dtype = np.int64), (280, 280))
    B = np.reshape(np.frombuffer(decompress(b64d(B_b64)), dtype = np.int64), (280, 4))
    return A, B
def testKeys(U, C, keyB):
    """Submit (U, C, keyB) to the decapsulation oracle; True iff accepted."""
    nc.read_until(b">>> ")
    nc.write(b'1\n')
    nc.read_until(b'U = ')
    nc.write(b64e(compress(U.tobytes())) + b'\n')
    nc.read_until(b'C = ')
    nc.write(b64e(compress(C.tobytes())) + b'\n')
    nc.read_until(b'key_b = ')
    nc.write(b64e(compress(keyB.tobytes())) + b'\n')
    reply = nc.read_until(b'\n').decode()
    return 'Success' in reply
def sendKeys(S, E):
    """Send the recovered secret S and error E to the server."""
    nc.read_until(b">>> ")
    nc.write(b'2\n')
    nc.read_until(b'S_a = ')
    nc.write(b64e(compress(S.tobytes())) + b'\n')
    nc.read_until(b'E_a = ')
    nc.write(b64e(compress(E.tobytes())) + b'\n')
    print(nc.read())
## Implementation: recover S_a column-by-column via the decapsulation oracle.
A, B = recupererAB()
reconstitution_Sa = [[0,0,0,0] for i in range(n)]
# Fixed ciphertext whose first row encodes q/4 in every slot.
C = [[0 for _ in range(n_bar)] for __ in range(n_bar)]
C[0] = [q/4, q/4, q/4, q/4]
C = np.matrix(C).astype('int64')
# Enumerate every candidate secret row: exactly two 0s, one +1, one -1.
possibilites_Sa = [-1, 0, 1]
possibilitesLignes = []
for a in possibilites_Sa:
    for b in possibilites_Sa:
        for c in possibilites_Sa:
            for d in possibilites_Sa:
                l = [a,b,c,d]
                if l.count(0) == 2 and l.count(1) == 1 and l.count(-1) == 1:
                    possibilitesLignes.append([a,b,c,d])
# For each of the n secret rows, probe the oracle with a U that isolates
# row i, trying each candidate until the oracle reports success.
for i in range(n):
    trouve_ligne = False
    U = [[0 for _ in range(n)] for __ in range(n_bar)]
    U[0][i] = -q/4
    U = np.matrix(U).astype('int64')
    for j in range(len(possibilitesLignes)):
        a,b,c,d = possibilitesLignes[j]
        keyB = [[0 for _ in range(n_bar)] for __ in range(n_bar)]
        keyB[0] = [-a+1, -b+1, -c+1, -d+1]
        keyB = np.matrix(keyB).astype('int64')
        res = testKeys(U, C, keyB)
        if res:
            trouve_ligne = True
            reconstitution_Sa[i] = [-a, -b, -c, -d]
            break
    if not trouve_ligne:
        # "Echec ligne" = failed to recover row i.
        print("Echec ligne" + str(i))
    else:
        if i%10 == 0:
            print(i)
# "Fini de dump S" = finished dumping S.
print("Fini de dump S")
reconstitution_Sa = np.matrix(reconstitution_Sa).astype('int64')
# Derive the matching error term so the server accepts the key pair.
E = get_E_from_ABSq(A, B, reconstitution_Sa, q)
sendKeys(reconstitution_Sa, E) | [
"numpy.matrix",
"numpy.vectorize",
"socket.socket",
"numpy.mod",
"base64.b64decode",
"numpy.random.randint",
"numpy.dot"
] | [((623, 653), 'numpy.mod', 'np.mod', (['(A * S_a + E_a)', '(2 ** 11)'], {}), '(A * S_a + E_a, 2 ** 11)\n', (629, 653), True, 'import numpy as np\n'), ((1082, 1104), 'numpy.vectorize', 'np.vectorize', (['recenter'], {}), '(recenter)\n', (1094, 1104), True, 'import numpy as np\n'), ((1120, 1148), 'numpy.vectorize', 'np.vectorize', (['mult_and_round'], {}), '(mult_and_round)\n', (1132, 1148), True, 'import numpy as np\n'), ((1425, 1474), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1438, 1474), False, 'import socket\n'), ((3335, 3347), 'numpy.matrix', 'np.matrix', (['C'], {}), '(C)\n', (3344, 3347), True, 'import numpy as np\n'), ((4441, 4469), 'numpy.matrix', 'np.matrix', (['reconstitution_Sa'], {}), '(reconstitution_Sa)\n', (4450, 4469), True, 'import numpy as np\n'), ((402, 441), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)'], {'size': '(280, 4)'}), '(-1, 2, size=(280, 4))\n', (419, 441), True, 'import numpy as np\n'), ((475, 521), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 11)'], {'size': '(280, 280)'}), '(0, 2 ** 11, size=(280, 280))\n', (492, 521), True, 'import numpy as np\n'), ((556, 595), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)'], {'size': '(280, 4)'}), '(-1, 2, size=(280, 4))\n', (573, 595), True, 'import numpy as np\n'), ((683, 721), 'numpy.random.randint', 'np.random.randint', (['(0)', 'q'], {'size': '(4, 280)'}), '(0, q, size=(4, 280))\n', (700, 721), True, 'import numpy as np\n'), ((755, 791), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1)'], {'size': '(4, 4)'}), '(0, 1, size=(4, 4))\n', (772, 791), True, 'import numpy as np\n'), ((825, 862), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)'], {'size': '(4, 4)'}), '(-1, 2, size=(4, 4))\n', (842, 862), True, 'import numpy as np\n'), ((3838, 3850), 'numpy.matrix', 'np.matrix', (['U'], {}), '(U)\n', (3847, 3850), True, 'import numpy as np\n'), ((222, 
242), 'numpy.mod', 'np.mod', (['(B - A * S)', 'q'], {}), '(B - A * S, q)\n', (228, 242), True, 'import numpy as np\n'), ((1222, 1236), 'numpy.dot', 'np.dot', (['U', 'S_a'], {}), '(U, S_a)\n', (1228, 1236), True, 'import numpy as np\n'), ((2361, 2371), 'base64.b64decode', 'b64d', (['Ab64'], {}), '(Ab64)\n', (2365, 2371), True, 'from base64 import b64encode as b64e, b64decode as b64d\n'), ((2449, 2459), 'base64.b64decode', 'b64d', (['Bb64'], {}), '(Bb64)\n', (2453, 2459), True, 'from base64 import b64encode as b64e, b64decode as b64d\n'), ((4076, 4091), 'numpy.matrix', 'np.matrix', (['keyB'], {}), '(keyB)\n', (4085, 4091), True, 'import numpy as np\n')] |
# This file is part of the bapsflib package, a Python toolkit for the
# BaPSF group at UCLA.
#
# http://plasma.physics.ucla.edu/
#
# Copyright 2017-2018 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "LICENSES/LICENSE.txt" for full
# license terms and contributor agreement.
#
"""
Helper functions that are utilized by the the HDF5 utility classes
defined in module :mod:`bapsflib._hdf.utils`.
"""
__all__ = [
"build_shotnum_dset_relation",
"build_sndr_for_simple_dset",
"build_sndr_for_complex_dset",
"condition_controls",
"condition_shotnum",
"do_shotnum_intersection",
]
import h5py
import numpy as np
from typing import Any, Dict, Iterable, List, Tuple, Union
from bapsflib._hdf.maps.controls.templates import (
HDFMapControlCLTemplate,
HDFMapControlTemplate,
)
from .file import File
# Type aliases shared by the utility functions in this module.
# ControlMap: any control-device mapping object (plain or command-list based).
ControlMap = Union[HDFMapControlTemplate, HDFMapControlCLTemplate]
IndexDict = Dict[str, np.ndarray]
def build_shotnum_dset_relation(
    shotnum: np.ndarray,
    dset: h5py.Dataset,
    shotnumkey: str,
    cmap: ControlMap,
    cconfn: Any,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Map the requested shot numbers onto rows of a control-device
    dataset.

    Dispatches to :func:`build_sndr_for_simple_dset` when the mapping
    object reports a single configuration per dataset, and to
    :func:`build_sndr_for_complex_dset` otherwise.  The returned arrays
    obey the rule::

        shotnum[sni] = dset[index, shotnumkey]

    where **sni** is a boolean mask over ``shotnum`` marking which shot
    numbers exist in the dataset and **index** holds the corresponding
    dataset row indices.

    :param shotnum: desired HDF5 shot number(s)
    :param dset: control device dataset
    :type dset: :class:`h5py.Dataset`
    :param str shotnumkey: field name in the control device dataset that
        contains shot numbers
    :param cmap: mapping object for control device
    :param cconfn: configuration name for the control device
    :return: :code:`index` and :code:`sni` numpy arrays
    """
    if cmap.one_config_per_dset:
        # dataset records data for exactly one configuration
        builder = build_sndr_for_simple_dset
        args = (shotnum, dset, shotnumkey)
    else:
        # dataset interleaves rows from several configurations
        builder = build_sndr_for_complex_dset
        args = (shotnum, dset, shotnumkey, cmap, cconfn)
    index, sni = builder(*args)
    return index.view(), sni.view()
def build_sndr_for_simple_dset(
    shotnum: np.ndarray, dset: "h5py.Dataset", shotnumkey: str
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compares the **shotnum** numpy array to the specified "simple"
    dataset, **dset**, to determine which indices contain the desired
    shot number(s).  As a result, two numpy arrays are returned which
    satisfy the rule::

        shotnum[sni] = dset[index, shotnumkey]

    where **shotnum** is the original shot number array, **sni** is a
    boolean numpy array masking which shot numbers were determined to
    be in the dataset, and **index** is an array of indices
    corresponding to the desired shot number(s).

    A "simple" dataset is a dataset in which the data for only ONE
    configuration is recorded.

    :param shotnum: desired HDF5 shot number
    :param dset: dataset containing shot numbers
    :type dset: :class:`h5py.Dataset`
    :param str shotnumkey: field name in the dataset that contains
        the shot numbers
    :return: :code:`index` and :code:`sni` numpy arrays
    """
    if dset.shape[0] == 1:
        # only one possible shot number
        only_sn = dset[0, shotnumkey]
        sni = np.where(shotnum == only_sn, True, False)
        # use uint32 so this branch matches the dtype of the
        # empty-result branch
        index = (
            np.array([0], dtype=np.uint32)
            if True in sni
            else np.empty(shape=0, dtype=np.uint32)
        )
    else:
        # get 1st and last shot number
        first_sn = dset[0, shotnumkey]
        last_sn = dset[-1, shotnumkey]
        if last_sn - first_sn + 1 == dset.shape[0]:
            # shot numbers are sequential -> direct arithmetic mapping
            index = shotnum - first_sn
            # build sni and filter index
            # (a requested shot number below first_sn wraps around in
            # the unsigned subtraction and is removed by the bound
            # check below)
            sni = np.where(index < dset.shape[0], True, False)
            index = index[sni]
        else:
            # shot numbers are NOT sequential -- read the cheaper of a
            # head window, a tail window, or the whole column
            step_front_read = shotnum[-1] - first_sn
            step_end_read = last_sn - shotnum[0]
            if dset.shape[0] <= 1 + min(step_front_read, step_end_read):
                # dset.shape is smaller than the theoretical reads from
                # either end of the array -> read the whole column
                dset_sn = dset[shotnumkey].view()
                sni = np.isin(shotnum, dset_sn)
                # define index
                index = np.where(np.isin(dset_sn, shotnum))[0]
            elif step_front_read <= step_end_read:
                # extracting from the beginning of the array is the
                # smallest read
                some_dset_sn = dset[0 : step_front_read + 1, shotnumkey]
                sni = np.isin(shotnum, some_dset_sn)
                # define index
                index = np.where(np.isin(some_dset_sn, shotnum))[0]
            else:
                # extracting from the end of the array is the smallest
                # read
                start, stop, step = slice(
                    -step_end_read.astype(np.int32) - 1, None, None
                ).indices(dset.shape[0])
                some_dset_sn = dset[start::, shotnumkey]
                sni = np.isin(shotnum, some_dset_sn)
                # define index
                # NOTE: if index is empty (i.e. index.shape[0] == 0)
                #       then adding an int still returns an empty array
                index = np.where(np.isin(some_dset_sn, shotnum))[0]
                index += start
    # return views of the calculated arrays
    return index.view(), sni.view()
def build_sndr_for_complex_dset(
    shotnum: np.ndarray,
    dset: "h5py.Dataset",
    shotnumkey: str,
    cmap: "ControlMap",
    cconfn: Any,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compares the **shotnum** numpy array to the specified "complex"
    dataset, **dset**, to determine which indices contain the desired
    shot number(s).  As a result, two numpy arrays are returned which
    satisfy the rule::

        shotnum[sni] = dset[index, shotnumkey]

    where **shotnum** is the original shot number array, **sni** is a
    boolean numpy array masking which shot numbers were determined to
    be in the dataset, and **index** is an array of indices
    corresponding to the desired shot number(s).

    A "complex" dataset is a dataset in which the data for MULTIPLE
    configurations is recorded.

    .. admonition:: Dataset Assumption

        There is an assumption that each shot number spans **n_configs**
        number of rows in the dataset, where **n_configs** is the number
        of control device configurations.  It is also assumed that the
        order in which the configs are recorded is the same for each
        shot number.  That is, if there are 3 configs (config01,
        config02, and config03) and the first three rows of the dataset
        are recorded in that order, then each following grouping of
        three rows will maintain that order.

    :param shotnum: desired HDF5 shot number
    :param dset: dataset containing shot numbers
    :type dset: :class:`h5py.Dataset`
    :param str shotnumkey: field name in the dataset that contains
        the shot numbers
    :param cmap: mapping object for control device
    :param cconfn: configuration name for the control device
    :return: :code:`index` and :code:`sni` numpy arrays
    :raises ValueError: if the dataset has no configuration field, or if
        its layout violates the assumptions above
    """
    # number of dataset rows each shot number occupies
    n_configs = len(cmap.configs)

    # determine configkey
    # - configkey is the dataset field name for the column that contains
    #   the associated configuration name
    #
    configkey = ""
    for df in dset.dtype.names:
        if "configuration" in df.casefold():
            configkey = df
            break
    if configkey == "":
        raise ValueError(
            f"Can NOT find a configuration field in the control "
            f"({cmap.device_name}) dataset"
        )

    # find index
    if dset.shape[0] == n_configs:
        # only one possible shotnum, index can be 0 to n_configs-1
        #
        # NOTE: The HDF5 configuration field stores a string with the
        #       name of the configuration.  When reading that into a
        #       numpy array the string becomes a byte string (i.e. b'').
        #       When comparing with np.where() the comparison string
        #       needs to be encoded (i.e. cconfn.encode()).
        #
        only_sn = dset[0, shotnumkey]
        sni = np.where(shotnum == only_sn, True, False)
        # construct index
        if True not in sni:
            # shotnum does not contain only_sn
            index = np.empty(shape=0, dtype=np.uint32)
        else:
            config_name_arr = dset[0:n_configs, configkey]
            index = np.where(config_name_arr == cconfn.encode())[0]
            if index.size != 1:  # pragma: no cover
                # something went wrong...no configurations are found
                # and, thus, the routine's assumptions do not match
                # the format of the dataset
                raise ValueError(
                    "The specified dataset is NOT consistent with the "
                    "routines assumptions of a complex dataset"
                )
    else:
        # get 1st and last shot number
        first_sn = dset[0, shotnumkey]
        last_sn = dset[-1, shotnumkey]
        # find sub-group index corresponding to the requested device
        # configuration
        config_name_arr = dset[0:n_configs, configkey]
        config_where = np.where(config_name_arr == cconfn.encode())[0]
        if config_where.size == 1:
            config_subindex = config_where[0]
        else:  # pragma: no cover
            # something went wrong...either no configurations
            # are found or the routine's assumptions do not
            # match the format of the dataset
            raise ValueError(
                "The specified dataset is NOT consistent with the "
                "routines assumptions of a complex dataset"
            )
        # construct index for remaining scenarios
        if n_configs * (last_sn - first_sn + 1) == dset.shape[0]:
            # shot numbers are sequential and there are n_configs per
            # shot number
            index = shotnum - first_sn
            # adjust index to correspond to associated configuration
            # - stretch by n_configs then shift by config_subindex
            #
            index = (n_configs * index) + config_subindex
            # build sni and filter index
            sni = np.where(index < dset.shape[0], True, False)
            index = index[sni]
        else:
            # shot numbers are NOT sequential
            step_front_read = shotnum[-1] - first_sn
            step_end_read = last_sn - shotnum[0]
            # construct index and sni
            if dset.shape[0] <= n_configs * (min(step_front_read, step_end_read) + 1):
                # dset.shape is smaller than the theoretical
                # sequential array -> stride over the whole column
                dset_sn = dset[config_subindex::n_configs, shotnumkey]
                sni = np.isin(shotnum, dset_sn)
                index = np.where(np.isin(dset_sn, shotnum))[0]
                # adjust index to correspond to associated configuration
                index = (index * n_configs) + config_subindex
            elif step_front_read <= step_end_read:
                # extracting from the beginning of the array is the
                # smallest read
                start = config_subindex
                stop = n_configs * (step_front_read + 1)
                stop += config_subindex
                step = n_configs
                some_dset_sn = dset[start:stop:step, shotnumkey]
                sni = np.isin(shotnum, some_dset_sn)
                index = np.where(np.isin(some_dset_sn, shotnum))[0]
                # adjust index to correspond to associated configuration
                index = (index * n_configs) + config_subindex
            else:
                # extracting from the end of the array is the
                # smallest read
                start, stop, step = slice(
                    -n_configs * (step_end_read + 1), None, n_configs
                ).indices(dset.shape[0])
                start += config_subindex
                some_dset_sn = dset[start:stop:step, shotnumkey]
                sni = np.isin(shotnum, some_dset_sn)
                index = np.where(np.isin(some_dset_sn, shotnum))[0]
                # adjust index to correspond to associated configuration
                index = (index * n_configs) + start
    # return views of the calculated arrays
    return index.view(), sni.view()
def condition_controls(hdf_file: "File", controls: Any) -> List[Tuple[str, Any]]:
    """
    Conditions the **controls** argument for
    :class:`~.hdfreadcontrols.HDFReadControls` and
    :class:`~.hdfreaddata.HDFReadData`.

    :param hdf_file: HDF5 object instance
    :param controls: `controls` argument to be conditioned
    :return: list containing tuple pairs of control device name and
        desired configuration name
    :raises ValueError: if `controls` is NULL, a tuple element has an
        invalid length, a control device is repeated, a configuration
        name is invalid, or a control device is not in the HDF5 file
    :raises TypeError: if `controls` is not a string or Iterable of
        strings/tuples, or if two devices share a contype

    :Example:

        >>> from bapsflib import lapd
        >>> f = lapd.File('sample.hdf5')
        >>> controls = ['Waveform', ('6K Compumotor', 3)]
        >>> conditioned_controls = condition_controls(f, controls)
        >>> conditioned_controls
        [('Waveform', 'config01'), ('6K Compumotor', 3)]

    .. admonition:: Condition Criteria

        #. Input **controls** should be
           :code:`Union[str, Iterable[Union[str, Tuple[str, Any]]]]`
        #. There can only be one control for each
           :class:`~bapsflib._hdf.maps.controls.types.ConType`.
        #. If a control has multiple configurations, then one must be
           specified.
        #. If a control has ONLY ONE configuration, then that will be
           assumed (and checked against the specified configuration).
    """
    # grab instance of file mapping
    _fmap = hdf_file.file_map

    # -- condition 'controls' argument                              ----
    # - controls is:
    #   1. a string or Iterable
    #   2. each element is either a string or tuple
    #   3. if tuple, then length is 1 or 2
    #      ('control name',) or ('control_name', config_name)
    #
    # catch a null controls
    if not bool(controls):
        raise ValueError("controls argument is NULL")

    # make string a list
    if isinstance(controls, str):
        controls = [controls]
    if not isinstance(controls, Iterable):
        raise TypeError("`controls` argument is not Iterable")

    # all list items have to be strings or tuples
    if not all(isinstance(con, (str, tuple)) for con in controls):
        raise TypeError("all elements of `controls` must be of type string or tuple")

    # normalize each entry into a (name, config_name) pair
    new_controls = []
    for control in controls:
        if isinstance(control, str):
            name = control
            config_name = None
        else:
            # tuple case -- reject both empty and over-long tuples here
            # (an empty tuple previously fell through to an opaque
            # IndexError on control[0])
            if len(control) not in (1, 2):
                raise ValueError(
                    "a `controls` tuple element must be specified "
                    "as ('control name') or, "
                    "('control name', config_name)"
                )
            name = control[0]
            config_name = None if len(control) == 1 else control[1]

        # ensure proper control and configuration name are defined
        if name in [cc[0] for cc in new_controls]:
            raise ValueError(
                f"Control device ({name}) can only have one occurrence in controls"
            )
        elif name in _fmap.controls:
            if config_name in _fmap.controls[name].configs:
                # explicitly specified configuration is valid
                pass
            elif len(_fmap.controls[name].configs) == 1 and config_name is None:
                # device has exactly one configuration -> assume it
                config_name = list(_fmap.controls[name].configs)[0]
            else:
                raise ValueError(
                    f"'{config_name}' is not a valid configuration name for "
                    f"control device '{name}'"
                )
        else:
            raise ValueError(f"Control device ({name}) not in HDF5 file")

        # add control to new_controls
        new_controls.append((name, config_name))

    # re-assign `controls`
    controls = new_controls

    # enforce one control per contype
    checked = []
    for control in controls:
        # control is a (name, config_name) tuple at this point
        contype = _fmap.controls[control[0]].contype
        if contype in checked:
            raise TypeError("`controls` has multiple devices per contype")
        else:
            checked.append(contype)

    # return conditioned list
    return controls
def condition_shotnum(
    shotnum: Any, dset_dict: Dict[str, "h5py.Dataset"], shotnumkey_dict: Dict[str, str]
) -> np.ndarray:
    r"""
    Conditions the **shotnum** argument for
    :class:`~bapsflib._hdf.utils.hdfreadcontrols.HDFReadControls` and
    :class:`~bapsflib._hdf.utils.hdfreaddata.HDFReadData`.

    :param shotnum: desired HDF5 shot numbers (:class:`int`, numpy
        integer scalar, :class:`list` of ints, :class:`slice`, or a 1-D
        integer :class:`numpy.ndarray`)
    :param dset_dict: dictionary of all control dataset instances (only
        consulted to bound an open-ended :class:`slice`)
    :param shotnumkey_dict: dictionary of the shot number field name
        for each control dataset in dset_dict
    :return: conditioned **shotnum** numpy array (``dtype=np.uint32``,
        sorted ascending, non-positive values removed)
    :raises ValueError: if **shotnum** is of an unsupported type or the
        conditioned array would be NULL

    .. admonition:: Condition Criteria

        #. Input **shotnum** should be
           :code:`Union[int, List[int,...], slice, np.ndarray]`
        #. Any :math:`\mathbf{shotnum} \le 0` will be removed.
        #. A :code:`ValueError` will be thrown if the conditioned array
           is NULL.

    .. note::

        The caller's ``list``/``ndarray`` is NOT modified in place.
    """
    # Acceptable `shotnum` types:
    #   1. int (or numpy integer scalar)
    #   2. slice() object
    #   3. List[int, ...]
    #   4. np.array (dtype = np.integer and ndim = 1)
    #
    # Catch each `shotnum` type and convert to a numpy array
    #
    if isinstance(shotnum, (int, np.integer)):
        # bools are ints but are never valid shot numbers
        if shotnum <= 0 or isinstance(shotnum, bool):
            raise ValueError(
                f"Valid `shotnum` ({shotnum}) not passed. Resulting array would be NULL."
            )
        # convert
        shotnum = np.array([shotnum], dtype=np.uint32)
    elif isinstance(shotnum, list):
        # ensure all elements are int
        if not all(isinstance(sn, int) for sn in shotnum):
            raise ValueError("Valid `shotnum` not passed. All values NOT int.")
        # de-duplicate and sort a COPY (do not mutate the caller's list)
        shotnum = sorted(set(shotnum))
        # remove shot numbers <= 0
        if shotnum and min(shotnum) <= 0:
            shotnum = [sn for sn in shotnum if sn > 0]
        # ensure not NULL
        if len(shotnum) == 0:
            raise ValueError("Valid `shotnum` not passed. Resulting array would be NULL")
        # convert
        shotnum = np.array(shotnum, dtype=np.uint32)
    elif isinstance(shotnum, slice):
        # determine largest possible shot number
        last_sn = [
            dset_dict[cname][-1, shotnumkey_dict[cname]] + 1 for cname in dset_dict
        ]
        if shotnum.stop is not None:
            last_sn.append(shotnum.stop)
        stop_sn = max(last_sn)
        # get the start, stop, and step for the shot number array
        start, stop, step = shotnum.indices(stop_sn)
        # re-define `shotnum`
        shotnum = np.arange(start, stop, step, dtype=np.int32)
        # remove shot numbers <= 0
        shotnum = np.delete(shotnum, np.where(shotnum <= 0)[0])
        shotnum = shotnum.astype(np.uint32)
        # ensure not NULL
        if shotnum.size == 0:
            raise ValueError("Valid `shotnum` not passed. Resulting array would be NULL")
    elif isinstance(shotnum, np.ndarray):
        if shotnum.ndim != 1:
            shotnum = shotnum.squeeze()
        if (
            shotnum.ndim != 1
            or not np.issubdtype(shotnum.dtype, np.integer)
            or bool(shotnum.dtype.names)
        ):
            raise ValueError("Valid `shotnum` not passed")
        # sort a COPY so the caller's array is not mutated
        shotnum = np.sort(shotnum)
        # remove shot numbers <= 0
        shotnum = np.delete(shotnum, np.where(shotnum <= 0)[0])
        shotnum = shotnum.astype(np.uint32)
        # ensure not NULL
        if shotnum.size == 0:
            raise ValueError("Valid `shotnum` not passed. Resulting array would be NULL")
    else:
        raise ValueError("Valid `shotnum` not passed")
    # return
    return shotnum
def do_shotnum_intersection(
    shotnum: np.ndarray, sni_dict: IndexDict, index_dict: IndexDict
) -> Tuple[np.ndarray, IndexDict, IndexDict]:
    """
    Reduce **shotnum** to the shot numbers present in *every* dataset
    and re-filter each dataset's ``index``/``sni`` arrays to match.

    :param shotnum: desired HDF5 shot numbers
    :param sni_dict: dictionary of all dataset **sni** arrays
    :param index_dict: dictionary of all dataset **index** arrays
    :return: the intersected shot number array together with the updated
        ``sni_dict`` and ``index_dict`` (both modified in place)
    :raises ValueError: if no shot number is common to all datasets

    .. admonition:: Recall Array Relationship

        .. code-block:: python

            shotnum[sni] = dset[index, shotnumkey]
    """
    # shot numbers common to every dataset
    common = shotnum
    for mask in sni_dict.values():
        common = np.intersect1d(common, shotnum[mask], assume_unique=True)
    if common.shape[0] == 0:
        raise ValueError("Input `shotnum` would result in a NULL array")

    # re-filter each dataset's arrays against the common shot numbers
    for cname in index_dict:
        keep = np.isin(shotnum[sni_dict[cname]], common)
        index_dict[cname] = index_dict[cname][keep]
        # every surviving shot number is now present in this dataset
        sni_dict[cname] = np.ones(common.shape, dtype=bool)

    return common, sni_dict, index_dict
| [
"numpy.isin",
"numpy.empty",
"numpy.ones",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.intersect1d",
"numpy.issubdtype"
] | [((4734, 4775), 'numpy.where', 'np.where', (['(shotnum == only_sn)', '(True)', '(False)'], {}), '(shotnum == only_sn, True, False)\n', (4742, 4775), True, 'import numpy as np\n'), ((9817, 9858), 'numpy.where', 'np.where', (['(shotnum == only_sn)', '(True)', '(False)'], {}), '(shotnum == only_sn, True, False)\n', (9825, 9858), True, 'import numpy as np\n'), ((19607, 19643), 'numpy.array', 'np.array', (['[shotnum]'], {'dtype': 'np.uint32'}), '([shotnum], dtype=np.uint32)\n', (19615, 19643), True, 'import numpy as np\n'), ((22716, 22783), 'numpy.intersect1d', 'np.intersect1d', (['shotnum_intersect', 'shotnum[sni]'], {'assume_unique': '(True)'}), '(shotnum_intersect, shotnum[sni], assume_unique=True)\n', (22730, 22783), True, 'import numpy as np\n'), ((23021, 23061), 'numpy.isin', 'np.isin', (['shotnum[sni]', 'shotnum_intersect'], {}), '(shotnum[sni], shotnum_intersect)\n', (23028, 23061), True, 'import numpy as np\n'), ((23150, 23194), 'numpy.ones', 'np.ones', (['shotnum_intersect.shape'], {'dtype': 'bool'}), '(shotnum_intersect.shape, dtype=bool)\n', (23157, 23194), True, 'import numpy as np\n'), ((4792, 4805), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4800, 4805), True, 'import numpy as np\n'), ((4826, 4860), 'numpy.empty', 'np.empty', ([], {'shape': '(0)', 'dtype': 'np.uint32'}), '(shape=0, dtype=np.uint32)\n', (4834, 4860), True, 'import numpy as np\n'), ((5182, 5226), 'numpy.where', 'np.where', (['(index < dset.shape[0])', '(True)', '(False)'], {}), '(index < dset.shape[0], True, False)\n', (5190, 5226), True, 'import numpy as np\n'), ((9981, 10015), 'numpy.empty', 'np.empty', ([], {'shape': '(0)', 'dtype': 'np.uint32'}), '(shape=0, dtype=np.uint32)\n', (9989, 10015), True, 'import numpy as np\n'), ((11900, 11944), 'numpy.where', 'np.where', (['(index < dset.shape[0])', '(True)', '(False)'], {}), '(index < dset.shape[0], True, False)\n', (11908, 11944), True, 'import numpy as np\n'), ((20326, 20360), 'numpy.array', 'np.array', (['shotnum'], 
{'dtype': 'np.uint32'}), '(shotnum, dtype=np.uint32)\n', (20334, 20360), True, 'import numpy as np\n'), ((5698, 5723), 'numpy.isin', 'np.isin', (['shotnum', 'dset_sn'], {}), '(shotnum, dset_sn)\n', (5705, 5723), True, 'import numpy as np\n'), ((12453, 12478), 'numpy.isin', 'np.isin', (['shotnum', 'dset_sn'], {}), '(shotnum, dset_sn)\n', (12460, 12478), True, 'import numpy as np\n'), ((20840, 20884), 'numpy.arange', 'np.arange', (['start', 'stop', 'step'], {'dtype': 'np.int32'}), '(start, stop, step, dtype=np.int32)\n', (20849, 20884), True, 'import numpy as np\n'), ((6060, 6090), 'numpy.isin', 'np.isin', (['shotnum', 'some_dset_sn'], {}), '(shotnum, some_dset_sn)\n', (6067, 6090), True, 'import numpy as np\n'), ((6511, 6541), 'numpy.isin', 'np.isin', (['shotnum', 'some_dset_sn'], {}), '(shotnum, some_dset_sn)\n', (6518, 6541), True, 'import numpy as np\n'), ((13081, 13111), 'numpy.isin', 'np.isin', (['shotnum', 'some_dset_sn'], {}), '(shotnum, some_dset_sn)\n', (13088, 13111), True, 'import numpy as np\n'), ((13705, 13735), 'numpy.isin', 'np.isin', (['shotnum', 'some_dset_sn'], {}), '(shotnum, some_dset_sn)\n', (13712, 13735), True, 'import numpy as np\n'), ((5789, 5814), 'numpy.isin', 'np.isin', (['dset_sn', 'shotnum'], {}), '(dset_sn, shotnum)\n', (5796, 5814), True, 'import numpy as np\n'), ((12512, 12537), 'numpy.isin', 'np.isin', (['dset_sn', 'shotnum'], {}), '(dset_sn, shotnum)\n', (12519, 12537), True, 'import numpy as np\n'), ((20958, 20980), 'numpy.where', 'np.where', (['(shotnum <= 0)'], {}), '(shotnum <= 0)\n', (20966, 20980), True, 'import numpy as np\n'), ((6156, 6186), 'numpy.isin', 'np.isin', (['some_dset_sn', 'shotnum'], {}), '(some_dset_sn, shotnum)\n', (6163, 6186), True, 'import numpy as np\n'), ((6748, 6778), 'numpy.isin', 'np.isin', (['some_dset_sn', 'shotnum'], {}), '(some_dset_sn, shotnum)\n', (6755, 6778), True, 'import numpy as np\n'), ((13145, 13175), 'numpy.isin', 'np.isin', (['some_dset_sn', 'shotnum'], {}), '(some_dset_sn, shotnum)\n', 
(13152, 13175), True, 'import numpy as np\n'), ((13769, 13799), 'numpy.isin', 'np.isin', (['some_dset_sn', 'shotnum'], {}), '(some_dset_sn, shotnum)\n', (13776, 13799), True, 'import numpy as np\n'), ((21351, 21391), 'numpy.issubdtype', 'np.issubdtype', (['shotnum.dtype', 'np.integer'], {}), '(shotnum.dtype, np.integer)\n', (21364, 21391), True, 'import numpy as np\n'), ((21599, 21621), 'numpy.where', 'np.where', (['(shotnum <= 0)'], {}), '(shotnum <= 0)\n', (21607, 21621), True, 'import numpy as np\n')] |
import collections
import numpy as np
import os
import sys
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
import abi.core.rnn_utils as rnn_utils
import abi.misc.tf_utils as tf_utils
import abi.misc.utils as utils
class RNNVAE(object):
def __init__(
self,
max_len,
obs_dim,
act_dim,
batch_size,
dropout_keep_prob=1.,
enc_hidden_dim=64,
z_dim=32,
dec_hidden_dim=64,
kl_initial=0.0,
kl_final=1.0,
kl_steps=10000,
kl_loss_min=.2,
learning_rate=5e-4,
grad_clip=1.,
tile_z=True,
n_encoding_batches=2):
self.max_len = max_len
self.obs_dim = obs_dim
self.act_dim = act_dim
self.batch_size = batch_size
self.dropout_keep_prob = dropout_keep_prob
self.enc_hidden_dim = enc_hidden_dim
self.z_dim = z_dim
self.dec_hidden_dim = dec_hidden_dim
self.kl_initial = kl_initial
self.kl_final = kl_final
self.kl_steps = kl_steps
self.kl_loss_min = kl_loss_min
self.learning_rate = learning_rate
self.grad_clip = grad_clip
self.tile_z = tile_z
self.n_encoding_batches = n_encoding_batches
self._build_model()
def _build_model(self):
self._build_placeholders()
self._build_perception()
self._build_encoder()
self._build_decoder()
self._build_loss()
self._build_train_op()
self._build_summary_op()
def _build_placeholders(self):
self.obs = tf.placeholder(tf.float32, (self.batch_size, self.max_len, self.obs_dim), 'obs')
self.act = tf.placeholder(tf.float32, (self.batch_size, self.max_len, self.act_dim), 'act')
self.lengths = tf.placeholder(tf.int32, (self.batch_size,), 'lengths')
self.sequence_mask = tf.sequence_mask(self.lengths, maxlen=self.max_len, dtype=tf.float32)
self.dropout_keep_prob_ph = tf.placeholder_with_default(self.dropout_keep_prob, (), 'dropout_keep_prob')
self.global_step = tf.Variable(0, trainable=False, name='global_step')
def _build_perception(self):
self.enc_inputs = tf.concat((self.obs, self.act), axis=-1)
self.dec_inputs = self.obs
def _build_encoder(self):
self.enc_cell_fw = rnn_utils._build_recurrent_cell(self.enc_hidden_dim, self.dropout_keep_prob_ph)
self.enc_cell_bw = rnn_utils._build_recurrent_cell(self.enc_hidden_dim, self.dropout_keep_prob_ph)
# inputs is assumed to be padded at the start with a <start> token or state
# in the case of continuous values, probably just zeros
# so for _encoding_ we ignore this padding
outputs, states = tf.nn.bidirectional_dynamic_rnn(
self.enc_cell_fw,
self.enc_cell_bw,
inputs=self.enc_inputs,
sequence_length=self.lengths,
dtype=tf.float32,
time_major=False
)
# since the inputs are zero-padded, we can't just use the outputs
# because the invalid timesteps will be zeros
# instead we manually extract the last hidden states for each sample
# in the batch and use those to define the posterior
hidden_fw = self.enc_cell_fw.get_output(states[0])
hidden_bw = self.enc_cell_bw.get_output(states[1])
hidden = tf.concat((hidden_fw, hidden_bw), axis=1)
# output the parameters of a diagonal gaussian from which to sample
# the z value
self.z_mean = tf.contrib.layers.fully_connected(
hidden,
self.z_dim,
activation_fn=None
)
self.z_logvar = tf.contrib.layers.fully_connected(
hidden,
self.z_dim,
activation_fn=None
)
self.z_sigma = tf.exp(self.z_logvar / 2.)
# sample z
noise = tf.random_normal((self.batch_size, self.z_dim), 0.0, 1.0)
self.z = self.z_mean + self.z_sigma * noise
def _build_decoder(self):
self.dec_cell = rnn_utils._build_recurrent_cell(self.dec_hidden_dim, self.dropout_keep_prob_ph)
# the initial state of the rnn cells is a function of z as well
# tanh because we want the values to be small
self.initial_state = tf.nn.tanh(tf.contrib.layers.fully_connected(
self.z,
self.dec_cell.input_size * 2,
activation_fn=None
))
# we optionally tile the z value, concatenating to each timestep input
# the reason for this is that it reduces the burden placed on the
# decoder hidden state and further encourages usage of the latent var
if self.tile_z:
tile_z = tf.reshape(self.z, (self.batch_size, 1, self.z_dim))
tile_z = tf.tile(tile_z, (1, self.max_len, 1))
self.dec_inputs = tf.concat((self.dec_inputs, tile_z), axis=2)
outputs, states = tf.nn.dynamic_rnn(
self.dec_cell,
inputs=self.dec_inputs,
sequence_length=self.lengths,
initial_state=self.initial_state,
dtype=tf.float32,
time_major=False
)
# map the outputs to mean and logvar of gaussian over actions
act_sequence_mask = tf.reshape(self.sequence_mask, (self.batch_size, self.max_len, 1))
epsilon = 1e-8
outputs = tf.reshape(outputs, (-1, self.dec_cell.output_size))
act_mean = tf.contrib.layers.fully_connected(
outputs,
self.act_dim,
activation_fn=None
)
self.act_mean = tf.reshape(act_mean, (self.batch_size, self.max_len, self.act_dim))
self.act_mean *= act_sequence_mask
act_logvar = tf.contrib.layers.fully_connected(
outputs,
self.act_dim,
activation_fn=None
)
self.act_logvar = tf.reshape(act_logvar, (self.batch_size, self.max_len, self.act_dim))
self.act_logvar *= act_sequence_mask + epsilon
self.act_sigma = tf.exp(self.act_logvar / 2.)
self.act_sigma *= act_sequence_mask + epsilon
def _build_loss(self):
# kl loss
# this measures the kl between the posterior distribution output from
# the encoder over z, and the prior of z which we choose as a unit gaussian
self.kl_loss = -0.5 * tf.reduce_mean(
(1 + self.z_logvar - tf.square(self.z_mean) - tf.exp(self.z_logvar)))
self.kl_loss = tf.maximum(self.kl_loss, self.kl_loss_min)
# gradually increase the weight of the kl loss with a coefficient
# we do this because there's no explicit reason why the model should
# use the z value (i.e., nothing in the objective encourages this)
# so by starting with a small loss value for "using" the z (i.e, outputting
# a posterior distribution over z quite different from a unit gaussian)
# the network basically becomes reliant on the z value, and then due to
# the optimization being locally optimal and other factors the network
# continues to use the z values in informing outputs
self.kl_weight = tf.train.polynomial_decay(
self.kl_initial,
self.global_step,
self.kl_steps,
end_learning_rate=self.kl_final,
power=2.0,
name='kl_weight'
)
# reconstruction loss
# output mean and sigma of a gaussian for the actions
# and compute reconstruction loss as the -log prob of true values
dist = tf.contrib.distributions.MultivariateNormalDiag(self.act_mean, self.act_sigma)
data_loss = -dist.log_prob(self.act)
# can't remember how many times I've messed this part up
# at this point, the data_loss has shape (batch_size, max_len)
# a lot of the values of this array are invalid, though, because they
# correspond to padded values, so we have to mask them out
data_loss = self.sequence_mask * data_loss
# then we want to average over the timesteps of each sample
# since values are invalid, we can't just take the mean
# we have to sum in order to ignore the zeros, and then divide by the
# lengths to get the correct average
data_loss = tf.reduce_sum(data_loss, axis=1) / tf.cast(self.lengths, tf.float32)
# then finally average over the batch
self.data_loss = tf.reduce_mean(data_loss)
self.loss = self.data_loss + self.kl_weight * self.kl_loss
# summaries
tf.summary.scalar('loss', self.loss)
tf.summary.scalar('data_loss', self.data_loss)
tf.summary.scalar('kl_loss', self.kl_loss)
tf.summary.scalar('kl_weight', self.kl_weight)
def _build_train_op(self):
self.var_list = tf.trainable_variables()
optimizer = tf.train.AdamOptimizer(self.learning_rate)
# compute gradients, then clip them because otherwise they'll tend
# to explode
grads_vars = optimizer.compute_gradients(self.loss, self.var_list)
clipped_grads_vars = [(tf.clip_by_value(g, -self.grad_clip, self.grad_clip), v)
for (g,v) in grads_vars]
self.train_op = optimizer.apply_gradients(clipped_grads_vars, global_step=self.global_step)
# summaries
tf.summary.scalar('grads_global_norm', tf.global_norm([g for (g,_) in grads_vars]))
tf.summary.scalar('clipped_grads_global_norm', tf.global_norm([g for (g,_) in clipped_grads_vars]))
tf.summary.scalar('vars_global_norm', tf.global_norm(self.var_list))
tf.summary.scalar('learning_rate', self.learning_rate)
def _build_summary_op(self):
self.summary_op = tf.summary.merge_all()
# embeddings variable used for visualization in tensorboard
n_samples = self.batch_size * self.n_encoding_batches
self.encoding_ph = tf.placeholder(tf.float32, (n_samples, self.z_dim), 'encodings')
self.encoding = tf.Variable(
tf.zeros((n_samples, self.z_dim), dtype=tf.float32),
trainable=False,
dtype=tf.float32,
name='encoding'
)
self.assign_encoding = tf.assign(self.encoding, self.encoding_ph)
def _train_batch(self, batch, info, writer=None, train=True):
outputs = [self.global_step, self.summary_op, self.data_loss, self.kl_loss]
if train:
outputs += [self.train_op]
feed = {
self.obs: batch['obs'],
self.act: batch['act'],
self.lengths: batch['lengths'],
self.dropout_keep_prob_ph: self.dropout_keep_prob if train else 1.
}
sess = tf.get_default_session()
fetched = sess.run(outputs, feed_dict=feed)
if train:
step, summary, data_loss, kl_loss, _ = fetched
else:
step, summary, data_loss, kl_loss = fetched
if writer is not None:
writer.add_summary(summary, step)
info['data_loss'] += data_loss
info['kl_loss'] += kl_loss
info['itr'] += 1
def _report(self, info, name, epoch, n_epochs, batch, n_batches):
msg = '\r{} epoch: {} / {} batch: {} / {}'.format(
name, epoch+1, n_epochs, batch+1, n_batches)
keys = sorted(info.keys())
for k in keys:
if k != 'itr':
msg += ' {}: {:.5f} '.format(k, info[k] / info['itr'])
sys.stdout.write(msg)
    def _validate(self, dataset, writer, epoch, name):
        """Log encoding visualizations for one dataset split.

        Reconstructs a fixed-size sample, writes a scatter summary of the
        posterior means, and (for the training split only) refreshes the
        TensorBoard embedding-projector data.
        """
        if writer is None:
            return
        batch = dataset.sample(self.batch_size * self.n_encoding_batches)
        info = self.reconstruct(batch['obs'], batch['act'], batch['lengths'])
        summary = tf_utils.scatter_encodings_summary(info['mean'], name)
        writer.add_summary(summary, epoch)
        # encodings only for training
        if name == 'val':
            return
        # assign encodings as well
        # this does only one of training or validation, whichever comes last
        # before saving, which is validation, provided validation is performed
        sess = tf.get_default_session()
        sess.run(self.assign_encoding, feed_dict={self.encoding_ph: info['mean']})
        # write the metadata file as well
        logdir = writer.get_logdir()
        filepath = os.path.join(logdir, 'metadata.tsv')
        utils.write_metadata(filepath, batch['metadata'], batch['meta_labels'])
        # point the projector at the encoding variable and its metadata file
        config = projector.ProjectorConfig()
        embed = config.embeddings.add()
        embed.tensor_name = self.encoding.name
        embed.metadata_path = 'metadata.tsv'
        projector.visualize_embeddings(writer, config)
def _save(self, saver, writer, epoch):
if saver is not None and writer is not None:
save_dir = writer.get_logdir()
sess = tf.get_default_session()
filepath = os.path.join(save_dir, 'chkpt_{}'.format(epoch))
saver.save(sess, filepath)
def train(
self,
dataset,
val_dataset=None,
n_epochs=100,
writer=None,
val_writer=None,
verbose=True,
saver=None):
for epoch in range(n_epochs):
train_info = collections.defaultdict(float)
for bidx, batch in enumerate(dataset.batches()):
self._train_batch(batch, train_info, writer)
self._report(train_info, 'train', epoch, n_epochs, bidx, dataset.n_batches)
self._validate(dataset, writer, epoch, 'train')
if val_dataset is not None:
val_info = collections.defaultdict(float)
for bidx, batch in enumerate(val_dataset.batches()):
self._train_batch(batch, val_info, val_writer, train=False)
self._report(val_info, 'val', epoch, n_epochs, bidx, val_dataset.n_batches)
self._validate(val_dataset, val_writer, epoch, 'val')
self._save(saver, writer, epoch)
def reconstruct(self, obs, act, lengths):
# setup
sess = tf.get_default_session()
bs = self.batch_size
n_samples = len(obs)
n_batches = utils.compute_n_batches(n_samples, bs)
# allocate return containers
z = np.zeros((n_samples, self.z_dim))
mean = np.zeros((n_samples, self.z_dim))
sigma = np.zeros((n_samples, self.z_dim))
act_mean = np.zeros((n_samples, self.max_len, self.act_dim))
act_sigma = np.zeros((n_samples, self.max_len, self.act_dim))
data_loss = 0
kl_loss = 0
# formulate outputs
outputs = [
self.z,
self.z_mean,
self.z_sigma,
self.act_mean,
self.act_sigma,
self.data_loss,
self.kl_loss
]
# run the batches
for bidx in range(n_batches):
idxs = utils.compute_batch_idxs(bidx * bs, bs, n_samples)
feed = {
self.obs: obs[idxs],
self.act: act[idxs],
self.lengths: lengths[idxs]
}
fetched = sess.run(outputs, feed_dict=feed)
# unpack
z[idxs] = fetched[0]
mean[idxs] = fetched[1]
sigma[idxs] = fetched[2]
act_mean[idxs] = fetched[3]
act_sigma[idxs] = fetched[4]
data_loss += fetched[5]
kl_loss += fetched[6]
# return the relevant info
return dict(
z=z,
mean=mean,
sigma=sigma,
act_mean=act_mean,
act_sigma=act_sigma,
data_loss=data_loss,
kl_loss=kl_loss
)
def get_param_values(self):
sess = tf.get_default_session()
return [sess.run(v) for v in self.var_list]
def set_param_values(self, values):
assign = tf.group(*[tf.assign(var, val)
for (var, val) in zip(self.var_list, values)])
sess = tf.get_default_session()
sess.run(assign)
def save_params(self, filepath):
values = self.get_param_values()
np.save(filepath, values)
def load_params(self, filepath):
values = np.load(filepath).item()
self.set_param_values(values)
| [
"sys.stdout.write",
"abi.misc.utils.write_metadata",
"numpy.load",
"tensorflow.reduce_sum",
"tensorflow.trainable_variables",
"tensorflow.clip_by_value",
"tensorflow.maximum",
"tensorflow.reshape",
"collections.defaultdict",
"tensorflow.assign",
"tensorflow.Variable",
"tensorflow.contrib.tenso... | [((1677, 1762), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(self.batch_size, self.max_len, self.obs_dim)', '"""obs"""'], {}), "(tf.float32, (self.batch_size, self.max_len, self.obs_dim), 'obs'\n )\n", (1691, 1762), True, 'import tensorflow as tf\n'), ((1777, 1862), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(self.batch_size, self.max_len, self.act_dim)', '"""act"""'], {}), "(tf.float32, (self.batch_size, self.max_len, self.act_dim), 'act'\n )\n", (1791, 1862), True, 'import tensorflow as tf\n'), ((1881, 1936), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '(self.batch_size,)', '"""lengths"""'], {}), "(tf.int32, (self.batch_size,), 'lengths')\n", (1895, 1936), True, 'import tensorflow as tf\n'), ((1966, 2035), 'tensorflow.sequence_mask', 'tf.sequence_mask', (['self.lengths'], {'maxlen': 'self.max_len', 'dtype': 'tf.float32'}), '(self.lengths, maxlen=self.max_len, dtype=tf.float32)\n', (1982, 2035), True, 'import tensorflow as tf\n'), ((2072, 2148), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['self.dropout_keep_prob', '()', '"""dropout_keep_prob"""'], {}), "(self.dropout_keep_prob, (), 'dropout_keep_prob')\n", (2099, 2148), True, 'import tensorflow as tf\n'), ((2176, 2227), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)', 'name': '"""global_step"""'}), "(0, trainable=False, name='global_step')\n", (2187, 2227), True, 'import tensorflow as tf\n'), ((2288, 2328), 'tensorflow.concat', 'tf.concat', (['(self.obs, self.act)'], {'axis': '(-1)'}), '((self.obs, self.act), axis=-1)\n', (2297, 2328), True, 'import tensorflow as tf\n'), ((2422, 2501), 'abi.core.rnn_utils._build_recurrent_cell', 'rnn_utils._build_recurrent_cell', (['self.enc_hidden_dim', 'self.dropout_keep_prob_ph'], {}), '(self.enc_hidden_dim, self.dropout_keep_prob_ph)\n', (2453, 2501), True, 'import abi.core.rnn_utils as rnn_utils\n'), ((2529, 2608), 
'abi.core.rnn_utils._build_recurrent_cell', 'rnn_utils._build_recurrent_cell', (['self.enc_hidden_dim', 'self.dropout_keep_prob_ph'], {}), '(self.enc_hidden_dim, self.dropout_keep_prob_ph)\n', (2560, 2608), True, 'import abi.core.rnn_utils as rnn_utils\n'), ((2835, 3001), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', (['self.enc_cell_fw', 'self.enc_cell_bw'], {'inputs': 'self.enc_inputs', 'sequence_length': 'self.lengths', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(self.enc_cell_fw, self.enc_cell_bw, inputs=\n self.enc_inputs, sequence_length=self.lengths, dtype=tf.float32,\n time_major=False)\n', (2866, 3001), True, 'import tensorflow as tf\n'), ((3477, 3518), 'tensorflow.concat', 'tf.concat', (['(hidden_fw, hidden_bw)'], {'axis': '(1)'}), '((hidden_fw, hidden_bw), axis=1)\n', (3486, 3518), True, 'import tensorflow as tf\n'), ((3641, 3714), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['hidden', 'self.z_dim'], {'activation_fn': 'None'}), '(hidden, self.z_dim, activation_fn=None)\n', (3674, 3714), True, 'import tensorflow as tf\n'), ((3787, 3860), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['hidden', 'self.z_dim'], {'activation_fn': 'None'}), '(hidden, self.z_dim, activation_fn=None)\n', (3820, 3860), True, 'import tensorflow as tf\n'), ((3930, 3957), 'tensorflow.exp', 'tf.exp', (['(self.z_logvar / 2.0)'], {}), '(self.z_logvar / 2.0)\n', (3936, 3957), True, 'import tensorflow as tf\n'), ((3994, 4051), 'tensorflow.random_normal', 'tf.random_normal', (['(self.batch_size, self.z_dim)', '(0.0)', '(1.0)'], {}), '((self.batch_size, self.z_dim), 0.0, 1.0)\n', (4010, 4051), True, 'import tensorflow as tf\n'), ((4159, 4238), 'abi.core.rnn_utils._build_recurrent_cell', 'rnn_utils._build_recurrent_cell', (['self.dec_hidden_dim', 'self.dropout_keep_prob_ph'], {}), '(self.dec_hidden_dim, self.dropout_keep_prob_ph)\n', (4190, 4238), True, 'import 
abi.core.rnn_utils as rnn_utils\n'), ((5037, 5202), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['self.dec_cell'], {'inputs': 'self.dec_inputs', 'sequence_length': 'self.lengths', 'initial_state': 'self.initial_state', 'dtype': 'tf.float32', 'time_major': '(False)'}), '(self.dec_cell, inputs=self.dec_inputs, sequence_length=\n self.lengths, initial_state=self.initial_state, dtype=tf.float32,\n time_major=False)\n', (5054, 5202), True, 'import tensorflow as tf\n'), ((5375, 5441), 'tensorflow.reshape', 'tf.reshape', (['self.sequence_mask', '(self.batch_size, self.max_len, 1)'], {}), '(self.sequence_mask, (self.batch_size, self.max_len, 1))\n', (5385, 5441), True, 'import tensorflow as tf\n'), ((5483, 5535), 'tensorflow.reshape', 'tf.reshape', (['outputs', '(-1, self.dec_cell.output_size)'], {}), '(outputs, (-1, self.dec_cell.output_size))\n', (5493, 5535), True, 'import tensorflow as tf\n'), ((5555, 5631), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['outputs', 'self.act_dim'], {'activation_fn': 'None'}), '(outputs, self.act_dim, activation_fn=None)\n', (5588, 5631), True, 'import tensorflow as tf\n'), ((5702, 5769), 'tensorflow.reshape', 'tf.reshape', (['act_mean', '(self.batch_size, self.max_len, self.act_dim)'], {}), '(act_mean, (self.batch_size, self.max_len, self.act_dim))\n', (5712, 5769), True, 'import tensorflow as tf\n'), ((5834, 5910), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['outputs', 'self.act_dim'], {'activation_fn': 'None'}), '(outputs, self.act_dim, activation_fn=None)\n', (5867, 5910), True, 'import tensorflow as tf\n'), ((5983, 6052), 'tensorflow.reshape', 'tf.reshape', (['act_logvar', '(self.batch_size, self.max_len, self.act_dim)'], {}), '(act_logvar, (self.batch_size, self.max_len, self.act_dim))\n', (5993, 6052), True, 'import tensorflow as tf\n'), ((6133, 6162), 'tensorflow.exp', 'tf.exp', (['(self.act_logvar / 2.0)'], {}), '(self.act_logvar / 2.0)\n', 
(6139, 6162), True, 'import tensorflow as tf\n'), ((6576, 6618), 'tensorflow.maximum', 'tf.maximum', (['self.kl_loss', 'self.kl_loss_min'], {}), '(self.kl_loss, self.kl_loss_min)\n', (6586, 6618), True, 'import tensorflow as tf\n'), ((7258, 7399), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['self.kl_initial', 'self.global_step', 'self.kl_steps'], {'end_learning_rate': 'self.kl_final', 'power': '(2.0)', 'name': '"""kl_weight"""'}), "(self.kl_initial, self.global_step, self.kl_steps,\n end_learning_rate=self.kl_final, power=2.0, name='kl_weight')\n", (7283, 7399), True, 'import tensorflow as tf\n'), ((7665, 7743), 'tensorflow.contrib.distributions.MultivariateNormalDiag', 'tf.contrib.distributions.MultivariateNormalDiag', (['self.act_mean', 'self.act_sigma'], {}), '(self.act_mean, self.act_sigma)\n', (7712, 7743), True, 'import tensorflow as tf\n'), ((8539, 8564), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['data_loss'], {}), '(data_loss)\n', (8553, 8564), True, 'import tensorflow as tf\n'), ((8662, 8698), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), "('loss', self.loss)\n", (8679, 8698), True, 'import tensorflow as tf\n'), ((8707, 8753), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""data_loss"""', 'self.data_loss'], {}), "('data_loss', self.data_loss)\n", (8724, 8753), True, 'import tensorflow as tf\n'), ((8762, 8804), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""kl_loss"""', 'self.kl_loss'], {}), "('kl_loss', self.kl_loss)\n", (8779, 8804), True, 'import tensorflow as tf\n'), ((8813, 8859), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""kl_weight"""', 'self.kl_weight'], {}), "('kl_weight', self.kl_weight)\n", (8830, 8859), True, 'import tensorflow as tf\n'), ((8916, 8940), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (8938, 8940), True, 'import tensorflow as tf\n'), ((8970, 9012), 'tensorflow.train.AdamOptimizer', 
'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (8992, 9012), True, 'import tensorflow as tf\n'), ((9718, 9772), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.learning_rate'], {}), "('learning_rate', self.learning_rate)\n", (9735, 9772), True, 'import tensorflow as tf\n'), ((9833, 9855), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (9853, 9855), True, 'import tensorflow as tf\n'), ((10014, 10078), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(n_samples, self.z_dim)', '"""encodings"""'], {}), "(tf.float32, (n_samples, self.z_dim), 'encodings')\n", (10028, 10078), True, 'import tensorflow as tf\n'), ((10311, 10353), 'tensorflow.assign', 'tf.assign', (['self.encoding', 'self.encoding_ph'], {}), '(self.encoding, self.encoding_ph)\n', (10320, 10353), True, 'import tensorflow as tf\n'), ((10799, 10823), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (10821, 10823), True, 'import tensorflow as tf\n'), ((11552, 11573), 'sys.stdout.write', 'sys.stdout.write', (['msg'], {}), '(msg)\n', (11568, 11573), False, 'import sys\n'), ((11847, 11901), 'abi.misc.tf_utils.scatter_encodings_summary', 'tf_utils.scatter_encodings_summary', (["info['mean']", 'name'], {}), "(info['mean'], name)\n", (11881, 11901), True, 'import abi.misc.tf_utils as tf_utils\n'), ((12250, 12274), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (12272, 12274), True, 'import tensorflow as tf\n'), ((12457, 12493), 'os.path.join', 'os.path.join', (['logdir', '"""metadata.tsv"""'], {}), "(logdir, 'metadata.tsv')\n", (12469, 12493), False, 'import os\n'), ((12502, 12573), 'abi.misc.utils.write_metadata', 'utils.write_metadata', (['filepath', "batch['metadata']", "batch['meta_labels']"], {}), "(filepath, batch['metadata'], batch['meta_labels'])\n", (12522, 12573), True, 'import abi.misc.utils as utils\n'), ((12591, 12618), 
'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (12616, 12618), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((12759, 12805), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['writer', 'config'], {}), '(writer, config)\n', (12789, 12805), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((14247, 14271), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (14269, 14271), True, 'import tensorflow as tf\n'), ((14350, 14388), 'abi.misc.utils.compute_n_batches', 'utils.compute_n_batches', (['n_samples', 'bs'], {}), '(n_samples, bs)\n', (14373, 14388), True, 'import abi.misc.utils as utils\n'), ((14447, 14480), 'numpy.zeros', 'np.zeros', (['(n_samples, self.z_dim)'], {}), '((n_samples, self.z_dim))\n', (14455, 14480), True, 'import numpy as np\n'), ((14496, 14529), 'numpy.zeros', 'np.zeros', (['(n_samples, self.z_dim)'], {}), '((n_samples, self.z_dim))\n', (14504, 14529), True, 'import numpy as np\n'), ((14546, 14579), 'numpy.zeros', 'np.zeros', (['(n_samples, self.z_dim)'], {}), '((n_samples, self.z_dim))\n', (14554, 14579), True, 'import numpy as np\n'), ((14599, 14648), 'numpy.zeros', 'np.zeros', (['(n_samples, self.max_len, self.act_dim)'], {}), '((n_samples, self.max_len, self.act_dim))\n', (14607, 14648), True, 'import numpy as np\n'), ((14669, 14718), 'numpy.zeros', 'np.zeros', (['(n_samples, self.max_len, self.act_dim)'], {}), '((n_samples, self.max_len, self.act_dim))\n', (14677, 14718), True, 'import numpy as np\n'), ((15931, 15955), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (15953, 15955), True, 'import tensorflow as tf\n'), ((16172, 16196), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (16194, 16196), True, 'import tensorflow as tf\n'), ((16309, 16334), 'numpy.save', 'np.save', 
(['filepath', 'values'], {}), '(filepath, values)\n', (16316, 16334), True, 'import numpy as np\n'), ((4406, 4501), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.z', '(self.dec_cell.input_size * 2)'], {'activation_fn': 'None'}), '(self.z, self.dec_cell.input_size * 2,\n activation_fn=None)\n', (4439, 4501), True, 'import tensorflow as tf\n'), ((4823, 4875), 'tensorflow.reshape', 'tf.reshape', (['self.z', '(self.batch_size, 1, self.z_dim)'], {}), '(self.z, (self.batch_size, 1, self.z_dim))\n', (4833, 4875), True, 'import tensorflow as tf\n'), ((4897, 4934), 'tensorflow.tile', 'tf.tile', (['tile_z', '(1, self.max_len, 1)'], {}), '(tile_z, (1, self.max_len, 1))\n', (4904, 4934), True, 'import tensorflow as tf\n'), ((4965, 5009), 'tensorflow.concat', 'tf.concat', (['(self.dec_inputs, tile_z)'], {'axis': '(2)'}), '((self.dec_inputs, tile_z), axis=2)\n', (4974, 5009), True, 'import tensorflow as tf\n'), ((8399, 8431), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['data_loss'], {'axis': '(1)'}), '(data_loss, axis=1)\n', (8412, 8431), True, 'import tensorflow as tf\n'), ((8434, 8467), 'tensorflow.cast', 'tf.cast', (['self.lengths', 'tf.float32'], {}), '(self.lengths, tf.float32)\n', (8441, 8467), True, 'import tensorflow as tf\n'), ((9480, 9522), 'tensorflow.global_norm', 'tf.global_norm', (['[g for g, _ in grads_vars]'], {}), '([g for g, _ in grads_vars])\n', (9494, 9522), True, 'import tensorflow as tf\n'), ((9580, 9630), 'tensorflow.global_norm', 'tf.global_norm', (['[g for g, _ in clipped_grads_vars]'], {}), '([g for g, _ in clipped_grads_vars])\n', (9594, 9630), True, 'import tensorflow as tf\n'), ((9679, 9708), 'tensorflow.global_norm', 'tf.global_norm', (['self.var_list'], {}), '(self.var_list)\n', (9693, 9708), True, 'import tensorflow as tf\n'), ((10128, 10179), 'tensorflow.zeros', 'tf.zeros', (['(n_samples, self.z_dim)'], {'dtype': 'tf.float32'}), '((n_samples, self.z_dim), dtype=tf.float32)\n', (10136, 10179), True, 
'import tensorflow as tf\n'), ((12965, 12989), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (12987, 12989), True, 'import tensorflow as tf\n'), ((13403, 13433), 'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (13426, 13433), False, 'import collections\n'), ((15085, 15135), 'abi.misc.utils.compute_batch_idxs', 'utils.compute_batch_idxs', (['(bidx * bs)', 'bs', 'n_samples'], {}), '(bidx * bs, bs, n_samples)\n', (15109, 15135), True, 'import abi.misc.utils as utils\n'), ((9217, 9269), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['g', '(-self.grad_clip)', 'self.grad_clip'], {}), '(g, -self.grad_clip, self.grad_clip)\n', (9233, 9269), True, 'import tensorflow as tf\n'), ((13776, 13806), 'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (13799, 13806), False, 'import collections\n'), ((16390, 16407), 'numpy.load', 'np.load', (['filepath'], {}), '(filepath)\n', (16397, 16407), True, 'import numpy as np\n'), ((6529, 6550), 'tensorflow.exp', 'tf.exp', (['self.z_logvar'], {}), '(self.z_logvar)\n', (6535, 6550), True, 'import tensorflow as tf\n'), ((16077, 16096), 'tensorflow.assign', 'tf.assign', (['var', 'val'], {}), '(var, val)\n', (16086, 16096), True, 'import tensorflow as tf\n'), ((6504, 6526), 'tensorflow.square', 'tf.square', (['self.z_mean'], {}), '(self.z_mean)\n', (6513, 6526), True, 'import tensorflow as tf\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import numpy as np
__all__ = ['SmoothedValue', 'TrainingStats']
class SmoothedValue(object):
    """Track a series of values and expose smoothed statistics over a
    sliding window, as well as the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        # default display: windowed median followed by windowed average
        self.fmt = fmt if fmt is not None else "{median:.4f} ({avg:.4f})"
        self.deque = collections.deque(maxlen=window_size)
        self.total = 0.
        self.count = 0

    def update(self, value, n=1):
        """Record `value`, counted `n` times toward the global average."""
        self.deque.append(value)
        self.count += n
        self.total += value * n

    @property
    def median(self):
        """Median over the current window."""
        return np.median(self.deque)

    @property
    def avg(self):
        """Mean over the current window."""
        return np.mean(self.deque)

    @property
    def max(self):
        """Maximum over the current window."""
        return np.max(self.deque)

    @property
    def value(self):
        """Most recently recorded value."""
        return self.deque[-1]

    @property
    def global_avg(self):
        """Weighted average over every value ever recorded."""
        return self.total / self.count

    def __str__(self):
        stats = dict(median=self.median, avg=self.avg,
                     max=self.max, value=self.value)
        return self.fmt.format(**stats)
class TrainingStats(object):
    """Aggregate named training metrics into windowed `SmoothedValue` meters."""

    def __init__(self, window_size, delimiter=' '):
        self.meters = None
        self.window_size = window_size
        self.delimiter = delimiter

    def update(self, stats):
        """Fold a dict of metric tensors into the running meters.

        Each value must expose `.numpy()`; meters are created lazily from
        the keys of the first `stats` dict seen.
        """
        if self.meters is None:
            self.meters = {key: SmoothedValue(self.window_size)
                           for key in stats.keys()}
        for key, meter in self.meters.items():
            meter.update(stats[key].numpy())

    def get(self, extras=None):
        """Return an ordered dict of `extras` followed by smoothed medians."""
        out = collections.OrderedDict()
        if extras:
            out.update(extras)
        for key, meter in self.meters.items():
            out[key] = format(meter.median, '.6f')
        return out

    def log(self, extras=None):
        """Render `get(extras)` as a delimiter-joined 'key: value' string."""
        entries = ('{}: {}'.format(key, str(value))
                   for key, value in self.get(extras).items())
        return self.delimiter.join(entries)
| [
"numpy.median",
"numpy.max",
"numpy.mean",
"collections.OrderedDict",
"collections.deque"
] | [((993, 1030), 'collections.deque', 'collections.deque', ([], {'maxlen': 'window_size'}), '(maxlen=window_size)\n', (1010, 1030), False, 'import collections\n'), ((1277, 1298), 'numpy.median', 'np.median', (['self.deque'], {}), '(self.deque)\n', (1286, 1298), True, 'import numpy as np\n'), ((1348, 1367), 'numpy.mean', 'np.mean', (['self.deque'], {}), '(self.deque)\n', (1355, 1367), True, 'import numpy as np\n'), ((1417, 1435), 'numpy.max', 'np.max', (['self.deque'], {}), '(self.deque)\n', (1423, 1435), True, 'import numpy as np\n'), ((2222, 2247), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (2245, 2247), False, 'import collections\n')] |
''' Define the sublayers in encoder/decoder layer '''
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from transformer.Modules import ScaledDotProductAttention
__author__ = "<NAME>"
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module '''

    def __init__(self, n_head, d_out, d_k, d_v, dropout=0.1, d_in=None):
        """
        :param n_head: number of attention heads
        :param d_out: size of output of multi head attention, same as query input q
        :param d_k: dimension of keys during attention
        :param d_v: dimension of values during attention
        :param dropout: regularization constant
        :param d_in: size of the key/value inputs; defaults to d_out
        """
        super().__init__()
        d_in = d_in if d_in is not None else d_out
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # per-head projections are fused: each Linear emits all heads at once
        self.w_qs = nn.Linear(d_out, n_head * d_k)
        self.w_ks = nn.Linear(d_in, n_head * d_k)
        self.w_vs = nn.Linear(d_in, n_head * d_v)
        # TODO change back to original initialization
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_out + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_in + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_in + d_v)))
        #nn.init.normal_(self.w_qs.weight, mean=0, std=1 / (d_out + d_k))
        #nn.init.normal_(self.w_ks.weight, mean=0, std=1 / (d_in + d_k))
        #nn.init.normal_(self.w_vs.weight, mean=0, std=1 / (d_in + d_v))
        # set all biases to zero
        #nn.init.zeros_(self.w_qs.bias)
        #nn.init.zeros_(self.w_ks.bias)
        #nn.init.zeros_(self.w_vs.bias)
        # scaled dot-product attention with temperature sqrt(d_k)
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_out)
        # TODO undo this
        self.fc = nn.Linear(n_head * d_v, d_out)
        #nn.init.xavier_normal_(self.fc.weight)
        #nn.init.normal_(self.fc.weight, mean=0, std=1 / d_out)
        # output projection is damped (gain 1/100) so the residual path
        # dominates early in training
        nn.init.xavier_normal_(self.fc.weight, gain=1/100)
        nn.init.zeros_(self.fc.bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """Attend over (k, v) with queries q and return (output, attn).

        q is (batch, len_q, d_out); k and v are (batch, len, d_in).
        NOTE(review): mask=None would fail at mask.repeat below — callers
        appear to always pass a mask; confirm before relying on the default.
        """
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()
        # residual connection is taken from the (query) input
        residual = q
        # project, then split the fused head dimension out
        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
        # fold heads into the batch dimension so attention runs per head
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
        output, attn = self.attention(q, k, v, mask=mask)
        # un-fold the heads and concatenate them along the feature axis
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)
        return output, attn
class PositionwiseFeedForward(nn.Module):
    ''' A two-feed-forward-layer module '''

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        # position-wise transforms, implemented as kernel-size-1 convolutions
        self.w_1 = nn.Conv1d(d_in, d_hid, 1)
        self.w_2 = nn.Conv1d(d_hid, d_in, 1)
        # damp the second projection (gain 1/100) with zero bias so the
        # residual path dominates early in training
        nn.init.xavier_normal_(self.w_2.weight, gain=1 / 100)
        nn.init.zeros_(self.w_2.bias)
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Apply conv-relu-conv with dropout, a residual add, and layer norm.

        x is (batch, seq, d_in); the convolutions operate over the channel
        axis, hence the transposes around them.
        """
        residual = x
        hidden = F.relu(self.w_1(x.transpose(1, 2)))
        projected = self.w_2(hidden).transpose(1, 2)
        return self.layer_norm(self.dropout(projected) + residual)
| [
"torch.nn.Dropout",
"numpy.power",
"torch.nn.init.xavier_normal_",
"torch.nn.Conv1d",
"torch.nn.init.zeros_",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"numpy.sqrt"
] | [((854, 884), 'torch.nn.Linear', 'nn.Linear', (['d_out', '(n_head * d_k)'], {}), '(d_out, n_head * d_k)\n', (863, 884), True, 'import torch.nn as nn\n'), ((905, 934), 'torch.nn.Linear', 'nn.Linear', (['d_in', '(n_head * d_k)'], {}), '(d_in, n_head * d_k)\n', (914, 934), True, 'import torch.nn as nn\n'), ((955, 984), 'torch.nn.Linear', 'nn.Linear', (['d_in', '(n_head * d_v)'], {}), '(d_in, n_head * d_v)\n', (964, 984), True, 'import torch.nn as nn\n'), ((1774, 1793), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_out'], {}), '(d_out)\n', (1786, 1793), True, 'import torch.nn as nn\n'), ((1838, 1868), 'torch.nn.Linear', 'nn.Linear', (['(n_head * d_v)', 'd_out'], {}), '(n_head * d_v, d_out)\n', (1847, 1868), True, 'import torch.nn as nn\n'), ((1989, 2041), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.fc.weight'], {'gain': '(1 / 100)'}), '(self.fc.weight, gain=1 / 100)\n', (2011, 2041), True, 'import torch.nn as nn\n'), ((2048, 2076), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.fc.bias'], {}), '(self.fc.bias)\n', (2062, 2076), True, 'import torch.nn as nn\n'), ((2101, 2120), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (2111, 2120), True, 'import torch.nn as nn\n'), ((3362, 3387), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_in', 'd_hid', '(1)'], {}), '(d_in, d_hid, 1)\n', (3371, 3387), True, 'import torch.nn as nn\n'), ((3423, 3448), 'torch.nn.Conv1d', 'nn.Conv1d', (['d_hid', 'd_in', '(1)'], {}), '(d_hid, d_in, 1)\n', (3432, 3448), True, 'import torch.nn as nn\n'), ((3655, 3708), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.w_2.weight'], {'gain': '(1 / 100)'}), '(self.w_2.weight, gain=1 / 100)\n', (3677, 3708), True, 'import torch.nn as nn\n'), ((3754, 3783), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['self.w_2.bias'], {}), '(self.w_2.bias)\n', (3768, 3783), True, 'import torch.nn as nn\n'), ((3811, 3829), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['d_in'], {}), '(d_in)\n', (3823, 3829), True, 'import 
torch.nn as nn\n'), ((3853, 3872), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (3863, 3872), True, 'import torch.nn as nn\n'), ((1093, 1121), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_out + d_k))'], {}), '(2.0 / (d_out + d_k))\n', (1100, 1121), True, 'import numpy as np\n'), ((1177, 1204), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_in + d_k))'], {}), '(2.0 / (d_in + d_k))\n', (1184, 1204), True, 'import numpy as np\n'), ((1260, 1287), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (d_in + d_v))'], {}), '(2.0 / (d_in + d_v))\n', (1267, 1287), True, 'import numpy as np\n'), ((1728, 1746), 'numpy.power', 'np.power', (['d_k', '(0.5)'], {}), '(d_k, 0.5)\n', (1736, 1746), True, 'import numpy as np\n')] |
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
from astropy.coordinates import SkyCoord, AltAz, EarthLocation, ICRS
from astropy import units as u
from astropy.time import Time
from astropy.wcs import WCS
from astropy.io import fits
from scipy.signal import convolve2d
import tempfile, os
import astrofluxlib as aflux
import sys
def main():
parser = argparse.ArgumentParser(description='A CLI for running radio interferometry simulations')
parser.add_argument('--sky', type=str, help='"cross" | "stars" | path to FITS sky map file', required=True)
parser.add_argument('--json', type=str, help='JSON input string')
parser.add_argument('--file', type=str, help='path to observation JSON input file')
parser.add_argument('--duration', type=float, help='observation duration in hours')
parser.add_argument('--samples', type=int, help='number of samples per short term interval')
parser.add_argument('--snr', type=float, help='SNR of antenna signals')
parser.add_argument('--fast', action='store_true', help='simulate all antennas with the parameters')
parser.add_argument('--spiral', metavar='RADIUS', type=float, help='generate spiral array with this max radius in meters')
parser.add_argument('--random', metavar='RADIUS', type=float, help='generate random array positions on this order')
parser.add_argument('--count', metavar='NUM_ANTENNAS', type=int, help='number of antennas for generated array')
parser.add_argument('--size', metavar='DISH_SIZE', type=float, help='size of generate dishes')
parser.add_argument('--save', type=str, help='save generated configuration to this output path')
parser.add_argument('--dump', action='store_true', help='dump output as JSON string in stdout')
args = parser.parse_args()
# parse the input json string or file
input_data = None
if args.json:
input_data = json.loads(args.json)
elif args.file:
with open(args.file, 'r') as f:
input_data = json.loads(f.read())
else:
print("No input")
sys.exit(1)
wavelength = input_data['wavelength']
bandwidth = input_data['bandwidth']
samplingRate = input_data['samplingRate']
antenna_xy = np.zeros((len(input_data['antennas']), 2))
antenna_sizes = np.zeros(antenna_xy.shape[0])
antenna_eta = np.zeros(antenna_xy.shape[0])
# create the antenna array
if args.random:
antenna_xy = (np.random.rand(args.count, 2) - 0.5) * args.random
antenna_sizes[:] = args.size if args.size else 3
antenna_eta[:] = 0.5
elif args.spiral:
t = (np.arange(args.count)+1) / args.count
logarg = t
antenna_xy = np.zeros((args.count, 2))
antenna_xy[:,0] = args.spiral*np.log(logarg)*np.cos(t*4*np.pi)/2
antenna_xy[:,1] = args.spiral*np.log(logarg)*np.sin(t*4*np.pi)/2
antenna_sizes[:] = args.size if args.size else 3
antenna_eta[:] = 0.5
else:
for i, a in enumerate(input_data['antennas']):
antenna_xy[i,0] = a['x']
antenna_xy[i,1] = a['y']
antenna_sizes[i] = a['size']
antenna_eta[i] = a['eta']
if args.save:
input_data['antennas'] = list(map(
lambda axy: {
'x': axy[0],
'y': axy[1],
'size': antenna_sizes[0],
'eta': antenna_eta[0]
},
antenna_xy
))
with open(args.save, 'w') as f:
f.write(json.dumps(input_data, sort_keys=True, indent=2))
observation = aflux.Observation(
input_data['target']['ra'],
input_data['target']['dec'],
input_data['latitude'],
input_data['longitude'],
input_data['timestamp'],
args.duration if args.duration else input_data['duration']
)
DURATION_STEP = 0.1 # 6 minutes
image_size = 64
# create the desired skymap
skymap = None
if args.sky == 'cross':
skymap = aflux.CrossSky()
elif args.sky == 'stars':
skymap = aflux.StarSky(image_size)
else:
skymap = aflux.FITSSkyMap(args.sky)
# figure out the beamwidths of each antenna
beamwidths = aflux.parabolic_beamwidth(antenna_sizes, wavelength, degrees=True)
image = np.zeros((image_size, image_size), dtype=complex)
all_uv = []
all_xcorr = []
for elapsed in np.arange(0, observation.duration, DURATION_STEP):
current_xy = aflux.propagate_antennas(antenna_xy, elapsed)
current_uv = aflux.to_uv(current_xy, wavelength)
all_uv.append(current_uv)
signals = []
if args.fast:
signals, pixeldata = aflux.simulate(
observation,
current_xy,
beamwidths[0],
wavelength,
skymap,
samples_per_dim=image_size,
snr=args.snr,
samples=args.samples if args.samples else 1
)
else:
for (axy, bw, eta) in zip(current_xy, beamwidths, antenna_eta):
axy = np.expand_dims(axy, axis=0)
rx, pixeldata = aflux.simulate(
observation,
axy,
bw,
wavelength,
skymap,
samples_per_dim=image_size,
snr=args.snr,
samples=args.samples if args.samples else 1
)
signals.append(rx)
signals = np.stack(signals, axis=0)
xcorr = aflux.xcorr_signals(signals)
all_xcorr.append(xcorr.reshape(-1))
all_xcorr = np.concatenate(all_xcorr)
all_uv = np.concatenate(all_uv, axis=0)
# find the dirty image
image = aflux.compute_dirty_image(
all_uv,
all_xcorr,
np.amax(beamwidths),
samples_per_dim=image_size
)
# figure out the estimated synthetic beamwidth
norms = current_uv.dot(current_uv.T)
max_baseline = np.sqrt(np.amax(norms))*wavelength
synthetic_bw = aflux.parabolic_beamwidth(max_baseline, wavelength, degrees=True)
# find the dirty beam
dirty_beam = aflux.dirty_beam(all_uv, np.amax(beamwidths)*2, image_size*2)
# CLEAN
lmbda = 0.05
iters = 1000
cleaned = aflux.clean(image, all_uv, np.amax(beamwidths), synthetic_bw, iters, lmbda)
image = np.abs(image)
# ---- COMPARISON ---- #
# compare_size = image_size
# uv_image = np.zeros((compare_size, compare_size))
# max_dim = np.amax(np.abs(all_uv))
# for i in range(all_uv.shape[0]):
# pos = (all_uv[i,:]/max_dim * compare_size/2)
# uv_image[int(compare_size/2 - pos[1] - 1), int(pos[0] + compare_size/2) - 1] += 1.0
# plt.figure(2, figsize=(8,8))
# plt.subplot(221)
# plt.imshow(uv_image, cmap='Greys')
# db = np.fft.fft2(uv_image)
# plt.subplot(222)
# plt.imshow(np.abs(np.fft.fftshift(db)), cmap='gray')
# di = np.fft.ifft2(uv_image * np.fft.fftshift(np.fft.fft2(pixeldata.reshape(image_size, image_size))))
# plt.subplot(223)
# plt.imshow(np.abs(di), cmap='gray')
# clnd = aflux.clean(di, all_uv, np.amax(beamwidths), synthetic_bw, iters, lmbda)
# plt.subplot(224)
# plt.imshow(np.abs(clnd), cmap='gray')
# -------------------- #
if args.dump:
outdir = tempfile.gettempdir()
out = {
'cleanPath': os.path.join(outdir, 'clean.jpg'),
'skyPath': os.path.join(outdir, 'sky.jpg'),
'dirtyBeamPath': os.path.join(outdir, 'dirtyBeam.jpg'),
'dirtyImagePath': os.path.join(outdir, 'dirtyImage.jpg'),
'uvPath': os.path.join(outdir, 'uv.jpg'),
}
plt.figure()
plt.imshow(pixeldata.reshape(image_size,image_size), cmap='gray')
plt.axis('off')
plt.title('Source Image')
plt.savefig(out['skyPath'])
plt.imshow(cleaned, cmap='gray')
plt.title('CLEANed Image')
plt.axis('off')
plt.savefig(out['cleanPath'])
plt.imshow(image, cmap='gray')
plt.title('Dirty Image')
plt.axis('off')
plt.savefig(out['dirtyImagePath'])
plt.imshow(dirty_beam, cmap='gray')
plt.title('Dirty Beam')
plt.axis('off')
plt.savefig(out['dirtyBeamPath'])
plt.figure()
ax = plt.gca()
plt.scatter(all_uv[:,0], all_uv[:,1], color='k', marker='.')
plt.title('uv Plane')
ax.set_aspect('equal')
plt.title('uv Plane')
plt.xlabel('u [wavelengths]')
plt.ylabel('v [wavelengths]')
ax.grid()
plt.savefig(out['uvPath'])
print(json.dumps(out))
else:
# visualizations
plt.figure(figsize=(10,10))
ax = plt.subplot(231)
plt.scatter(antenna_xy[:,0], antenna_xy[:,1], color='k', marker='x')
ax.set_aspect('equal')
plt.title('xy Plane')
plt.xlabel('x [meters]')
plt.ylabel('y [meters]')
ax.grid()
ax = plt.subplot(232)
plt.scatter(all_uv[:,0], all_uv[:,1], color='k', marker='.')
ax.set_aspect('equal')
plt.title('uv Plane')
plt.xlabel('u [wavelengths]')
plt.ylabel('v [wavelengths]')
ax.grid()
plt.subplot(233)
plt.imshow(pixeldata.reshape(image_size,image_size), cmap='gray')
plt.title('Source Image (BW = {:.1f} deg)'.format(np.amax(beamwidths)))
plt.xlabel('Azimuth')
plt.ylabel('Altitude')
plt.axis('off')
plt.subplot(234)
plt.imshow(np.abs(dirty_beam), cmap='gray')
plt.axis('off')
plt.title('Dirty Beam (BW = {:.1f} deg)'.format(np.amax(beamwidths)*2))
plt.xlabel('Azimuth')
plt.ylabel('Altitude')
plt.subplot(235)
plt.imshow(np.abs(image), cmap='gray')
plt.axis('off')
plt.title('Dirty Image (BW = {:.1f} deg)'.format(np.amax(beamwidths)))
plt.xlabel('Azimuth')
plt.ylabel('Altitude')
plt.subplot(236)
plt.imshow(np.abs(cleaned), cmap='gray')
plt.axis('off')
plt.title('CLEANed ({:d} iters, lambda={:.2f})'.format(iters, lmbda))
plt.xlabel('Azimuth')
plt.ylabel('Altitude')
plt.tight_layout(pad=4.0)
plt.show()
if __name__ == '__main__':
main() | [
"astrofluxlib.CrossSky",
"matplotlib.pyplot.title",
"numpy.abs",
"argparse.ArgumentParser",
"astrofluxlib.to_uv",
"astrofluxlib.xcorr_signals",
"json.dumps",
"matplotlib.pyplot.figure",
"astrofluxlib.propagate_antennas",
"numpy.arange",
"astrofluxlib.StarSky",
"numpy.sin",
"matplotlib.pyplot... | [((393, 487), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A CLI for running radio interferometry simulations"""'}), "(description=\n 'A CLI for running radio interferometry simulations')\n", (416, 487), False, 'import argparse\n'), ((2319, 2348), 'numpy.zeros', 'np.zeros', (['antenna_xy.shape[0]'], {}), '(antenna_xy.shape[0])\n', (2327, 2348), True, 'import numpy as np\n'), ((2367, 2396), 'numpy.zeros', 'np.zeros', (['antenna_xy.shape[0]'], {}), '(antenna_xy.shape[0])\n', (2375, 2396), True, 'import numpy as np\n'), ((3601, 3818), 'astrofluxlib.Observation', 'aflux.Observation', (["input_data['target']['ra']", "input_data['target']['dec']", "input_data['latitude']", "input_data['longitude']", "input_data['timestamp']", "(args.duration if args.duration else input_data['duration'])"], {}), "(input_data['target']['ra'], input_data['target']['dec'],\n input_data['latitude'], input_data['longitude'], input_data['timestamp'\n ], args.duration if args.duration else input_data['duration'])\n", (3618, 3818), True, 'import astrofluxlib as aflux\n'), ((4228, 4294), 'astrofluxlib.parabolic_beamwidth', 'aflux.parabolic_beamwidth', (['antenna_sizes', 'wavelength'], {'degrees': '(True)'}), '(antenna_sizes, wavelength, degrees=True)\n', (4253, 4294), True, 'import astrofluxlib as aflux\n'), ((4308, 4357), 'numpy.zeros', 'np.zeros', (['(image_size, image_size)'], {'dtype': 'complex'}), '((image_size, image_size), dtype=complex)\n', (4316, 4357), True, 'import numpy as np\n'), ((4412, 4461), 'numpy.arange', 'np.arange', (['(0)', 'observation.duration', 'DURATION_STEP'], {}), '(0, observation.duration, DURATION_STEP)\n', (4421, 4461), True, 'import numpy as np\n'), ((5699, 5724), 'numpy.concatenate', 'np.concatenate', (['all_xcorr'], {}), '(all_xcorr)\n', (5713, 5724), True, 'import numpy as np\n'), ((5738, 5768), 'numpy.concatenate', 'np.concatenate', (['all_uv'], {'axis': '(0)'}), '(all_uv, axis=0)\n', (5752, 5768), True, 
'import numpy as np\n'), ((6110, 6175), 'astrofluxlib.parabolic_beamwidth', 'aflux.parabolic_beamwidth', (['max_baseline', 'wavelength'], {'degrees': '(True)'}), '(max_baseline, wavelength, degrees=True)\n', (6135, 6175), True, 'import astrofluxlib as aflux\n'), ((6435, 6448), 'numpy.abs', 'np.abs', (['image'], {}), '(image)\n', (6441, 6448), True, 'import numpy as np\n'), ((1917, 1938), 'json.loads', 'json.loads', (['args.json'], {}), '(args.json)\n', (1927, 1938), False, 'import json\n'), ((4018, 4034), 'astrofluxlib.CrossSky', 'aflux.CrossSky', ([], {}), '()\n', (4032, 4034), True, 'import astrofluxlib as aflux\n'), ((4484, 4529), 'astrofluxlib.propagate_antennas', 'aflux.propagate_antennas', (['antenna_xy', 'elapsed'], {}), '(antenna_xy, elapsed)\n', (4508, 4529), True, 'import astrofluxlib as aflux\n'), ((4551, 4586), 'astrofluxlib.to_uv', 'aflux.to_uv', (['current_xy', 'wavelength'], {}), '(current_xy, wavelength)\n', (4562, 4586), True, 'import astrofluxlib as aflux\n'), ((5601, 5629), 'astrofluxlib.xcorr_signals', 'aflux.xcorr_signals', (['signals'], {}), '(signals)\n', (5620, 5629), True, 'import astrofluxlib as aflux\n'), ((5881, 5900), 'numpy.amax', 'np.amax', (['beamwidths'], {}), '(beamwidths)\n', (5888, 5900), True, 'import numpy as np\n'), ((6374, 6393), 'numpy.amax', 'np.amax', (['beamwidths'], {}), '(beamwidths)\n', (6381, 6393), True, 'import numpy as np\n'), ((7404, 7425), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (7423, 7425), False, 'import tempfile, os\n'), ((7768, 7780), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7778, 7780), True, 'import matplotlib.pyplot as plt\n'), ((7863, 7878), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7871, 7878), True, 'import matplotlib.pyplot as plt\n'), ((7887, 7912), 'matplotlib.pyplot.title', 'plt.title', (['"""Source Image"""'], {}), "('Source Image')\n", (7896, 7912), True, 'import matplotlib.pyplot as plt\n'), ((7921, 7948), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["out['skyPath']"], {}), "(out['skyPath'])\n", (7932, 7948), True, 'import matplotlib.pyplot as plt\n'), ((7958, 7990), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cleaned'], {'cmap': '"""gray"""'}), "(cleaned, cmap='gray')\n", (7968, 7990), True, 'import matplotlib.pyplot as plt\n'), ((7999, 8025), 'matplotlib.pyplot.title', 'plt.title', (['"""CLEANed Image"""'], {}), "('CLEANed Image')\n", (8008, 8025), True, 'import matplotlib.pyplot as plt\n'), ((8034, 8049), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8042, 8049), True, 'import matplotlib.pyplot as plt\n'), ((8058, 8087), 'matplotlib.pyplot.savefig', 'plt.savefig', (["out['cleanPath']"], {}), "(out['cleanPath'])\n", (8069, 8087), True, 'import matplotlib.pyplot as plt\n'), ((8097, 8127), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (8107, 8127), True, 'import matplotlib.pyplot as plt\n'), ((8136, 8160), 'matplotlib.pyplot.title', 'plt.title', (['"""Dirty Image"""'], {}), "('Dirty Image')\n", (8145, 8160), True, 'import matplotlib.pyplot as plt\n'), ((8169, 8184), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8177, 8184), True, 'import matplotlib.pyplot as plt\n'), ((8193, 8227), 'matplotlib.pyplot.savefig', 'plt.savefig', (["out['dirtyImagePath']"], {}), "(out['dirtyImagePath'])\n", (8204, 8227), True, 'import matplotlib.pyplot as plt\n'), ((8237, 8272), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dirty_beam'], {'cmap': '"""gray"""'}), "(dirty_beam, cmap='gray')\n", (8247, 8272), True, 'import matplotlib.pyplot as plt\n'), ((8281, 8304), 'matplotlib.pyplot.title', 'plt.title', (['"""Dirty Beam"""'], {}), "('Dirty Beam')\n", (8290, 8304), True, 'import matplotlib.pyplot as plt\n'), ((8313, 8328), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8321, 8328), True, 'import matplotlib.pyplot as plt\n'), ((8337, 8370), 
'matplotlib.pyplot.savefig', 'plt.savefig', (["out['dirtyBeamPath']"], {}), "(out['dirtyBeamPath'])\n", (8348, 8370), True, 'import matplotlib.pyplot as plt\n'), ((8379, 8391), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8389, 8391), True, 'import matplotlib.pyplot as plt\n'), ((8405, 8414), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8412, 8414), True, 'import matplotlib.pyplot as plt\n'), ((8423, 8485), 'matplotlib.pyplot.scatter', 'plt.scatter', (['all_uv[:, 0]', 'all_uv[:, 1]'], {'color': '"""k"""', 'marker': '"""."""'}), "(all_uv[:, 0], all_uv[:, 1], color='k', marker='.')\n", (8434, 8485), True, 'import matplotlib.pyplot as plt\n'), ((8492, 8513), 'matplotlib.pyplot.title', 'plt.title', (['"""uv Plane"""'], {}), "('uv Plane')\n", (8501, 8513), True, 'import matplotlib.pyplot as plt\n'), ((8553, 8574), 'matplotlib.pyplot.title', 'plt.title', (['"""uv Plane"""'], {}), "('uv Plane')\n", (8562, 8574), True, 'import matplotlib.pyplot as plt\n'), ((8583, 8612), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""u [wavelengths]"""'], {}), "('u [wavelengths]')\n", (8593, 8612), True, 'import matplotlib.pyplot as plt\n'), ((8621, 8650), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""v [wavelengths]"""'], {}), "('v [wavelengths]')\n", (8631, 8650), True, 'import matplotlib.pyplot as plt\n'), ((8677, 8703), 'matplotlib.pyplot.savefig', 'plt.savefig', (["out['uvPath']"], {}), "(out['uvPath'])\n", (8688, 8703), True, 'import matplotlib.pyplot as plt\n'), ((8778, 8806), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (8788, 8806), True, 'import matplotlib.pyplot as plt\n'), ((8819, 8835), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (8830, 8835), True, 'import matplotlib.pyplot as plt\n'), ((8844, 8914), 'matplotlib.pyplot.scatter', 'plt.scatter', (['antenna_xy[:, 0]', 'antenna_xy[:, 1]'], {'color': '"""k"""', 'marker': '"""x"""'}), "(antenna_xy[:, 0], antenna_xy[:, 1], 
color='k', marker='x')\n", (8855, 8914), True, 'import matplotlib.pyplot as plt\n'), ((8952, 8973), 'matplotlib.pyplot.title', 'plt.title', (['"""xy Plane"""'], {}), "('xy Plane')\n", (8961, 8973), True, 'import matplotlib.pyplot as plt\n'), ((8982, 9006), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x [meters]"""'], {}), "('x [meters]')\n", (8992, 9006), True, 'import matplotlib.pyplot as plt\n'), ((9015, 9039), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y [meters]"""'], {}), "('y [meters]')\n", (9025, 9039), True, 'import matplotlib.pyplot as plt\n'), ((9071, 9087), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (9082, 9087), True, 'import matplotlib.pyplot as plt\n'), ((9096, 9158), 'matplotlib.pyplot.scatter', 'plt.scatter', (['all_uv[:, 0]', 'all_uv[:, 1]'], {'color': '"""k"""', 'marker': '"""."""'}), "(all_uv[:, 0], all_uv[:, 1], color='k', marker='.')\n", (9107, 9158), True, 'import matplotlib.pyplot as plt\n'), ((9196, 9217), 'matplotlib.pyplot.title', 'plt.title', (['"""uv Plane"""'], {}), "('uv Plane')\n", (9205, 9217), True, 'import matplotlib.pyplot as plt\n'), ((9226, 9255), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""u [wavelengths]"""'], {}), "('u [wavelengths]')\n", (9236, 9255), True, 'import matplotlib.pyplot as plt\n'), ((9264, 9293), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""v [wavelengths]"""'], {}), "('v [wavelengths]')\n", (9274, 9293), True, 'import matplotlib.pyplot as plt\n'), ((9320, 9336), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (9331, 9336), True, 'import matplotlib.pyplot as plt\n'), ((9499, 9520), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Azimuth"""'], {}), "('Azimuth')\n", (9509, 9520), True, 'import matplotlib.pyplot as plt\n'), ((9529, 9551), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude"""'], {}), "('Altitude')\n", (9539, 9551), True, 'import matplotlib.pyplot as plt\n'), ((9560, 9575), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], 
{}), "('off')\n", (9568, 9575), True, 'import matplotlib.pyplot as plt\n'), ((9584, 9600), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (9595, 9600), True, 'import matplotlib.pyplot as plt\n'), ((9661, 9676), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9669, 9676), True, 'import matplotlib.pyplot as plt\n'), ((9765, 9786), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Azimuth"""'], {}), "('Azimuth')\n", (9775, 9786), True, 'import matplotlib.pyplot as plt\n'), ((9795, 9817), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude"""'], {}), "('Altitude')\n", (9805, 9817), True, 'import matplotlib.pyplot as plt\n'), ((9826, 9842), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (9837, 9842), True, 'import matplotlib.pyplot as plt\n'), ((9898, 9913), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9906, 9913), True, 'import matplotlib.pyplot as plt\n'), ((10001, 10022), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Azimuth"""'], {}), "('Azimuth')\n", (10011, 10022), True, 'import matplotlib.pyplot as plt\n'), ((10031, 10053), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude"""'], {}), "('Altitude')\n", (10041, 10053), True, 'import matplotlib.pyplot as plt\n'), ((10062, 10078), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (10073, 10078), True, 'import matplotlib.pyplot as plt\n'), ((10136, 10151), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10144, 10151), True, 'import matplotlib.pyplot as plt\n'), ((10238, 10259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Azimuth"""'], {}), "('Azimuth')\n", (10248, 10259), True, 'import matplotlib.pyplot as plt\n'), ((10268, 10290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude"""'], {}), "('Altitude')\n", (10278, 10290), True, 'import matplotlib.pyplot as plt\n'), ((10299, 10324), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], 
{'pad': '(4.0)'}), '(pad=4.0)\n', (10315, 10324), True, 'import matplotlib.pyplot as plt\n'), ((10333, 10343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10341, 10343), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2100), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2097, 2100), False, 'import sys\n'), ((2721, 2746), 'numpy.zeros', 'np.zeros', (['(args.count, 2)'], {}), '((args.count, 2))\n', (2729, 2746), True, 'import numpy as np\n'), ((4082, 4107), 'astrofluxlib.StarSky', 'aflux.StarSky', (['image_size'], {}), '(image_size)\n', (4095, 4107), True, 'import astrofluxlib as aflux\n'), ((4135, 4161), 'astrofluxlib.FITSSkyMap', 'aflux.FITSSkyMap', (['args.sky'], {}), '(args.sky)\n', (4151, 4161), True, 'import astrofluxlib as aflux\n'), ((4697, 4867), 'astrofluxlib.simulate', 'aflux.simulate', (['observation', 'current_xy', 'beamwidths[0]', 'wavelength', 'skymap'], {'samples_per_dim': 'image_size', 'snr': 'args.snr', 'samples': '(args.samples if args.samples else 1)'}), '(observation, current_xy, beamwidths[0], wavelength, skymap,\n samples_per_dim=image_size, snr=args.snr, samples=args.samples if args.\n samples else 1)\n', (4711, 4867), True, 'import astrofluxlib as aflux\n'), ((5558, 5583), 'numpy.stack', 'np.stack', (['signals'], {'axis': '(0)'}), '(signals, axis=0)\n', (5566, 5583), True, 'import numpy as np\n'), ((6064, 6078), 'numpy.amax', 'np.amax', (['norms'], {}), '(norms)\n', (6071, 6078), True, 'import numpy as np\n'), ((6245, 6264), 'numpy.amax', 'np.amax', (['beamwidths'], {}), '(beamwidths)\n', (6252, 6264), True, 'import numpy as np\n'), ((7467, 7500), 'os.path.join', 'os.path.join', (['outdir', '"""clean.jpg"""'], {}), "(outdir, 'clean.jpg')\n", (7479, 7500), False, 'import tempfile, os\n'), ((7525, 7556), 'os.path.join', 'os.path.join', (['outdir', '"""sky.jpg"""'], {}), "(outdir, 'sky.jpg')\n", (7537, 7556), False, 'import tempfile, os\n'), ((7587, 7624), 'os.path.join', 'os.path.join', (['outdir', '"""dirtyBeam.jpg"""'], 
{}), "(outdir, 'dirtyBeam.jpg')\n", (7599, 7624), False, 'import tempfile, os\n'), ((7656, 7694), 'os.path.join', 'os.path.join', (['outdir', '"""dirtyImage.jpg"""'], {}), "(outdir, 'dirtyImage.jpg')\n", (7668, 7694), False, 'import tempfile, os\n'), ((7718, 7748), 'os.path.join', 'os.path.join', (['outdir', '"""uv.jpg"""'], {}), "(outdir, 'uv.jpg')\n", (7730, 7748), False, 'import tempfile, os\n'), ((8718, 8733), 'json.dumps', 'json.dumps', (['out'], {}), '(out)\n', (8728, 8733), False, 'import json\n'), ((9620, 9638), 'numpy.abs', 'np.abs', (['dirty_beam'], {}), '(dirty_beam)\n', (9626, 9638), True, 'import numpy as np\n'), ((9862, 9875), 'numpy.abs', 'np.abs', (['image'], {}), '(image)\n', (9868, 9875), True, 'import numpy as np\n'), ((10098, 10113), 'numpy.abs', 'np.abs', (['cleaned'], {}), '(cleaned)\n', (10104, 10113), True, 'import numpy as np\n'), ((2471, 2500), 'numpy.random.rand', 'np.random.rand', (['args.count', '(2)'], {}), '(args.count, 2)\n', (2485, 2500), True, 'import numpy as np\n'), ((3532, 3580), 'json.dumps', 'json.dumps', (['input_data'], {'sort_keys': '(True)', 'indent': '(2)'}), '(input_data, sort_keys=True, indent=2)\n', (3542, 3580), False, 'import json\n'), ((5116, 5143), 'numpy.expand_dims', 'np.expand_dims', (['axy'], {'axis': '(0)'}), '(axy, axis=0)\n', (5130, 5143), True, 'import numpy as np\n'), ((5176, 5324), 'astrofluxlib.simulate', 'aflux.simulate', (['observation', 'axy', 'bw', 'wavelength', 'skymap'], {'samples_per_dim': 'image_size', 'snr': 'args.snr', 'samples': '(args.samples if args.samples else 1)'}), '(observation, axy, bw, wavelength, skymap, samples_per_dim=\n image_size, snr=args.snr, samples=args.samples if args.samples else 1)\n', (5190, 5324), True, 'import astrofluxlib as aflux\n'), ((9469, 9488), 'numpy.amax', 'np.amax', (['beamwidths'], {}), '(beamwidths)\n', (9476, 9488), True, 'import numpy as np\n'), ((9971, 9990), 'numpy.amax', 'np.amax', (['beamwidths'], {}), '(beamwidths)\n', (9978, 9990), True, 'import 
numpy as np\n'), ((2643, 2664), 'numpy.arange', 'np.arange', (['args.count'], {}), '(args.count)\n', (2652, 2664), True, 'import numpy as np\n'), ((2800, 2821), 'numpy.cos', 'np.cos', (['(t * 4 * np.pi)'], {}), '(t * 4 * np.pi)\n', (2806, 2821), True, 'import numpy as np\n'), ((2873, 2894), 'numpy.sin', 'np.sin', (['(t * 4 * np.pi)'], {}), '(t * 4 * np.pi)\n', (2879, 2894), True, 'import numpy as np\n'), ((9733, 9752), 'numpy.amax', 'np.amax', (['beamwidths'], {}), '(beamwidths)\n', (9740, 9752), True, 'import numpy as np\n'), ((2785, 2799), 'numpy.log', 'np.log', (['logarg'], {}), '(logarg)\n', (2791, 2799), True, 'import numpy as np\n'), ((2858, 2872), 'numpy.log', 'np.log', (['logarg'], {}), '(logarg)\n', (2864, 2872), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
from PySource import RickerSource
from PyModel import Model
from PySource import Receiver
from JAcoustic_codegen import forward_born, forward_modeling, forward_freq_modeling, adjoint_freq_born, adjoint_born
import time
import h5py
import matplotlib.pyplot as plt
# Load Sigsbee model
sigsbee = h5py.File('/scratch/slim/shared/mathias-philipp/bp_synthetic_2004/model/vp_fine.h5','r+')
m0 = np.transpose(np.array(sigsbee['m0']))
dm = np.transpose(np.array(sigsbee['dm']))
# Model
shape = (10789, 1911)
spacing = (6.25, 6.26)
origin = (0., 0.)
model0 = Model(shape=shape, origin=origin, spacing=spacing, vp=np.sqrt(1/m0), dm=dm)
# Time axis
t0 = 0.
tn = 14000.
dt = model0.critical_dt
nt = int(1 + (tn-t0) / dt)
time_axis = np.linspace(t0,tn,nt)
# Source
f0 = 0.020
src = RickerSource(name='src', grid=model0.grid, f0=f0, time=time_axis)
src.coordinates.data[0,:] = np.array(4617.)
src.coordinates.data[0,-1] = 20.
# Receiver for observed data
rec_t = Receiver(name='rec_t', grid=model0.grid, npoint=1201, ntime=nt)
rec_t.coordinates.data[:, 0] = np.linspace(4717., 19717., num=1201)
rec_t.coordinates.data[:, 1] = 50.
# Compute LS-RTM gradient w/ on-the-fly DFTs
num_frequencies = [1, 2, 4, 8, 16, 32, 64, 128]
timings = np.zeros(len(num_frequencies))
for j in range(len(num_frequencies)):
f = np.linspace(0.01, 0.01, num_frequencies[j]) # always use 10 Hz
t1 = time.time()
d0, ufr, ufi = forward_freq_modeling(model0, src.coordinates.data, src.data, rec_t.coordinates.data, freq=f, dt=dt, factor=8)
t2 = time.time()
print('Forward: ', t2 - t1)
t3 = time.time()
dm = adjoint_freq_born(model0, rec_t.coordinates.data, d0.data, f, ufr, ufi, isic=True, dt=dt, factor=8)
t4 = time.time()
print('Adjoint: ', t4 - t3)
print('Total: ', (t2 - t1) + (t4 - t3))
timings[j] = (t2 - t1) + (t4 - t3)
timings.dump('timing_bp_frequencies.dat')
# Checkpointing
d0, _ = forward_modeling(model0, src.coordinates.data, src.data, rec_t.coordinates.data)
ta = time.time()
op_predicted = forward_modeling(model0, src.coordinates.data, src.data, rec_t.coordinates.data, op_return=True, dt=dt)
f1, g1 = adjoint_born(model0, rec_t.coordinates.data, d0.data, op_forward=op_predicted, dt=dt)
tb = time.time()
print('Optimal checkpointing: ', tb - ta)
timings_oc = np.array(tb - ta)
timings_oc.dump('timing_bp_optimal_checkpointing.dat')
| [
"JAcoustic_codegen.forward_modeling",
"h5py.File",
"JAcoustic_codegen.forward_freq_modeling",
"JAcoustic_codegen.adjoint_born",
"time.time",
"PySource.RickerSource",
"PySource.Receiver",
"JAcoustic_codegen.adjoint_freq_born",
"numpy.array",
"numpy.linspace",
"numpy.sqrt"
] | [((352, 451), 'h5py.File', 'h5py.File', (['"""/scratch/slim/shared/mathias-philipp/bp_synthetic_2004/model/vp_fine.h5"""', '"""r+"""'], {}), "(\n '/scratch/slim/shared/mathias-philipp/bp_synthetic_2004/model/vp_fine.h5',\n 'r+')\n", (361, 451), False, 'import h5py\n'), ((781, 804), 'numpy.linspace', 'np.linspace', (['t0', 'tn', 'nt'], {}), '(t0, tn, nt)\n', (792, 804), True, 'import numpy as np\n'), ((830, 895), 'PySource.RickerSource', 'RickerSource', ([], {'name': '"""src"""', 'grid': 'model0.grid', 'f0': 'f0', 'time': 'time_axis'}), "(name='src', grid=model0.grid, f0=f0, time=time_axis)\n", (842, 895), False, 'from PySource import RickerSource\n'), ((924, 940), 'numpy.array', 'np.array', (['(4617.0)'], {}), '(4617.0)\n', (932, 940), True, 'import numpy as np\n'), ((1011, 1074), 'PySource.Receiver', 'Receiver', ([], {'name': '"""rec_t"""', 'grid': 'model0.grid', 'npoint': '(1201)', 'ntime': 'nt'}), "(name='rec_t', grid=model0.grid, npoint=1201, ntime=nt)\n", (1019, 1074), False, 'from PySource import Receiver\n'), ((1106, 1144), 'numpy.linspace', 'np.linspace', (['(4717.0)', '(19717.0)'], {'num': '(1201)'}), '(4717.0, 19717.0, num=1201)\n', (1117, 1144), True, 'import numpy as np\n'), ((1964, 2049), 'JAcoustic_codegen.forward_modeling', 'forward_modeling', (['model0', 'src.coordinates.data', 'src.data', 'rec_t.coordinates.data'], {}), '(model0, src.coordinates.data, src.data, rec_t.coordinates.data\n )\n', (1980, 2049), False, 'from JAcoustic_codegen import forward_born, forward_modeling, forward_freq_modeling, adjoint_freq_born, adjoint_born\n'), ((2050, 2061), 'time.time', 'time.time', ([], {}), '()\n', (2059, 2061), False, 'import time\n'), ((2077, 2185), 'JAcoustic_codegen.forward_modeling', 'forward_modeling', (['model0', 'src.coordinates.data', 'src.data', 'rec_t.coordinates.data'], {'op_return': '(True)', 'dt': 'dt'}), '(model0, src.coordinates.data, src.data, rec_t.coordinates.\n data, op_return=True, dt=dt)\n', (2093, 2185), False, 'from 
JAcoustic_codegen import forward_born, forward_modeling, forward_freq_modeling, adjoint_freq_born, adjoint_born\n'), ((2190, 2280), 'JAcoustic_codegen.adjoint_born', 'adjoint_born', (['model0', 'rec_t.coordinates.data', 'd0.data'], {'op_forward': 'op_predicted', 'dt': 'dt'}), '(model0, rec_t.coordinates.data, d0.data, op_forward=\n op_predicted, dt=dt)\n', (2202, 2280), False, 'from JAcoustic_codegen import forward_born, forward_modeling, forward_freq_modeling, adjoint_freq_born, adjoint_born\n'), ((2281, 2292), 'time.time', 'time.time', ([], {}), '()\n', (2290, 2292), False, 'import time\n'), ((2348, 2365), 'numpy.array', 'np.array', (['(tb - ta)'], {}), '(tb - ta)\n', (2356, 2365), True, 'import numpy as np\n'), ((460, 483), 'numpy.array', 'np.array', (["sigsbee['m0']"], {}), "(sigsbee['m0'])\n", (468, 483), True, 'import numpy as np\n'), ((503, 526), 'numpy.array', 'np.array', (["sigsbee['dm']"], {}), "(sigsbee['dm'])\n", (511, 526), True, 'import numpy as np\n'), ((1359, 1402), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.01)', 'num_frequencies[j]'], {}), '(0.01, 0.01, num_frequencies[j])\n', (1370, 1402), True, 'import numpy as np\n'), ((1434, 1445), 'time.time', 'time.time', ([], {}), '()\n', (1443, 1445), False, 'import time\n'), ((1465, 1580), 'JAcoustic_codegen.forward_freq_modeling', 'forward_freq_modeling', (['model0', 'src.coordinates.data', 'src.data', 'rec_t.coordinates.data'], {'freq': 'f', 'dt': 'dt', 'factor': '(8)'}), '(model0, src.coordinates.data, src.data, rec_t.\n coordinates.data, freq=f, dt=dt, factor=8)\n', (1486, 1580), False, 'from JAcoustic_codegen import forward_born, forward_modeling, forward_freq_modeling, adjoint_freq_born, adjoint_born\n'), ((1585, 1596), 'time.time', 'time.time', ([], {}), '()\n', (1594, 1596), False, 'import time\n'), ((1639, 1650), 'time.time', 'time.time', ([], {}), '()\n', (1648, 1650), False, 'import time\n'), ((1660, 1763), 'JAcoustic_codegen.adjoint_freq_born', 'adjoint_freq_born', (['model0', 
'rec_t.coordinates.data', 'd0.data', 'f', 'ufr', 'ufi'], {'isic': '(True)', 'dt': 'dt', 'factor': '(8)'}), '(model0, rec_t.coordinates.data, d0.data, f, ufr, ufi,\n isic=True, dt=dt, factor=8)\n', (1677, 1763), False, 'from JAcoustic_codegen import forward_born, forward_modeling, forward_freq_modeling, adjoint_freq_born, adjoint_born\n'), ((1769, 1780), 'time.time', 'time.time', ([], {}), '()\n', (1778, 1780), False, 'import time\n'), ((663, 678), 'numpy.sqrt', 'np.sqrt', (['(1 / m0)'], {}), '(1 / m0)\n', (670, 678), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from typing import List
import warnings
from skimage.morphology import remove_small_objects, label
# TensorFlow (imported just below) emits noisy FutureWarnings on import;
# silence them before importing it.
warnings.filterwarnings('ignore', category=FutureWarning)
import tensorflow as tf
# Public API of this module.
__all__ = ["getMagnification"]
def _initModel(weights_path='Ml-test/image_to_number_model.hdf5'):
    '''Initialise the digit-classifier CNN and load its trained weights.

    https://towardsdatascience.com/an-actual-application-for-the-mnist-digits-classifier-bbd76548bf2f

    Parameters
    ----------
    weights_path : str, optional
        Path to the pre-trained weight file.  The default keeps the old
        hard-coded location, so existing callers are unaffected.

    Returns
    -------
    model : tensorflow.python.keras.engine.sequential.Sequential
        The ML trained model.
    '''
    model = tf.keras.models.Sequential()
    # two conv/pool stages over the 28x28 single-channel input
    model.add(tf.keras.layers.Conv2D(254, kernel_size=(3, 3), input_shape=(28, 28, 1)))
    model.add(tf.keras.layers.MaxPool2D((2, 2)))
    model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3)))
    model.add(tf.keras.layers.MaxPool2D((2, 2)))
    # convert from 2D feature maps to 1D vectors
    model.add(tf.keras.layers.Flatten())
    # finish our model with densely connected layers
    model.add(tf.keras.layers.Dense(140, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Dense(80, activation='relu'))
    model.add(tf.keras.layers.Dropout(0.2))
    # output layer with 10 units (one per each class 0-9)
    model.add(tf.keras.layers.Dense(units=10, activation='sigmoid'))
    model.load_weights(weights_path)
    return model
def _processLabels(image: np.ndarray, stats: List, label: int) -> np.ndarray:
    '''Crop a single labelled digit out of *image* and resize it to the
    28x28 input expected by the digit classifier.

    Parameters
    ----------
    image : np.ndarray, 2D
        Labeled image to be cropped and resized.
    stats : List
        Per-label statistics (bbox, centroid, etc.) as produced by
        cv2.connectedComponentsWithStats.
    label : int
        Integer label of the connected component to extract.

    Returns
    -------
    digit : np.ndarray, 2D
        28x28 uint8 array holding the digit to be classified.
    '''
    # Binary mask: 255 where the pixel belongs to this label, 0 elsewhere.
    mask = np.where(image != label, 0, 255)
    # Padded bounding box (5 px before the top-left corner, 10 px extra size).
    left = max(0, stats[label, cv2.CC_STAT_LEFT] - 5)
    top = max(0, stats[label, cv2.CC_STAT_TOP] - 5)
    right = left + stats[label, cv2.CC_STAT_WIDTH] + 10
    bottom = top + stats[label, cv2.CC_STAT_HEIGHT] + 10
    # Crop and resize for ML classification.
    cropped = np.array(mask[top:bottom, left:right], dtype="uint8")
    return cv2.resize(cropped,
                      dsize=(28, 28),
                      interpolation=cv2.INTER_CUBIC)
def getMagnification(frame: np.ndarray, debug=False) -> float:
    '''Function uses ML OCR to determine the magnification of the frame from
       the drone video.
    Parameters
    ----------
    frame : np.ndarray
        Decoded video frame (BGR, as read by cv2) to determine the
        magnification from.
    debug : bool, optional
        If True then shows list of images and their classifications
        for debug purposes.
    Returns
    -------
    magnification : float
        The determined magnification level of the drone video.
    '''
    # model is created lazily in the `setting` module so that importing this
    # module alone does not trigger a model load.
    from setting import model
    if debug:
        import matplotlib.pyplot as plt
        fig, axs = plt.subplots(1, 3)
    # Open image and convert to grayscale
    array = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # array = img
    # Fixed ROI where the magnification overlay is drawn in the video.
    array = array[92:130, 24:140]
    # Threshold, dilate and then crop to ROI.
    ret2, thresh = cv2.threshold(array, 200, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    thresh = label(thresh)
    thresh = remove_small_objects(thresh, 100)
    # convert image back to binary and uint8 type
    thresh = np.where(thresh > 0, 255, 0)
    thresh = np.array(thresh, "uint8")
    if debug:
        axs[0].imshow(array)
    # Heuristic clean-up branches depending on how "bright" the Otsu result is.
    if np.mean(thresh) > 100.:
        ret, thresh = cv2.threshold(array, 200, 255, cv2.THRESH_BINARY)
        kernel = np.ones((2, 2), np.uint8)
        thresh = cv2.dilate(thresh, kernel, iterations=1)
        thresh = label(thresh)
        thresh = remove_small_objects(thresh, 100)
        thresh = np.where(thresh > 0, 255, 0)
        array = np.array(thresh, "uint8")
    else:
        # fraction of foreground pixels in the ROI
        fraction = np.sum(thresh/255)/(thresh.shape[0]*thresh.shape[1])
        if fraction > 0.2:
            array = np.where(array < 50, 255, array)
            ret2, thresh = cv2.threshold(array, 210, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
            thresh = label(thresh)
            thresh = remove_small_objects(thresh, 100)
            # convert image back to binary and uint8 type
            thresh = np.where(thresh > 0, 255, 0)
            thresh = np.array(thresh, "uint8")
            kernel = np.ones((2, 2), np.uint8)
            thresh = cv2.erode(thresh, kernel, iterations=1)
            if debug:
                axs[1].imshow(thresh)
            array = thresh
        else:
            kernel = np.ones((2, 2), np.uint8)
            array = thresh
            array = cv2.dilate(thresh, kernel, iterations=1)
    # label again, this time with stats calculated
    output = cv2.connectedComponentsWithStats(array, 8, cv2.CV_32S)
    nlabels = output[0] # number of labels
    array = output[1] # Labeled image
    stats = output[2] # List of stats for each label
    labels = []
    digits = []
    if debug:
        axs[2].imshow(array)
    # Sort labels so that they are processed in left to right order
    s = stats[1:, cv2.CC_STAT_LEFT]
    labelOrder = sorted(range(len(s)), key=lambda k: s[k])
    # classify digits
    for i in labelOrder:
        digits.append(_processLabels(array, stats, i+1))
        lab = model.predict_classes(digits[-1].reshape(1, 28, 28, 1).astype('float32')/255)
        labels.append(lab)
    if debug:
        print(labels)
        plt.show()
    # if method fails just return 1.0 magnification
    if len(labels) == 1:
        return 1.0
    # format and return magnification level
    first = labels[0]
    second = labels[1]
    # A third digit is only meaningful when the first two digits read < 20
    # and at least four components were found (the third component of a
    # 3-component read is assumed to be noise).
    if len(labels) > 2:
        if int(str(first[0])+str(second[0])) < 20.:
            if len(labels) == 3:
                third = None
            else:
                third = labels[2]
        else:
            third = None
    else:
        third = None
    if third:
        magnification = float(f"{first[0]}{second[0]}.{third[0]}")
    else:
        magnification = float(f"{first[0]}.{second[0]}")
    return magnification
if __name__ == '__main__':
    # Self-test: run the magnification reader against two local frame sets.
    import glob as gb
    import time
    files = gb.glob("large/*.png")
    files.sort()
    # run tests on 1.0x magnification
    for i, file in enumerate(files[:631]):
        print(f"{i+1}/{len(files)}")
        start = time.time()
        # BUGFIX: getMagnification() expects a decoded frame (np.ndarray),
        # not a filename -- passing the path made cv2.cvtColor() raise.
        frame = cv2.imread(file)
        magnification = getMagnification(frame, debug=True)
        # BUGFIX: use abs() so magnifications *below* the target fail too.
        assert abs(magnification - 1.0) < 0.2, f"{file}, {magnification}"
        finish = time.time()
    files = gb.glob("small/2019_*.png")
    files.sort()
    # run tests on different magnifications
    magns = [1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.1, 2.0, 3.3, 4.0, 4.0, 4.0, 4.2,
             4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 8.0, 9.4, 12.6, 10.1]
    for i, file in enumerate(files):
        print(f"{i+1}/{len(files)}")
        start = time.time()
        frame = cv2.imread(file)
        magnification = getMagnification(frame, debug=False)
        assert abs(magnification - magns[i]) < 0.001, f"{file}, {magnification}"
        finish = time.time()
| [
"numpy.sum",
"tensorflow.keras.layers.Dense",
"numpy.ones",
"numpy.mean",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.models.Sequential",
"glob.glob",
"cv2.erode",
"tensorflow.keras.layers.Flatten",
"cv2.dilate",
"cv2.cvtColor",
"skimage.morphology.label",
"matplotlib.pyplot.subpl... | [((129, 186), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (152, 186), False, 'import warnings\n'), ((611, 639), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), '()\n', (637, 639), True, 'import tensorflow as tf\n'), ((1366, 1422), 'setting.model.load_weights', 'model.load_weights', (['"""Ml-test/image_to_number_model.hdf5"""'], {}), "('Ml-test/image_to_number_model.hdf5')\n", (1384, 1422), False, 'from setting import model\n'), ((2155, 2187), 'numpy.where', 'np.where', (['(image != label)', '(0)', '(255)'], {}), '(image != label, 0, 255)\n', (2163, 2187), True, 'import numpy as np\n'), ((2506, 2536), 'numpy.array', 'np.array', (['digit'], {'dtype': '"""uint8"""'}), "(digit, dtype='uint8')\n", (2514, 2536), True, 'import numpy as np\n'), ((2549, 2613), 'cv2.resize', 'cv2.resize', (['digit'], {'dsize': '(28, 28)', 'interpolation': 'cv2.INTER_CUBIC'}), '(digit, dsize=(28, 28), interpolation=cv2.INTER_CUBIC)\n', (2559, 2613), False, 'import cv2\n'), ((3435, 3474), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3447, 3474), False, 'import cv2\n'), ((3594, 3661), 'cv2.threshold', 'cv2.threshold', (['array', '(200)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(array, 200, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3607, 3661), False, 'import cv2\n'), ((3674, 3687), 'skimage.morphology.label', 'label', (['thresh'], {}), '(thresh)\n', (3679, 3687), False, 'from skimage.morphology import remove_small_objects, label\n'), ((3701, 3734), 'skimage.morphology.remove_small_objects', 'remove_small_objects', (['thresh', '(100)'], {}), '(thresh, 100)\n', (3721, 3734), False, 'from skimage.morphology import remove_small_objects, label\n'), ((3798, 3826), 'numpy.where', 'np.where', (['(thresh > 0)', '(255)', '(0)'], {}), '(thresh > 0, 255, 0)\n', (3806, 
3826), True, 'import numpy as np\n'), ((3840, 3865), 'numpy.array', 'np.array', (['thresh', '"""uint8"""'], {}), "(thresh, 'uint8')\n", (3848, 3865), True, 'import numpy as np\n'), ((5199, 5253), 'cv2.connectedComponentsWithStats', 'cv2.connectedComponentsWithStats', (['array', '(8)', 'cv2.CV_32S'], {}), '(array, 8, cv2.CV_32S)\n', (5231, 5253), False, 'import cv2\n'), ((6618, 6640), 'glob.glob', 'gb.glob', (['"""large/*.png"""'], {}), "('large/*.png')\n", (6625, 6640), True, 'import glob as gb\n'), ((6977, 7004), 'glob.glob', 'gb.glob', (['"""small/2019_*.png"""'], {}), "('small/2019_*.png')\n", (6984, 7004), True, 'import glob as gb\n'), ((655, 727), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(254)'], {'kernel_size': '(3, 3)', 'input_shape': '(28, 28, 1)'}), '(254, kernel_size=(3, 3), input_shape=(28, 28, 1))\n', (677, 727), True, 'import tensorflow as tf\n'), ((743, 776), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (768, 776), True, 'import tensorflow as tf\n'), ((792, 839), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)'], {'kernel_size': '(3, 3)'}), '(128, kernel_size=(3, 3))\n', (814, 839), True, 'import tensorflow as tf\n'), ((855, 888), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', (['(2, 2)'], {}), '((2, 2))\n', (880, 888), True, 'import tensorflow as tf\n'), ((946, 971), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (969, 971), True, 'import tensorflow as tf\n'), ((1040, 1085), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(140)'], {'activation': '"""relu"""'}), "(140, activation='relu')\n", (1061, 1085), True, 'import tensorflow as tf\n'), ((1101, 1129), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1124, 1129), True, 'import tensorflow as tf\n'), ((1145, 1189), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(80)'], {'activation': 
'"""relu"""'}), "(80, activation='relu')\n", (1166, 1189), True, 'import tensorflow as tf\n'), ((1205, 1233), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (1228, 1233), True, 'import tensorflow as tf\n'), ((1307, 1360), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(10)', 'activation': '"""sigmoid"""'}), "(units=10, activation='sigmoid')\n", (1328, 1360), True, 'import tensorflow as tf\n'), ((3361, 3379), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (3373, 3379), True, 'import matplotlib.pyplot as plt\n'), ((3918, 3933), 'numpy.mean', 'np.mean', (['thresh'], {}), '(thresh)\n', (3925, 3933), True, 'import numpy as np\n'), ((3964, 4013), 'cv2.threshold', 'cv2.threshold', (['array', '(200)', '(255)', 'cv2.THRESH_BINARY'], {}), '(array, 200, 255, cv2.THRESH_BINARY)\n', (3977, 4013), False, 'import cv2\n'), ((4032, 4057), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (4039, 4057), True, 'import numpy as np\n'), ((4075, 4115), 'cv2.dilate', 'cv2.dilate', (['thresh', 'kernel'], {'iterations': '(1)'}), '(thresh, kernel, iterations=1)\n', (4085, 4115), False, 'import cv2\n'), ((4133, 4146), 'skimage.morphology.label', 'label', (['thresh'], {}), '(thresh)\n', (4138, 4146), False, 'from skimage.morphology import remove_small_objects, label\n'), ((4164, 4197), 'skimage.morphology.remove_small_objects', 'remove_small_objects', (['thresh', '(100)'], {}), '(thresh, 100)\n', (4184, 4197), False, 'from skimage.morphology import remove_small_objects, label\n'), ((4216, 4244), 'numpy.where', 'np.where', (['(thresh > 0)', '(255)', '(0)'], {}), '(thresh > 0, 255, 0)\n', (4224, 4244), True, 'import numpy as np\n'), ((4261, 4286), 'numpy.array', 'np.array', (['thresh', '"""uint8"""'], {}), "(thresh, 'uint8')\n", (4269, 4286), True, 'import numpy as np\n'), ((5904, 5914), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5912, 5914), True, 
'import matplotlib.pyplot as plt\n'), ((6794, 6805), 'time.time', 'time.time', ([], {}), '()\n', (6803, 6805), False, 'import time\n'), ((6952, 6963), 'time.time', 'time.time', ([], {}), '()\n', (6961, 6963), False, 'import time\n'), ((7306, 7317), 'time.time', 'time.time', ([], {}), '()\n', (7315, 7317), False, 'import time\n'), ((7472, 7483), 'time.time', 'time.time', ([], {}), '()\n', (7481, 7483), False, 'import time\n'), ((4317, 4337), 'numpy.sum', 'np.sum', (['(thresh / 255)'], {}), '(thresh / 255)\n', (4323, 4337), True, 'import numpy as np\n'), ((4417, 4449), 'numpy.where', 'np.where', (['(array < 50)', '(255)', 'array'], {}), '(array < 50, 255, array)\n', (4425, 4449), True, 'import numpy as np\n'), ((4477, 4544), 'cv2.threshold', 'cv2.threshold', (['array', '(210)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(array, 210, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (4490, 4544), False, 'import cv2\n'), ((4565, 4578), 'skimage.morphology.label', 'label', (['thresh'], {}), '(thresh)\n', (4570, 4578), False, 'from skimage.morphology import remove_small_objects, label\n'), ((4600, 4633), 'skimage.morphology.remove_small_objects', 'remove_small_objects', (['thresh', '(100)'], {}), '(thresh, 100)\n', (4620, 4633), False, 'from skimage.morphology import remove_small_objects, label\n'), ((4713, 4741), 'numpy.where', 'np.where', (['(thresh > 0)', '(255)', '(0)'], {}), '(thresh > 0, 255, 0)\n', (4721, 4741), True, 'import numpy as np\n'), ((4763, 4788), 'numpy.array', 'np.array', (['thresh', '"""uint8"""'], {}), "(thresh, 'uint8')\n", (4771, 4788), True, 'import numpy as np\n'), ((4811, 4836), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (4818, 4836), True, 'import numpy as np\n'), ((4858, 4897), 'cv2.erode', 'cv2.erode', (['thresh', 'kernel'], {'iterations': '(1)'}), '(thresh, kernel, iterations=1)\n', (4867, 4897), False, 'import cv2\n'), ((5020, 5045), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 
2), np.uint8)\n', (5027, 5045), True, 'import numpy as np\n'), ((5093, 5133), 'cv2.dilate', 'cv2.dilate', (['thresh', 'kernel'], {'iterations': '(1)'}), '(thresh, kernel, iterations=1)\n', (5103, 5133), False, 'import cv2\n')] |
import numpy as np
def get_vehicle_info(vehicle):
    """Flatten a (CARLA-style) vehicle's state into a 15-element vector.

    Layout: [location x/y/z, rotation pitch/roll/yaw, velocity x/y/z,
    angular velocity x/y/z, acceleration x/y/z].
    """
    # Query order kept identical to the original implementation.
    ang = vehicle.get_angular_velocity()
    acc = vehicle.get_acceleration()
    vel = vehicle.get_velocity()
    pose = vehicle.get_transform()
    loc, rot = pose.location, pose.rotation
    return np.array(
        [loc.x, loc.y, loc.z]
        + [rot.pitch, rot.roll, rot.yaw]
        + [vel.x, vel.y, vel.z]
        + [ang.x, ang.y, ang.z]
        + [acc.x, acc.y, acc.z]
    )
def get_transform_location(transform):
    """Return the transform's location as a numpy array [x, y, z]."""
    point = transform.location
    return np.array([point.x, point.y, point.z])
| [
"numpy.array"
] | [((317, 564), 'numpy.array', 'np.array', (['[location.x, location.y, location.z, rotation.pitch, rotation.roll,\n rotation.yaw, velocity.x, velocity.y, velocity.z, angular_velocity.x,\n angular_velocity.y, angular_velocity.z, acceleration.x, acceleration.y,\n acceleration.z]'], {}), '([location.x, location.y, location.z, rotation.pitch, rotation.roll,\n rotation.yaw, velocity.x, velocity.y, velocity.z, angular_velocity.x,\n angular_velocity.y, angular_velocity.z, acceleration.x, acceleration.y,\n acceleration.z])\n', (325, 564), True, 'import numpy as np\n'), ((798, 844), 'numpy.array', 'np.array', (['[location.x, location.y, location.z]'], {}), '([location.x, location.y, location.z])\n', (806, 844), True, 'import numpy as np\n')] |
"""
Test & demo file
"""
from collections import deque
from enum import Enum
import numpy as np
from hamiltonian.manager import Manager
class TestManager(Manager):
    """
    A hectic stress demo showing that point generation, line generation,
    point deletion, line deletion and point movement all work together.

    :param per: frames between two cycles of actions
    """
    def __init__(self, per):
        super(TestManager, self).__init__(
            callback=lambda x: x.refresh()
        )
        self.ind = 0
        self.per = per
        self.prevs = []

    def update(self, render):
        self.ind += 1
        # cycle number and the action within the current cycle
        cyl, action = divmod(self.ind, self.per)
        if action == 0:
            # spawn a node at a random position
            self.prevs.append(
                render.add_node("node%i" % cyl, np.random.rand(2))
            )
        elif action == 1 and cyl > 1:
            # link the two most recent nodes
            render.add_link(self.prevs[-1], self.prevs[-2])
        elif action == 2 and cyl > 1:
            # move the newest node somewhere random
            self.prevs[-1].set_destination(np.random.rand(2))
        elif action == 3 and cyl > 5:
            # retire the oldest node
            oldest = self.prevs.pop(0)
            render.remove_node(oldest.name)
TestManager(20).start()
| [
"numpy.random.rand"
] | [((794, 811), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (808, 811), True, 'import numpy as np\n'), ((1082, 1099), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (1096, 1099), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import json
import os
import pickle
import random
import dlib
### Work around - CUDA_ERROR_OUT_OF_MEMORY
import keras.backend as K
import numpy as np
import tensorflow as tf
from PIL import Image
from keras import models, layers, optimizers
from keras.callbacks import EarlyStopping
from keras.models import load_model
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from api_face.keras_vggface.vggface import VGGFace
from utils import util
# Work around CUDA_ERROR_OUT_OF_MEMORY: let TF grow GPU memory on demand
# instead of grabbing all of it up front.
config = K.tf.ConfigProto()
config.gpu_options.allow_growth = True
session = K.tf.Session(config=config)
########################################
if __name__ == "__main__":
    print("core 직접 실행")
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
else:
    print("core import")
    # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
MAX_DETECT = 4  # maximum number of faces classified in one image
PROB_BENCHMARK = 0.9  # minimum probability for a confident prediction
UPLOAD_DIR = './dataset/'  # original images uploaded from the front-end
PEOPLE_DIR = './people/'  # images cropped and resized to 224x224
DATA_XY_FILE = './feature/dataXY.npz'  # persisted feature dataset (x, y)
MODEL_NAME = './feature/hs_model.h5'  # classifier model file
MODEL_LABEL = './feature/hs_model_label.pkl'  # pickled label list
# NOTE: 'global' at module level is a no-op; kept as-is.
global graph
graph = tf.get_default_graph()
detector = dlib.get_frontal_face_detector()
resnet_vgg = VGGFace(model='resnet50', include_top=False,
                    input_shape=(224, 224, 3), pooling='max', weights='vggface')
myModel = None  # lazily loaded classifier (see loadModel())
LabelDic = None  # class-index -> name mapping for myModel
# SVM deciding whether a prediction should be reported as 'unknown'
with open('./assets/benchmark/svm_unknown.pkl', 'rb') as f:
    svm_unknown = pickle.load(f)
###############################################################################
# Private functions
###############################################################################
# Action:
#   Load the classifier model and its label list from disk, if present.
def loadModel():
    """Load the saved Keras model (MODEL_NAME) and pickled label array
    (MODEL_LABEL).

    Returns (model, labels); both are None when no model file exists yet.
    """
    _model = None
    _labeldic = None
    if os.path.isfile(MODEL_NAME):
        _model = load_model(MODEL_NAME)
        with open(MODEL_LABEL, 'rb') as f:
            _labeldic = pickle.load(f)
    return _model, _labeldic
#util
# Actions:
#   Crop the detector bounding box out of *image* and resize the crop.
# Params:
#   image - PIL Image object
#   d - dlib detection (rectangle) with left/top/right/bottom accessors
#   x, y - target size in pixels
def imgCropResize(image, d, x, y):
    box = (d.left(), d.top(), d.right(), d.bottom())
    return image.crop(box).resize((x, y), Image.ANTIALIAS)
# Actions:
#   Run the image through resnet_vgg and min-max normalise the feature.
def getFeatureByResnetVgg(img):
    """Extract a VGGFace/ResNet50 feature vector for a 224x224 RGB image.

    Returns the feature min-max normalised into [0, 1].
    """
    npImg = np.array(img)
    npImg = npImg.reshape(1, 224, 224, 3)  # TODO: derive the shape from the image
    with graph.as_default():
        feature = resnet_vgg.predict(npImg)
    # BUGFIX: the denominator must be parenthesised.  The old expression
    # `(f - f.min()) / f.max() - f.min()` divided by max() only and then
    # subtracted min(), which is not min-max normalisation.
    # NOTE(review): features persisted with the old formula should be
    # regenerated, otherwise stored and fresh features use different scales.
    feature = (feature - feature.min()) / (feature.max() - feature.min())
    return feature
# Actions:
#   Create the resized-image directory for *name*, then crop/resize every
#   uploaded image that contains exactly one face and save it there.
def imgResize(name):
    """Crop and resize all uploads for *name* into PEOPLE_DIR.

    Returns the number of images successfully processed (0 when the
    target directory already exists or the upload folder is empty).
    """
    if os.path.exists(PEOPLE_DIR + name):
        print('Error: ' + name + ' dir already exists')
        return 0
    os.makedirs(PEOPLE_DIR + name)
    img_list = os.listdir(UPLOAD_DIR + name)
    processed = 0
    for img_one in img_list:
        try:
            org_img = Image.open(UPLOAD_DIR + name + '/' + img_one)
            dets = detector(np.array(org_img), 1)
            if len(dets) != 1:
                print("Only 1 Face is accepted")
                continue
            img = imgCropResize(org_img, dets[0], 224, 224)
            img.save(PEOPLE_DIR + name + '/' + img_one, 'JPEG')
            processed += 1
        except Exception:
            # best-effort: skip unreadable/broken images
            print('Some Error Occured... ')
    # BUGFIX: the old `return (i + 1)` raised NameError on an empty upload
    # directory and counted attempted rather than processed images.
    return processed
# Actions:
#   Extract a feature vector for every single-face image of *name*.
def getXY(name):
    """Build the training samples for one person.

    Returns (count, X, Y): the number of usable images, the list of
    feature arrays and the matching list of labels (all equal to *name*).
    """
    X = []
    Y = []
    i = 0
    # NOTE: the unused `n = len(img_list)` of the old version was removed.
    for img_one in os.listdir(UPLOAD_DIR + name):
        try:
            org_img = Image.open(UPLOAD_DIR + name + '/' + img_one)
            dets = detector(np.array(org_img), 1)
            if len(dets) == 1:
                img = imgCropResize(org_img, dets[0], 224, 224)
                X.append(getFeatureByResnetVgg(img))
                Y.append(name)
                i += 1
            else:
                print('BB Detector length Err: ', len(dets))
        except Exception as ex:
            print('Some Error Occured...: ', ex)
    return i, X, Y
# Actions:
#   Merge the freshly extracted features X, Y with any previously saved
#   dataset on disk (DATA_XY_FILE) and return the combined set.
def updateXY(X, Y):
    """Append persisted features to (X, Y).

    X may arrive as a plain list of feature arrays; it is converted to an
    ndarray so the debug print below (X.shape) cannot crash on the very
    first training run, when DATA_XY_FILE does not exist yet.
    """
    if os.path.isfile(DATA_XY_FILE):
        dataXY = np.load(DATA_XY_FILE)
        xx, yy = dataXY['x'], dataXY['y']
        X = np.vstack((X, xx))
        Y.extend(yy)
    # BUGFIX: np.asarray is a no-op for the vstack result above but turns a
    # bare list (first run) into an array with a valid .shape, matching the
    # layout np.savez would have stored anyway.
    X = np.asarray(X)
    print(type(X), len(X), X.shape)
    print(type(Y), len(Y), Y)
    return X, Y
# Action:
#   Convert the string labels into one-hot encoded labels and persist the
#   fitted class list to MODEL_LABEL.
# Return value: (number of classes, class name array, one-hot label matrix)
def getEncodedYY(y):
    """One-hot encode the label list *y* and save the class names.

    Side effect: writes label_encoder.classes_ to MODEL_LABEL (pickle).
    """
    label_encoder = LabelEncoder()
    onehot_encoder = OneHotEncoder(sparse=False)
    label_encoder.fit(y)
    y_ = label_encoder.transform(y)  # labels as integers
    y_ = y_.reshape(len(y_), 1)
    yy = onehot_encoder.fit_transform(y_)
    with open(MODEL_LABEL, 'wb') as f:
        pickle.dump(label_encoder.classes_, f)
    print('Model Classes....: ', label_encoder.classes_)
    return len(label_encoder.classes_), label_encoder.classes_, yy
#util
# Action:
#   Shuffle x and y together so that corresponding pairs stay aligned.
def dataShuffle(x, y):
    pairs = [(a, b) for a, b in zip(x, y)]
    random.shuffle(pairs)
    shuffled_x = np.array([p[0] for p in pairs])
    shuffled_y = np.array([p[1] for p in pairs])
    return shuffled_x, shuffled_y
# Action:
#   Build the dense classifier head, train it, and save it to MODEL_NAME.
def getModel(numClasses, train_features, train_labels, validation_features, validation_labels):
    """Train a small dense network on the extracted 2048-d features.

    Early stopping monitors the validation loss; the trained model is
    saved to MODEL_NAME and returned.
    """
    with graph.as_default():
        print('getModel()..........numClasses: {}'.format(numClasses))
        model = models.Sequential()
        model.add(layers.Dense(512, activation='relu', input_dim=1 * 1 * 2048))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(numClasses, activation='softmax'))
        model.summary()
        model.compile(loss='categorical_crossentropy',
                      # optimizer=optimizers.RMSprop(lr=2e-4),
                      optimizer=optimizers.sgd(),
                      metrics=['acc'])
        early_stopping = EarlyStopping(patience=15, mode='auto', monitor='val_loss')
        history = model.fit(train_features,
                            train_labels,
                            epochs=500,
                            batch_size=200,
                            validation_data=(validation_features, validation_labels),
                            callbacks=[early_stopping])
        model.save(MODEL_NAME)
        return model
def facePridict(mode, strImg):
    """Classify the face(s) in a base64-encoded image.

    Several people can be classified in one image; MAX_DETECT caps how
    many faces are processed.  For the 'unknown' decision a dedicated SVM
    was trained on (probability, L2-distance) pairs collected from
    correct and incorrect matches; it is applied when mode == "1".

    # Arguments
        mode: "1" -> apply the SVM-based unknown check; otherwise the
            most similar trained class is always returned.
        strImg: base64-encoded image string.

    # Returns
        JSON string with the prediction result.
    """
    recImg = util.bas64ToRGB(strImg)
    dets = detector(np.array(recImg), 1)
    print("Number of faces detected: {}".format(len(dets)))
    result = {}
    if len(dets) < 1:
        result = {'result': '0', 'msg': "can't find a face"}
    elif len(dets) > MAX_DETECT:
        result = {'result': '0', 'msg': "found too many faces"}
    else:
        features = []
        people = []
        # The stored dataset is needed for the L2-distance part of the
        # unknown check (mode == "1").
        if os.path.isfile(DATA_XY_FILE):
            dataXY = np.load(DATA_XY_FILE)
            xx, yy = dataXY['x'], dataXY['y']
        # with graph.as_default():
        for k, d in enumerate(dets):
            img = imgCropResize(recImg, d, 224, 224)
            feature = getFeatureByResnetVgg(img)
            features.append(feature)
        print("-----------------dnn predict!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!-----------------")
        for f in features:
            with graph.as_default():
                predictions = myModel.predict_classes(f.reshape(1, -1))
                prob = myModel.predict(f.reshape(1, -1))
            name, score = LabelDic[predictions[0]], str(np.amax(prob[0]))
            if (mode == "1"):
                # idx = np.where(np.argmax(yy, axis=1) == predictions[0])[0]
                # distances to every stored sample of the predicted class
                idx = np.where(yy == name)[0]
                l2dists = []
                for k in idx:
                    dist = np.linalg.norm(f.reshape(-1) - xx[k], axis=None, ord=None)
                    l2dists.append(dist)
                temp = [[float(score), np.min(l2dists)]]
                # SVM output 0 means "not this person" -> report unknown
                if 0 == int(svm_unknown.predict(temp)):
                    name, score = 'unknown', '0'
            print(name + '////prob--' + score + '////--' + str(result))
            people.append({'name': name, 'prob': score})
        result = {'result': '1', 'people': people}
    jsonString = json.dumps(result)
    return jsonString
# test function
# img_path : '../visitor/pre_over_100/people/test/ParkMinjeong/51.jpg'
def test_facePridict(img_path):
    """Ad-hoc local test: classify the faces in the image at *img_path*
    and print the resulting JSON (probability threshold only, no SVM
    unknown check).
    """
    img = Image.open(img_path)
    dets = detector(np.array(img), 1)
    print("Number of faces detected: {}".format(len(dets)))
    result = {}
    if len(dets) < 1:
        result = {'result': '0', 'msg': "can't find a face"}
    elif len(dets) > MAX_DETECT:
        result = {'result': '0', 'msg': "found too many faces"}
    else:
        features = []
        people = []
        for k, d in enumerate(dets):
            # BUGFIX: crop from the ORIGINAL image into a new variable; the
            # old code reassigned `img`, so every face after the first was
            # cropped from an already-cropped 224x224 image.
            face = imgCropResize(img, d, 224, 224)
            features.append(getFeatureByResnetVgg(face))
        print("-----------------dnn predict!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!-----------------")
        for f in features:
            # keep both prediction calls inside the graph context, for
            # consistency with facePridict()
            with graph.as_default():
                predictions = myModel.predict_classes(f.reshape(1, -1))
                prob = myModel.predict(f.reshape(1, -1))
            print("Predict result: ", LabelDic[predictions[0]], np.max(prob[0]), ", PROB_BENCHMARK(",
                  PROB_BENCHMARK, ")")
            if (np.amax(prob[0]) > PROB_BENCHMARK):
                dnnPridict = LabelDic[predictions[0]]
                people.append({'name': dnnPridict, 'prob': str(np.amax(prob[0]))})
            else:
                # 'prob' kept as a string for consistency with facePridict()
                people.append({'name': 'unknown', 'prob': '0'})
        result = {'result': '1', 'people': people}
    jsonString = json.dumps(result)
    print("JSON Result: ", jsonString)
    return
# https://<<domain>>:<<port>>/api/request_training
# Params:
#    name - name of the class (person) to add to the model
# Return values:
#    1 - training succeeded
#    0 - training failed
# Actions:
#    resize the images of the `name` directory
#    extract and persist features for the `name` class
#    build and test the model
def faceTraining(name):
    """Add the person *name* to the classifier and retrain it.

    On success, updates the module-level `myModel` / `LabelDic` globals
    and returns a JSON result string.
    """
    num, X, Y = getXY(name)
    X, Y = updateXY(X, Y)
    print(type(X), type(Y))
    np.savez(DATA_XY_FILE, x=X, y=Y)
    print('Total dataset size. X: ', len(X), ', Y: ', len(Y))
    X, Y = dataShuffle(X, Y)
    numClasses, dicClassess, enY = getEncodedYY(Y)
    if numClasses == 1:
        # with a single class there is nothing to train -> always OK
        print('Number of Clases is 1!')
        result = {'result': '1', 'msg': "OK"}
        jsonString = json.dumps(result)
        return jsonString
    X = X.reshape(len(X), 2048)
    totalNum = len(X)
    print('After shuffle....')
    print(len(X), len(Y), len(enY), type(X), type(Y), type(enY), X.shape, Y.shape, enY.shape)
    # split the dataset train:validation:test = 80:15:5
    num_train = int(totalNum * 0.8)
    num_validation = int(totalNum * 0.15) + num_train
    train_features, train_labels, validation_features, validation_labels, test_features, test_labels = \
        X[:num_train], enY[:num_train], X[num_train:num_validation], enY[num_train:num_validation], X[num_validation:], Y[num_validation:]
    print('Total data is {}, train({}), validation({}), test({})'.format(totalNum, len(train_features),len(validation_features), len(test_features)))
    new_model = getModel(numClasses, train_features, train_labels, validation_features, validation_labels)
    # test
    with graph.as_default():
        predictions = new_model.predict_classes(test_features)
        prob = new_model.predict(test_features)
    print(predictions)
    print(prob)
    rightPred = 0
    for i, pred in enumerate(predictions):
        print('Real value: ', test_labels[i], '..... Predict value: ', dicClassess[pred], np.max(prob[i]))
        if test_labels[i] == dicClassess[pred]:
            rightPred += 1
    if rightPred == len(predictions):
        print('Model is good!!!!!')
    # swap the live globals over to the freshly trained model
    global myModel, LabelDic
    myModel = new_model
    LabelDic = dicClassess
    result = {'result': '1', 'msg': "OK"}
    jsonString = json.dumps(result)
    # Path('./DeepCore.py').touch() ## Work around - for updating global variables
    return jsonString
print('Start............................')
# Load the model and its label dictionary from disk if they already exist....
myModel, LabelDic = loadModel()
if myModel is None:
    print("Model doesn't eixst....Do train first!!!!")
print('End............................')
########################## TEST ################################
# faceTraining('ParkSunhee')
# print('\n\ntest_facePridict()............\n')
# test_facePridict('../visitor/pre_over_100/people/test/ParkMinjeong/98.jpg')
########################## TEST ################################
| [
"keras.models.load_model",
"numpy.load",
"pickle.dump",
"keras.backend.tf.ConfigProto",
"random.shuffle",
"json.dumps",
"utils.util.bas64ToRGB",
"keras.optimizers.sgd",
"os.path.isfile",
"pickle.load",
"api_face.keras_vggface.vggface.VGGFace",
"tensorflow.get_default_graph",
"os.path.exists"... | [((490, 508), 'keras.backend.tf.ConfigProto', 'K.tf.ConfigProto', ([], {}), '()\n', (506, 508), True, 'import keras.backend as K\n'), ((558, 585), 'keras.backend.tf.Session', 'K.tf.Session', ([], {'config': 'config'}), '(config=config)\n', (570, 585), True, 'import keras.backend as K\n'), ((1106, 1128), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1126, 1128), True, 'import tensorflow as tf\n'), ((1141, 1173), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1171, 1173), False, 'import dlib\n'), ((1187, 1296), 'api_face.keras_vggface.vggface.VGGFace', 'VGGFace', ([], {'model': '"""resnet50"""', 'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""max"""', 'weights': '"""vggface"""'}), "(model='resnet50', include_top=False, input_shape=(224, 224, 3),\n pooling='max', weights='vggface')\n", (1194, 1296), False, 'from api_face.keras_vggface.vggface import VGGFace\n'), ((1440, 1454), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1451, 1454), False, 'import pickle\n'), ((1748, 1774), 'os.path.isfile', 'os.path.isfile', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (1762, 1774), False, 'import os\n'), ((2373, 2386), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2381, 2386), True, 'import numpy as np\n'), ((2788, 2821), 'os.path.exists', 'os.path.exists', (['(PEOPLE_DIR + name)'], {}), '(PEOPLE_DIR + name)\n', (2802, 2821), False, 'import os\n'), ((3636, 3665), 'os.listdir', 'os.listdir', (['(UPLOAD_DIR + name)'], {}), '(UPLOAD_DIR + name)\n', (3646, 3665), False, 'import os\n'), ((4372, 4400), 'os.path.isfile', 'os.path.isfile', (['DATA_XY_FILE'], {}), '(DATA_XY_FILE)\n', (4386, 4400), False, 'import os\n'), ((4752, 4766), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4764, 4766), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((4788, 4815), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', 
([], {'sparse': '(False)'}), '(sparse=False)\n', (4801, 4815), False, 'from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n'), ((5256, 5274), 'random.shuffle', 'random.shuffle', (['xy'], {}), '(xy)\n', (5270, 5274), False, 'import random\n'), ((5306, 5318), 'numpy.array', 'np.array', (['xx'], {}), '(xx)\n', (5314, 5318), True, 'import numpy as np\n'), ((5328, 5340), 'numpy.array', 'np.array', (['yy'], {}), '(yy)\n', (5336, 5340), True, 'import numpy as np\n'), ((7021, 7044), 'utils.util.bas64ToRGB', 'util.bas64ToRGB', (['strImg'], {}), '(strImg)\n', (7036, 7044), False, 'from utils import util\n'), ((8866, 8884), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (8876, 8884), False, 'import json\n'), ((9042, 9062), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (9052, 9062), False, 'from PIL import Image\n'), ((10799, 10831), 'numpy.savez', 'np.savez', (['DATA_XY_FILE'], {'x': 'X', 'y': 'Y'}), '(DATA_XY_FILE, x=X, y=Y)\n', (10807, 10831), True, 'import numpy as np\n'), ((12679, 12697), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (12689, 12697), False, 'import json\n'), ((1793, 1815), 'keras.models.load_model', 'load_model', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (1803, 1815), False, 'from keras.models import load_model\n'), ((2913, 2943), 'os.makedirs', 'os.makedirs', (['(PEOPLE_DIR + name)'], {}), '(PEOPLE_DIR + name)\n', (2924, 2943), False, 'import os\n'), ((2963, 2992), 'os.listdir', 'os.listdir', (['(UPLOAD_DIR + name)'], {}), '(UPLOAD_DIR + name)\n', (2973, 2992), False, 'import os\n'), ((4419, 4440), 'numpy.load', 'np.load', (['DATA_XY_FILE'], {}), '(DATA_XY_FILE)\n', (4426, 4440), True, 'import numpy as np\n'), ((4495, 4513), 'numpy.vstack', 'np.vstack', (['(X, xx)'], {}), '((X, xx))\n', (4504, 4513), True, 'import numpy as np\n'), ((5007, 5045), 'pickle.dump', 'pickle.dump', (['label_encoder.classes_', 'f'], {}), '(label_encoder.classes_, f)\n', (5018, 5045), False, 'import pickle\n'), ((5610, 
5629), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (5627, 5629), False, 'from keras import models, layers, optimizers\n'), ((6073, 6132), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(15)', 'mode': '"""auto"""', 'monitor': '"""val_loss"""'}), "(patience=15, mode='auto', monitor='val_loss')\n", (6086, 6132), False, 'from keras.callbacks import EarlyStopping\n'), ((7065, 7081), 'numpy.array', 'np.array', (['recImg'], {}), '(recImg)\n', (7073, 7081), True, 'import numpy as np\n'), ((9083, 9096), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (9091, 9096), True, 'import numpy as np\n'), ((11136, 11154), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (11146, 11154), False, 'import json\n'), ((1883, 1897), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1894, 1897), False, 'import pickle\n'), ((3785, 3830), 'PIL.Image.open', 'Image.open', (["(UPLOAD_DIR + name + '/' + img_one)"], {}), "(UPLOAD_DIR + name + '/' + img_one)\n", (3795, 3830), False, 'from PIL import Image\n'), ((5648, 5708), 'keras.layers.Dense', 'layers.Dense', (['(512)'], {'activation': '"""relu"""', 'input_dim': '(1 * 1 * 2048)'}), "(512, activation='relu', input_dim=1 * 1 * 2048)\n", (5660, 5708), False, 'from keras import models, layers, optimizers\n'), ((5728, 5747), 'keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (5742, 5747), False, 'from keras import models, layers, optimizers\n'), ((5767, 5813), 'keras.layers.Dense', 'layers.Dense', (['numClasses'], {'activation': '"""softmax"""'}), "(numClasses, activation='softmax')\n", (5779, 5813), False, 'from keras import models, layers, optimizers\n'), ((7406, 7434), 'os.path.isfile', 'os.path.isfile', (['DATA_XY_FILE'], {}), '(DATA_XY_FILE)\n', (7420, 7434), False, 'import os\n'), ((10394, 10412), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (10404, 10412), False, 'import json\n'), ((12352, 12367), 'numpy.max', 'np.max', (['prob[i]'], {}), 
'(prob[i])\n', (12358, 12367), True, 'import numpy as np\n'), ((3084, 3129), 'PIL.Image.open', 'Image.open', (["(UPLOAD_DIR + name + '/' + img_one)"], {}), "(UPLOAD_DIR + name + '/' + img_one)\n", (3094, 3129), False, 'from PIL import Image\n'), ((3860, 3877), 'numpy.array', 'np.array', (['org_img'], {}), '(org_img)\n', (3868, 3877), True, 'import numpy as np\n'), ((5990, 6006), 'keras.optimizers.sgd', 'optimizers.sgd', ([], {}), '()\n', (6004, 6006), False, 'from keras import models, layers, optimizers\n'), ((7457, 7478), 'numpy.load', 'np.load', (['DATA_XY_FILE'], {}), '(DATA_XY_FILE)\n', (7464, 7478), True, 'import numpy as np\n'), ((3163, 3180), 'numpy.array', 'np.array', (['org_img'], {}), '(org_img)\n', (3171, 3180), True, 'import numpy as np\n'), ((9951, 9966), 'numpy.max', 'np.max', (['prob[0]'], {}), '(prob[0])\n', (9957, 9966), True, 'import numpy as np\n'), ((10052, 10068), 'numpy.amax', 'np.amax', (['prob[0]'], {}), '(prob[0])\n', (10059, 10068), True, 'import numpy as np\n'), ((8099, 8115), 'numpy.amax', 'np.amax', (['prob[0]'], {}), '(prob[0])\n', (8106, 8115), True, 'import numpy as np\n'), ((8259, 8279), 'numpy.where', 'np.where', (['(yy == name)'], {}), '(yy == name)\n', (8267, 8279), True, 'import numpy as np\n'), ((8528, 8543), 'numpy.min', 'np.min', (['l2dists'], {}), '(l2dists)\n', (8534, 8543), True, 'import numpy as np\n'), ((10213, 10229), 'numpy.amax', 'np.amax', (['prob[0]'], {}), '(prob[0])\n', (10220, 10229), True, 'import numpy as np\n')] |
from collections import OrderedDict
import numpy as np
from .object import OMBaseObject
class Wavelet(OMBaseObject):
    """Ricker source wavelet defined by a base frequency (f0) and an amplitude (amp).

    Note: ``get_amplitude_data`` always evaluates the Ricker formula; the
    ``_type`` attribute is stored but not branched on here.
    """
    tid = "wavelet"
    _TID_FRIENDLY_NAME = "Wavelet"
    # Attribute schema: name -> {default_value, type}; insertion order preserved.
    _ATTRIBUTES = OrderedDict()
    # Wavelet family name (display/metadata only in this class).
    _ATTRIBUTES['_type'] = {
        'default_value': "Ricker",
        'type': str
    }
    # Base (peak) frequency of the wavelet.
    _ATTRIBUTES['f0'] = {
        'default_value': 0.0,
        'type': float
    }
    # Peak amplitude scale factor.
    _ATTRIBUTES['amp'] = {
        'default_value': 0.0,
        'type': float
    }
    # (attribute key, human-readable label) pairs exposed to the UI.
    _SHOWN_ATTRIBUTES = [
        ('_type', 'Type'),
        ('f0', 'Base freq'),
        ('amp', 'Amplitude')
    ]
    def __init__(self, **attributes):
        super().__init__(**attributes)
    def get_amplitude_data(self, time_data):
        """Evaluate the Ricker wavelet amplitude at the given sample times.

        The argument is shifted by 1.5 / f0, delaying the wavelet along the
        time axis.  Raises ZeroDivisionError if f0 == 0.
        """
        # ## Source signal - Ricker-wavelet
        tau = np.pi * self.f0 * (time_data - 1.5 / self.f0)
        # NOTE(review): debug prints left in -- consider removing.
        print('tau')
        print(tau.min(), tau.max())
        return self.amp * (1.0 - 2.0 * tau**2.0) * np.exp(-tau**2) | [
"collections.OrderedDict",
"numpy.exp"
] | [((197, 210), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (208, 210), False, 'from collections import OrderedDict\n'), ((1052, 1069), 'numpy.exp', 'np.exp', (['(-tau ** 2)'], {}), '(-tau ** 2)\n', (1058, 1069), True, 'import numpy as np\n')] |
import importlib
from keras.layers import LSTM
from minos.utils import save_sisy_model
from minos.utils import load_sisy_model
def ui():
    """Open the bundled HTML UI (../ui/index.html relative to this module) in a new browser tab."""
    import os
    import webbrowser
    here = os.path.dirname(__file__)
    index_page = os.path.realpath(here + "/../ui/index.html")
    webbrowser.open_new_tab(index_page)
if __name__ == "__main__":
    # Launch the browser-based UI when this module is executed as a script.
    ui()
def frange(x, y, jump=0.1):
    """Return a list of evenly spaced values from x up to (but excluding) y, step `jump`."""
    import numpy as np
    steps = np.arange(x, y, jump)
    return list(steps)
class SisyLayerParams(object):
    """Mapping-like container for a single layer's hyperparameter search values."""

    def __init__(self):
        # Backing dict: parameter name -> fixed value or list/range of candidates.
        self.p = {}

    def __getitem__(self, name):
        return self.p[name]

    def __setitem__(self, name, value):
        self.p[name] = value
def run_sisy_experiment(sisy_layout: list,
                        experiment_label: str,
                        XYTrainTuple: tuple,
                        XYTestTuple: tuple,
                        generations=10,
                        batch_size=1,
                        autoloop=True,
                        population_size=25,
                        epochs=50,
                        devices=None,
                        n_jobs=1,
                        optimizer='sgd',
                        loss='categorical_crossentropy',
                        metric='acc',
                        offspring=1,
                        mutation=1,
                        fitness_type='FitnessMax',
                        shuffle=True):
    """Translate a "sisy" layout description into a minos GA architecture search and run it.

    ``sisy_layout`` is a list of ``(layer_name, params_dict)`` tuples.  The first
    entry describes the input (needs ``units`` or ``input_length``), the last the
    output (needs ``units`` and ``activation``), and the middle entries are the
    searchable layers.  Parameter values given as a ``list`` or ``range`` are
    treated as search spaces; scalar values are fixed.

    On invalid input the function prints a message and returns ``None``
    (preserving the original best-effort behaviour).  Otherwise it returns the
    result of ``run_ga_search_experiment``.
    """
    from collections import defaultdict
    from copy import deepcopy
    from minos.experiment.experiment import Experiment, ExperimentSettings
    from minos.experiment.ga import run_ga_search_experiment
    from minos.experiment.training import Training, EpochStoppingCondition
    from minos.model.model import Layout, Objective, Optimizer, Metric
    from minos.model.parameter import int_param, string_param, float_param
    from minos.model.parameters import register_custom_layer, reference_parameters
    from minos.train.utils import SimpleBatchIterator, GpuEnvironment
    from minos.experiment.experiment import ExperimentParameters

    # Avoid the mutable-default-argument pitfall: the device list default is
    # materialised per call instead of being shared across calls.
    if devices is None:
        devices = ['/gpu:0', '/gpu:1']

    # --- Input validation (print + return None, matching original behaviour) --
    if len(sisy_layout) < 2:
        print("Sisy Layout must be at least size 2, an input, middle, and output layer")
        return
    if len(XYTrainTuple) != 2:
        print("XYTrainTuple must be a tuple of length 2, (X_train,y_train) ")
        return
    if len(XYTestTuple) != 2:
        print("XYTestTuple must be a tuple of length 2, (X_test,y_test) ")
        return
    X_train, y_train = XYTrainTuple
    X_test, y_test = XYTestTuple

    input_layer = sisy_layout[0]     # renamed: 'input' shadowed the builtin
    output_layer = sisy_layout[-1]
    input_size = -1
    if 'units' in input_layer[1]:
        input_size = input_layer[1]['units']
    elif 'input_length' in input_layer[1]:
        input_size = input_layer[1]['input_length']
    else:
        print("You must specify the parameter 'units' for the Input layer")
        return
    if 'activation' not in output_layer[1]:
        print("You must specify the parameter 'activation' for the Output layer")
        return
    if 'units' not in output_layer[1]:
        print("You must specify the parameter 'units' for the Output layer")
        return
    output_activation = output_layer[1]['activation']
    output_initializer = output_layer[1].get('kernel_initializer', 'normal')
    output_size = output_layer[1]['units']

    batch_iterator = SimpleBatchIterator(X_train, y_train, batch_size=batch_size,
                                         autoloop=autoloop, preload=True,
                                         shuffle=shuffle)
    test_batch_iterator = SimpleBatchIterator(X_test, y_test, batch_size=batch_size,
                                              autoloop=autoloop, preload=True,
                                              shuffle=shuffle)
    training = Training(
        Objective(loss),
        Optimizer(optimizer=optimizer),
        Metric(metric),
        EpochStoppingCondition(epochs),
        1)

    # Search spaces per registered custom layer, keyed by f'{layer_name}{index}'.
    parameters = defaultdict(SisyLayerParams)
    blocks = []
    for i, e in enumerate(sisy_layout[1:-1]):
        layer_name = e[0]
        layer = deepcopy(e[1])
        if layer_name in reference_parameters['layers'].keys():
            layer_key = f'{layer_name}{i}'
            if layer:
                # Loop-invariant lookups hoisted out of the per-parameter loop.
                layers_module = importlib.import_module('keras.layers.core')
                custom_class = getattr(layers_module, layer_name)
            for key in list(layer.keys()):
                # list/range values are search spaces, not fixed values:
                # register the layer as customisable and move the space into
                # `parameters` so only fixed values stay on the layer itself.
                if type(layer[key]) in (list, range):
                    register_custom_layer(
                        layer_key,
                        custom_class,
                        deepcopy(reference_parameters['layers'][layer_name]),
                        True)
                    parameters[layer_key][key] = layer[key]
                    del layer[key]
            block = (layer_key, layer)
        else:
            block = e
        blocks.append(block)

    layout = Layout(
        input_size,
        output_size,
        output_activation=output_activation,
        output_initializer=output_initializer,
        block=blocks
    )
    experiment_parameters = ExperimentParameters(use_default_values=True)
    experiment_settings = ExperimentSettings()
    # A single row/block/layer: the GA searches layer hyperparameters only.
    experiment_parameters.layout_parameter('rows', 1)
    experiment_parameters.layout_parameter('blocks', 1)
    experiment_parameters.layout_parameter('layers', 1)
    for key, layer_params in parameters.items():
        for x, space in layer_params.p.items():
            if type(space) == range:
                # Numeric search space bounded by the range's ends.
                if type(space[0]) == int:
                    experiment_parameters.layer_parameter(f'{key}.{x}', int_param(space[0], space[-1]))
                if type(space[0]) == float:
                    experiment_parameters.layer_parameter(f'{key}.{x}', float_param(space[0], space[-1]))
            if type(space) == list:
                # Categorical search space (e.g. activation function names).
                experiment_parameters.layer_parameter(f'{key}.{x}', string_param(space))

    experiment_settings.ga['population_size'] = population_size
    experiment_settings.ga['generations'] = generations
    experiment_settings.ga['p_offspring'] = offspring
    experiment_settings.ga['p_mutation'] = mutation
    experiment_settings.ga['fitness_type'] = fitness_type
    experiment = Experiment(
        experiment_label,
        layout=layout,
        training=training,
        batch_iterator=batch_iterator,
        test_batch_iterator=test_batch_iterator,
        environment=GpuEnvironment(devices=devices, n_jobs=n_jobs),
        parameters=experiment_parameters,
        settings=experiment_settings
    )
    return run_ga_search_experiment(
        experiment,
        resume=False,
        log_level='DEBUG')
| [
"minos.experiment.ga.run_ga_search_experiment",
"collections.defaultdict",
"numpy.arange",
"minos.model.model.Metric",
"os.path.dirname",
"webbrowser.open_new_tab",
"minos.model.model.Layout",
"minos.experiment.experiment.ExperimentParameters",
"minos.experiment.experiment.ExperimentSettings",
"mi... | [((259, 288), 'webbrowser.open_new_tab', 'webbrowser.open_new_tab', (['path'], {}), '(path)\n', (282, 288), False, 'import webbrowser\n'), ((3466, 3581), 'minos.train.utils.SimpleBatchIterator', 'SimpleBatchIterator', (['X_train', 'y_train'], {'batch_size': 'batch_size', 'autoloop': 'autoloop', 'preload': '(True)', 'shuffle': 'shuffle'}), '(X_train, y_train, batch_size=batch_size, autoloop=\n autoloop, preload=True, shuffle=shuffle)\n', (3485, 3581), False, 'from minos.train.utils import SimpleBatchIterator, GpuEnvironment\n'), ((3644, 3757), 'minos.train.utils.SimpleBatchIterator', 'SimpleBatchIterator', (['X_test', 'y_test'], {'batch_size': 'batch_size', 'autoloop': 'autoloop', 'preload': '(True)', 'shuffle': 'shuffle'}), '(X_test, y_test, batch_size=batch_size, autoloop=\n autoloop, preload=True, shuffle=shuffle)\n', (3663, 3757), False, 'from minos.train.utils import SimpleBatchIterator, GpuEnvironment\n'), ((4087, 4115), 'collections.defaultdict', 'defaultdict', (['SisyLayerParams'], {}), '(SisyLayerParams)\n', (4098, 4115), False, 'from collections import defaultdict\n'), ((7145, 7270), 'minos.model.model.Layout', 'Layout', (['input_size', 'output_size'], {'output_activation': 'output_activation', 'output_initializer': 'output_initializer', 'block': 'blocks'}), '(input_size, output_size, output_activation=output_activation,\n output_initializer=output_initializer, block=blocks)\n', (7151, 7270), False, 'from minos.model.model import Layout, Objective, Optimizer, Metric\n'), ((7464, 7509), 'minos.experiment.experiment.ExperimentParameters', 'ExperimentParameters', ([], {'use_default_values': '(True)'}), '(use_default_values=True)\n', (7484, 7509), False, 'from minos.experiment.experiment import ExperimentParameters\n'), ((7536, 7556), 'minos.experiment.experiment.ExperimentSettings', 'ExperimentSettings', ([], {}), '()\n', (7554, 7556), False, 'from minos.experiment.experiment import Experiment, ExperimentSettings\n'), ((9954, 10023), 
'minos.experiment.ga.run_ga_search_experiment', 'run_ga_search_experiment', (['experiment'], {'resume': '(False)', 'log_level': '"""DEBUG"""'}), "(experiment, resume=False, log_level='DEBUG')\n", (9978, 10023), False, 'from minos.experiment.ga import run_ga_search_experiment\n'), ((396, 417), 'numpy.arange', 'np.arange', (['x', 'y', 'jump'], {}), '(x, y, jump)\n', (405, 417), True, 'import numpy as np\n'), ((3937, 3952), 'minos.model.model.Objective', 'Objective', (['loss'], {}), '(loss)\n', (3946, 3952), False, 'from minos.model.model import Layout, Objective, Optimizer, Metric\n'), ((3962, 3992), 'minos.model.model.Optimizer', 'Optimizer', ([], {'optimizer': 'optimizer'}), '(optimizer=optimizer)\n', (3971, 3992), False, 'from minos.model.model import Layout, Objective, Optimizer, Metric\n'), ((4002, 4016), 'minos.model.model.Metric', 'Metric', (['metric'], {}), '(metric)\n', (4008, 4016), False, 'from minos.model.model import Layout, Objective, Optimizer, Metric\n'), ((4026, 4056), 'minos.experiment.training.EpochStoppingCondition', 'EpochStoppingCondition', (['epochs'], {}), '(epochs)\n', (4048, 4056), False, 'from minos.experiment.training import Training, EpochStoppingCondition\n'), ((4353, 4367), 'copy.deepcopy', 'deepcopy', (['e[1]'], {}), '(e[1])\n', (4361, 4367), False, 'from copy import deepcopy\n'), ((206, 231), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'import os\n'), ((9809, 9855), 'minos.train.utils.GpuEnvironment', 'GpuEnvironment', ([], {'devices': 'devices', 'n_jobs': 'n_jobs'}), '(devices=devices, n_jobs=n_jobs)\n', (9823, 9855), False, 'from minos.train.utils import SimpleBatchIterator, GpuEnvironment\n'), ((4597, 4641), 'importlib.import_module', 'importlib.import_module', (['"""keras.layers.core"""'], {}), "('keras.layers.core')\n", (4620, 4641), False, 'import importlib\n'), ((8374, 8389), 'minos.model.parameter.string_param', 'string_param', (['a'], {}), '(a)\n', (8386, 8389), False, 'from 
minos.model.parameter import int_param, string_param, float_param\n'), ((4921, 4973), 'copy.deepcopy', 'deepcopy', (["reference_parameters['layers'][layer_name]"], {}), "(reference_parameters['layers'][layer_name])\n", (4929, 4973), False, 'from copy import deepcopy\n'), ((8067, 8089), 'minos.model.parameter.int_param', 'int_param', (['a[0]', 'a[-1]'], {}), '(a[0], a[-1])\n', (8076, 8089), False, 'from minos.model.parameter import int_param, string_param, float_param\n'), ((8210, 8234), 'minos.model.parameter.float_param', 'float_param', (['a[0]', 'a[-1]'], {}), '(a[0], a[-1])\n', (8221, 8234), False, 'from minos.model.parameter import int_param, string_param, float_param\n')] |
from __future__ import annotations
from typing import Optional, List, Dict, Tuple
import logging
import textwrap
import pandas as pd
import numpy as np
import h5py
from tqdm import tqdm
from .catmaid_interface import Catmaid, Bbox, ConnectorDetail
from .utils import CoordZYX
logger = logging.getLogger(__name__)
def trim_cols(df, required: List[str], name=None):
    """Select the *required* columns from *df*.

    Raises ValueError (naming the table via *name*, if given) when any
    required column is absent.
    """
    try:
        return df[required]
    except KeyError:
        label = f"{name} " if name else ""
        raise ValueError(
            f"Invalid {label}dataframe columns.\n\tRequired: {required}\n\t Got: {list(df.columns)}"
        )
class TransformerMixin:
    """World-space <-> pixel-space coordinate conversion.

    If ``_transformer_attr`` names an attribute, conversions are delegated to
    that object; otherwise ``self.offset`` and ``self.resolution`` are used
    directly.
    """
    _transformer_attr: Optional[str] = None

    def world_to_px(self, world_coords, as_int=False, round_z=False):
        delegate_name = self._transformer_attr
        if delegate_name is not None:
            delegate = getattr(self, delegate_name)
            return delegate.world_to_px(world_coords, as_int, round_z)
        px = (np.asarray(world_coords) - self.offset) / self.resolution
        if round_z:
            # Round only the z component, keeping the array dtype.
            px[..., 0] = np.round(px[..., 0]).astype(px.dtype)
        return px.astype(np.uint64) if as_int else px

    def px_to_world(self, px_coords):
        delegate_name = self._transformer_attr
        if delegate_name is not None:
            return getattr(self, delegate_name).px_to_world(px_coords)
        scaled = np.asarray(px_coords, dtype=np.float64) * self.resolution
        return scaled + self.offset
class Image(TransformerMixin):
    """A 3D voxel volume (z, y, x order) with world-space resolution and offset."""

    def __init__(
        self, array, resolution=(1, 1, 1), offset=(0, 0, 0), dims=("z", "y", "x")
    ):
        self.array = np.asarray(array)
        self.resolution = np.asarray(resolution, dtype=float)
        self.offset = np.asarray(offset, dtype=float)
        if list(dims) != ["z", "y", "x"]:
            raise NotImplementedError("Non-ZYX orientations are not supported")
        self.dims = dims

    def extents(self):
        """[[mins], [maxes]] of the volume in world coordinates."""
        return np.array([self.offset, self.offset + self.resolution * self.array.shape])

    def to_hdf5(self, f, name, mode="a", attrs=None):
        """Write the array plus resolution/offset/dims metadata as an HDF5 dataset.

        ``f`` may be an open ``h5py.Group`` or anything ``h5py.File`` accepts;
        returns the created dataset.
        """
        if not isinstance(f, h5py.Group):
            with h5py.File(f, mode) as f2:
                return self.to_hdf5(f2, name)
        ds = f.create_dataset(name, data=self.array, compression="gzip")
        ds.attrs["resolution"] = self.resolution
        ds.attrs["offset"] = self.offset
        ds.attrs["dims"] = self.dims
        if attrs is not None:
            ds.attrs.update(attrs)
        return ds

    def is_compatible(self, other: Image):
        """True if *other* matches in class, shape, resolution, offset, and dims."""
        try:
            self.raise_on_incompatible(other)
        except ValueError:
            return False
        return True

    def raise_on_incompatible(self, other: Image, names=("left", "right")):
        """Raise ValueError listing every property on which the two images differ."""
        features = {}
        if not isinstance(self, Image) or not isinstance(other, Image):
            features["class"] = (type(self), type(other))
        if self.array.shape != other.array.shape:
            features["shape"] = (self.array.shape, other.array.shape)
        if tuple(self.resolution) != tuple(other.resolution):
            features["resolution"] = (tuple(self.resolution), tuple(other.resolution))
        if tuple(self.offset) != tuple(other.offset):
            features["offset"] = (tuple(self.offset), tuple(other.offset))
        if tuple(self.dims) != tuple(other.dims):
            features["dims"] = (tuple(self.dims), tuple(other.dims))
        if not features:
            return
        left_name, right_name = pad_strs(names)
        lines = []
        for k, (l_val, r_val) in features.items():
            lines.append(k)
            lines.append(f"  {left_name}: {l_val}")
            lines.append(f"  {right_name}: {r_val}")
        msg = textwrap.indent("\n".join(lines), "  ")
        raise ValueError("Images not compatible.\n" + msg)

    @classmethod
    def from_hdf5(cls, f, name=None):
        """Read an Image back from an h5py Dataset, Group (+ name), or file path."""
        if isinstance(f, h5py.Dataset):
            return cls(f[:], f.attrs["resolution"], f.attrs["offset"], f.attrs["dims"])
        elif isinstance(f, h5py.Group):
            return cls.from_hdf5(f[name])
        else:
            with h5py.File(f, "r") as f2:
                return cls.from_hdf5(f2[name])

    def max_plus_one(self):
        """Return ``array.max() + 1`` (e.g. the next free label ID) for integer arrays."""
        # BUG FIX: ``issubclass`` was called on a dtype *instance*, which always
        # raised TypeError; ``np.issubdtype`` is the correct dtype check.  Also
        # ``self.array.data`` is a memoryview with no ``.max()``; call ``max()``
        # on the array itself.
        if not np.issubdtype(self.array.dtype, np.integer):
            raise TypeError("Array is not of integer subtype")
        return self.array.max() + 1

    def contains(self, coord: Tuple[float, float, float]) -> bool:
        """Whether a real-world coordinate tuple is inside the array"""
        # BUG FIX: ``extents`` is a method; it was previously referenced without
        # calling it, making the subtraction fail at runtime.
        diffs = self.extents() - coord
        return np.all(diffs[0] <= 0) and np.all(diffs[1] >= 0)

    def sub_image_px(
        self, internal_offset: Tuple[int, int, int], shape: Tuple[int, int, int]
    ) -> Image:
        """Crop a sub-volume given a pixel offset (within this image) and pixel shape."""
        int_off = np.asarray(internal_offset, int)
        if np.any(int_off < 0):
            raise ValueError("internal_offset must be positive")
        if np.any(int_off + shape > self.array.shape):
            raise ValueError("sub-image extends beyond image")
        slicing = tuple(slice(o, o + s) for o, s in zip(int_off, shape))
        arr = self.array[slicing]
        return type(self)(
            arr, self.resolution, self.offset + int_off * self.resolution, self.dims
        )

    def sub_image(
        self,
        internal_offset: Tuple[float, float, float],
        shape: Tuple[float, float, float],
    ) -> Image:
        """Start and stop points are found in world coordinates; then rounded to pixels"""
        # NOTE(review): this adds ``self.offset`` to a world-space offset and
        # never divides by ``resolution`` before passing pixel arguments to
        # ``sub_image_px`` -- looks suspicious for resolution != 1 or nonzero
        # offset; confirm intended behaviour before relying on it.
        offset_px = np.round(self.offset + internal_offset).astype(int)
        stop_px = np.round(self.offset + internal_offset + shape).astype(int)
        return self.sub_image_px(offset_px, stop_px - offset_px)
def pad_strs(strs, prefix=True, pad=" "):
    """Right-pad every string in *strs* with *pad* to the length of the longest one.

    NOTE: the ``prefix`` argument is accepted but currently unused.
    """
    if len(pad) != 1:
        raise ValueError("Pad string must be 1 character long")
    width = max(len(s) for s in strs)
    return [s.ljust(width, pad) for s in strs]
def serialize_treenodes(tns: pd.DataFrame):
    """Return a copy of *tns* whose parent_id column is plain int, NaN encoded as 0."""
    serialized = tns.copy()
    filled = serialized["parent_id"].fillna(0)
    serialized["parent_id"] = np.array(filled, dtype=int)
    return serialized
def deserialize_treenodes(tns: pd.DataFrame):
    """Return a copy of *tns* with parent_id as nullable UInt64, decoding 0 back to NA."""
    deserialized = tns.copy()
    parent_ids = pd.array(deserialized["parent_id"], dtype="UInt64")
    parent_ids[parent_ids == 0] = pd.NA
    deserialized["parent_id"] = parent_ids
    return deserialized
def remove_single_nodes(treenodes: pd.DataFrame):
    """Remove all nodes belonging to skeletons with only 1 treenode in the dataframe.

    Returns a copy; the input frame is not modified.
    """
    skeleton_ids, counts = np.unique(treenodes["skeleton_id"], return_counts=True)
    singleton_skeletons = skeleton_ids[counts == 1]
    # Vectorised membership test instead of the previous per-skeleton
    # boolean-OR loop, which was O(n_rows * n_singletons).
    keep = ~treenodes["skeleton_id"].isin(singleton_skeletons)
    return treenodes.loc[keep].copy()
class CatnapIO(TransformerMixin):
    """Bundles a raw image volume with CATMAID annotation tables and optional labels.

    World <-> pixel conversions are delegated to the ``raw`` image via
    ``TransformerMixin``.
    """
    # Delegate world_to_px / px_to_world to the raw Image.
    _transformer_attr = "raw"
    def __init__(
        self,
        raw: Image,
        treenodes: pd.DataFrame,
        connectors: pd.DataFrame,
        partners: pd.DataFrame,
        labels: Optional[Image] = None,
    ):
        self.raw: Image = raw
        # Keep only the required columns; skeletons represented by a single
        # node are dropped entirely.
        self.treenodes = remove_single_nodes(
            trim_cols(
                treenodes,
                ["treenode_id", "parent_id", "skeleton_id", "z", "y", "x"],
                "treenode",
            )
        )
        self.connectors = trim_cols(
            connectors, ["connector_id", "z", "y", "x"], "connector"
        )
        self.partners = trim_cols(
            partners,
            ["skeleton_id", "treenode_id", "connector_id", "is_presynaptic"],
            "partners",
        )
        self.labels: Optional[Image] = None
        # set_labels validates compatibility between labels and raw.
        self.set_labels(labels)
    def to_hdf5(self, fpath, gname=""):
        """Write tables (pandas HDFStore) and volumes (h5py) into one HDF5 file."""
        gname = gname.rstrip("/") if gname else ""
        prefix = f"{gname}/tables"
        with pd.HDFStore(fpath, "w") as f:
            serialize_treenodes(self.treenodes).to_hdf(f, f"{prefix}/treenodes")
            self.connectors.to_hdf(f, f"{prefix}/connectors")
            self.partners.to_hdf(f, f"{prefix}/partners")
        prefix = f"{gname}/volumes"
        # Append mode: the HDFStore above already created the file.
        with h5py.File(fpath, "a") as f:
            self.raw.to_hdf5(f, f"{prefix}/raw")
            if self.labels is not None:
                self.labels.to_hdf5(f, f"{prefix}/labels")
    @classmethod
    def from_hdf5(cls, fpath, gname="", ignore_labels=False):
        """Load a CatnapIO previously written by :meth:`to_hdf5`."""
        prefix = f"{gname}/tables"
        with pd.HDFStore(fpath, "r") as f:
            treenodes = deserialize_treenodes(pd.read_hdf(f, f"{prefix}/treenodes"))
            connectors = pd.read_hdf(f, f"{prefix}/connectors")
            partners = pd.read_hdf(f, f"{prefix}/partners")
        prefix = f"{gname}/volumes"
        with h5py.File(fpath, "r") as f:
            raw = Image.from_hdf5(f[f"{prefix}/raw"])
            labels = None
            if not ignore_labels:
                try:
                    labels = Image.from_hdf5(f[f"{prefix}/labels"])
                except KeyError:
                    # Labels are optional in the file.
                    pass
        return cls(raw, treenodes, connectors, partners, labels)
    @classmethod
    def from_catmaid(cls, catmaid: Catmaid, raw: Image, labels=None):
        """Build a CatnapIO by fetching annotations inside raw's bounding box from CATMAID."""
        dims = raw.dims
        extents = [CoordZYX({d: x for d, x in zip(dims, ext)}) for ext in raw.extents()]
        logger.info("Fetching annotations from CATMAID")
        raw_conns: pd.DataFrame
        treenodes, raw_conns = catmaid.nodes_in_bbox(Bbox.from_start_stop(*extents))
        connectors, partners = ConnectorDetail.to_connector_partners_df(
            tqdm(
                catmaid.connector_detail_many(raw_conns.connector_id),
                desc="connector details",
                total=len(raw_conns.connector_id),
            )
        )
        return cls(raw, treenodes, connectors, partners, labels)
    def set_labels(self, labels: Optional[Image]) -> bool:
        """Replace ``self.labels``; returns True if labels were previously set."""
        if labels is not None:
            self.raw.raise_on_incompatible(labels, ("raw", "labels"))
        ret = self.labels is not None
        self.labels = labels
        return ret
    def coords_in_raw(self, zyx) -> np.ndarray:
        """Boolean mask of which world-space ZYX rows fall inside the raw volume."""
        mins, maxes = self.raw.extents()
        zyx_arr = np.asarray(zyx)
        return np.all(np.logical_and(maxes > zyx_arr, zyx_arr >= mins), axis=1)
    def make_labels(self, treenode_radius=None, set_labels=False) -> Image:
        """Create a uint64 label volume shaped like raw.

        If ``treenode_radius`` is given, paint an in-plane (single z slice)
        square of that radius around every in-volume treenode, with one label
        value per skeleton (labels start at 1; 0 is background).
        """
        labels = Image(
            np.zeros_like(self.raw.array, np.uint64),
            self.raw.resolution,
            self.raw.offset,
            self.raw.dims,
        )
        if set_labels:
            self.labels = labels
        if not treenode_radius:
            return labels
        tns = self.treenodes.copy()
        zyx_world = tns[["z", "y", "x"]]
        idxs = self.coords_in_raw(zyx_world)
        zyx_px = ((zyx_world - labels.offset) / labels.resolution).astype(int)
        skels = tns["skeleton_id"][idxs]
        locs = zyx_px[idxs]
        # Dense relabelling: skeleton_id -> 1..n.
        skid_to_label: Dict[int, int] = {
            skid: label for label, skid in enumerate(np.unique(skels), 1)
        }
        for skid, (z, y, x) in zip(skels, locs.itertuples(index=False)):
            # +1 accounts for 0-based index
            slicing = (
                z,
                slice(max(0, y - treenode_radius + 1), y + treenode_radius + 1),
                slice(max(0, x - treenode_radius + 1), x + treenode_radius + 1),
            )
            labels.array[tuple(slicing)] = skid_to_label[skid]
        return labels
    def join_tables(self):
        """Join treenodes with their synaptic partner rows and connector locations."""
        merged = pd.merge(
            self.treenodes,
            self.partners,
            on="treenode_id",
            suffixes=("_t", "_tc"),
        )
        return pd.merge(
            merged, self.connectors, on="connector_id", suffixes=("_t", "_c")
        )
| [
"h5py.File",
"numpy.zeros_like",
"pandas.HDFStore",
"pandas.read_hdf",
"pandas.array",
"numpy.logical_and",
"numpy.asarray",
"pandas.merge",
"numpy.all",
"logging.getLogger",
"numpy.any",
"numpy.array",
"numpy.round",
"numpy.unique"
] | [((288, 315), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (305, 315), False, 'import logging\n'), ((6205, 6247), 'pandas.array', 'pd.array', (["tns['parent_id']"], {'dtype': '"""UInt64"""'}), "(tns['parent_id'], dtype='UInt64')\n", (6213, 6247), True, 'import pandas as pd\n'), ((6476, 6531), 'numpy.unique', 'np.unique', (["treenodes['skeleton_id']"], {'return_counts': '(True)'}), "(treenodes['skeleton_id'], return_counts=True)\n", (6485, 6531), True, 'import numpy as np\n'), ((1654, 1671), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (1664, 1671), True, 'import numpy as np\n'), ((1698, 1733), 'numpy.asarray', 'np.asarray', (['resolution'], {'dtype': 'float'}), '(resolution, dtype=float)\n', (1708, 1733), True, 'import numpy as np\n'), ((1756, 1787), 'numpy.asarray', 'np.asarray', (['offset'], {'dtype': 'float'}), '(offset, dtype=float)\n', (1766, 1787), True, 'import numpy as np\n'), ((2006, 2079), 'numpy.array', 'np.array', (['[self.offset, self.offset + self.resolution * self.array.shape]'], {}), '([self.offset, self.offset + self.resolution * self.array.shape])\n', (2014, 2079), True, 'import numpy as np\n'), ((4819, 4851), 'numpy.asarray', 'np.asarray', (['internal_offset', 'int'], {}), '(internal_offset, int)\n', (4829, 4851), True, 'import numpy as np\n'), ((4863, 4882), 'numpy.any', 'np.any', (['(int_off < 0)'], {}), '(int_off < 0)\n', (4869, 4882), True, 'import numpy as np\n'), ((4960, 5002), 'numpy.any', 'np.any', (['(int_off + shape > self.array.shape)'], {}), '(int_off + shape > self.array.shape)\n', (4966, 5002), True, 'import numpy as np\n'), ((10094, 10109), 'numpy.asarray', 'np.asarray', (['zyx'], {}), '(zyx)\n', (10104, 10109), True, 'import numpy as np\n'), ((11425, 11510), 'pandas.merge', 'pd.merge', (['self.treenodes', 'self.partners'], {'on': '"""treenode_id"""', 'suffixes': "('_t', '_tc')"}), "(self.treenodes, self.partners, on='treenode_id', suffixes=('_t',\n '_tc'))\n", (11433, 11510), 
True, 'import pandas as pd\n'), ((11581, 11656), 'pandas.merge', 'pd.merge', (['merged', 'self.connectors'], {'on': '"""connector_id"""', 'suffixes': "('_t', '_c')"}), "(merged, self.connectors, on='connector_id', suffixes=('_t', '_c'))\n", (11589, 11656), True, 'import pandas as pd\n'), ((4633, 4654), 'numpy.all', 'np.all', (['(diffs[0] <= 0)'], {}), '(diffs[0] <= 0)\n', (4639, 4654), True, 'import numpy as np\n'), ((4659, 4680), 'numpy.all', 'np.all', (['(diffs[1] >= 0)'], {}), '(diffs[1] >= 0)\n', (4665, 4680), True, 'import numpy as np\n'), ((7755, 7778), 'pandas.HDFStore', 'pd.HDFStore', (['fpath', '"""w"""'], {}), "(fpath, 'w')\n", (7766, 7778), True, 'import pandas as pd\n'), ((8036, 8057), 'h5py.File', 'h5py.File', (['fpath', '"""a"""'], {}), "(fpath, 'a')\n", (8045, 8057), False, 'import h5py\n'), ((8341, 8364), 'pandas.HDFStore', 'pd.HDFStore', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (8352, 8364), True, 'import pandas as pd\n'), ((8481, 8519), 'pandas.read_hdf', 'pd.read_hdf', (['f', 'f"""{prefix}/connectors"""'], {}), "(f, f'{prefix}/connectors')\n", (8492, 8519), True, 'import pandas as pd\n'), ((8543, 8579), 'pandas.read_hdf', 'pd.read_hdf', (['f', 'f"""{prefix}/partners"""'], {}), "(f, f'{prefix}/partners')\n", (8554, 8579), True, 'import pandas as pd\n'), ((8630, 8651), 'h5py.File', 'h5py.File', (['fpath', '"""r"""'], {}), "(fpath, 'r')\n", (8639, 8651), False, 'import h5py\n'), ((10132, 10180), 'numpy.logical_and', 'np.logical_and', (['(maxes > zyx_arr)', '(zyx_arr >= mins)'], {}), '(maxes > zyx_arr, zyx_arr >= mins)\n', (10146, 10180), True, 'import numpy as np\n'), ((10303, 10343), 'numpy.zeros_like', 'np.zeros_like', (['self.raw.array', 'np.uint64'], {}), '(self.raw.array, np.uint64)\n', (10316, 10343), True, 'import numpy as np\n'), ((2194, 2212), 'h5py.File', 'h5py.File', (['f', 'mode'], {}), '(f, mode)\n', (2203, 2212), False, 'import h5py\n'), ((5553, 5592), 'numpy.round', 'np.round', (['(self.offset + internal_offset)'], {}), 
'(self.offset + internal_offset)\n', (5561, 5592), True, 'import numpy as np\n'), ((5623, 5670), 'numpy.round', 'np.round', (['(self.offset + internal_offset + shape)'], {}), '(self.offset + internal_offset + shape)\n', (5631, 5670), True, 'import numpy as np\n'), ((8417, 8454), 'pandas.read_hdf', 'pd.read_hdf', (['f', 'f"""{prefix}/treenodes"""'], {}), "(f, f'{prefix}/treenodes')\n", (8428, 8454), True, 'import pandas as pd\n'), ((808, 832), 'numpy.asarray', 'np.asarray', (['world_coords'], {}), '(world_coords)\n', (818, 832), True, 'import numpy as np\n'), ((1313, 1352), 'numpy.asarray', 'np.asarray', (['px_coords'], {'dtype': 'np.float64'}), '(px_coords, dtype=np.float64)\n', (1323, 1352), True, 'import numpy as np\n'), ((4179, 4196), 'h5py.File', 'h5py.File', (['f', '"""r"""'], {}), "(f, 'r')\n", (4188, 4196), False, 'import h5py\n'), ((10927, 10943), 'numpy.unique', 'np.unique', (['skels'], {}), '(skels)\n', (10936, 10943), True, 'import numpy as np\n'), ((920, 941), 'numpy.round', 'np.round', (['out[..., 0]'], {}), '(out[..., 0])\n', (928, 941), True, 'import numpy as np\n')] |
# Data-loading script for the DKFZ dataset: builds train/test DataLoaders and
# stores them (plus the INN dimension settings) on the shared `config` module.
import numpy as np
import torch
import torch.utils.data
import config as c
# Test split is read directly from two .npy files.
x_test = torch.Tensor(np.load('./dkfz_data/x_test.npy'))
y_test = torch.Tensor(np.load('./dkfz_data/y_test.npy'))
# Training split is the original training data plus an additional batch,
# concatenated along the sample axis.
x_train = np.concatenate([np.load('./dkfz_data/x_train.npy'),
                          np.load('./dkfz_data/x_additional.npy')], axis=0)
y_train = np.concatenate([np.load('./dkfz_data/y_train.npy'),
                          np.load('./dkfz_data/y_additional.npy')], axis=0)
# There is a single NaN in the dataset.
# `y_train != y_train` is true only for NaN entries; drop every sample (row)
# containing at least one.
nan_index = np.unique(np.argwhere(y_train != y_train)[:,0])
x_train = np.delete(x_train, nan_index, axis=0)
y_train = np.delete(y_train, nan_index, axis=0)
x_train = torch.Tensor(x_train)
y_train = torch.Tensor(y_train)
# Expose the loaders via the config module so training code can reach them;
# drop_last keeps every batch at exactly c.batch_size.
c.test_loader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(x_test, y_test),
    batch_size=c.batch_size, shuffle=False, drop_last=True)
c.train_loader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(x_train, y_train),
    batch_size=c.batch_size, shuffle=True, drop_last=True)
# INN dimensionality settings: 13 input features, 8 outputs, 5 latent dims.
c.ndim_x = 13
c.ndim_pad_x = 0
c.ndim_y = 8
c.ndim_z = 5
c.ndim_pad_zy = 0
c.mmd_back_weighted = True
c.filename_out = 'output/dkfz_inn.pt'
if __name__ == "__main__":
    import train
    train.main()
| [
"numpy.load",
"train.main",
"torch.Tensor",
"torch.utils.data.TensorDataset",
"numpy.argwhere",
"numpy.delete"
] | [((579, 616), 'numpy.delete', 'np.delete', (['x_train', 'nan_index'], {'axis': '(0)'}), '(x_train, nan_index, axis=0)\n', (588, 616), True, 'import numpy as np\n'), ((627, 664), 'numpy.delete', 'np.delete', (['y_train', 'nan_index'], {'axis': '(0)'}), '(y_train, nan_index, axis=0)\n', (636, 664), True, 'import numpy as np\n'), ((676, 697), 'torch.Tensor', 'torch.Tensor', (['x_train'], {}), '(x_train)\n', (688, 697), False, 'import torch\n'), ((708, 729), 'torch.Tensor', 'torch.Tensor', (['y_train'], {}), '(y_train)\n', (720, 729), False, 'import torch\n'), ((99, 132), 'numpy.load', 'np.load', (['"""./dkfz_data/x_test.npy"""'], {}), "('./dkfz_data/x_test.npy')\n", (106, 132), True, 'import numpy as np\n'), ((156, 189), 'numpy.load', 'np.load', (['"""./dkfz_data/y_test.npy"""'], {}), "('./dkfz_data/y_test.npy')\n", (163, 189), True, 'import numpy as np\n'), ((780, 826), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (810, 826), False, 'import torch\n'), ((939, 987), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (969, 987), False, 'import torch\n'), ((1256, 1268), 'train.main', 'train.main', ([], {}), '()\n', (1266, 1268), False, 'import train\n'), ((218, 252), 'numpy.load', 'np.load', (['"""./dkfz_data/x_train.npy"""'], {}), "('./dkfz_data/x_train.npy')\n", (225, 252), True, 'import numpy as np\n'), ((280, 319), 'numpy.load', 'np.load', (['"""./dkfz_data/x_additional.npy"""'], {}), "('./dkfz_data/x_additional.npy')\n", (287, 319), True, 'import numpy as np\n'), ((357, 391), 'numpy.load', 'np.load', (['"""./dkfz_data/y_train.npy"""'], {}), "('./dkfz_data/y_train.npy')\n", (364, 391), True, 'import numpy as np\n'), ((419, 458), 'numpy.load', 'np.load', (['"""./dkfz_data/y_additional.npy"""'], {}), "('./dkfz_data/y_additional.npy')\n", (426, 458), True, 'import numpy as np\n'), ((531, 562), 'numpy.argwhere', 
'np.argwhere', (['(y_train != y_train)'], {}), '(y_train != y_train)\n', (542, 562), True, 'import numpy as np\n')] |
import numpy as np
from pydrake.all import (AddMultibodyPlantSceneGraph, ConnectMeshcatVisualizer,
Simulator, SpatialForce, RigidTransform)
from pydrake.trajectories import PiecewisePolynomial
from pydrake.systems.framework import DiagramBuilder
from pydrake.multibody.plant import ExternallyAppliedSpatialForce
from pydrake.systems.primitives import TrajectorySource, LogOutput
from ..utils import *
from ..robot_internal_controller import RobotInternalController
def run_sim(q_traj_iiwa: PiecewisePolynomial,
            Kp_iiwa: np.array,
            gravity: np.array,
            f_C_W,
            time_step,
            add_schunk: bool,
            is_visualizing=False):
    """Simulate an IIWA arm tracking a joint trajectory under an external force.

    Builds a Drake diagram containing the arm (optionally with a Schunk
    gripper), an internal impedance controller, and a trajectory source, then
    simulates the commanded trajectory while a constant spatial force f_C_W is
    applied to iiwa_link_7.

    Args:
        q_traj_iiwa: joint-space trajectory for the arm to track.
        Kp_iiwa: joint stiffness gains for the impedance controller.
        gravity: gravity vector set on the MultibodyPlant.
        f_C_W: constant force applied at link 7, expressed in the world frame.
        time_step: discrete time step of the MultibodyPlant.
        add_schunk: whether to load the model that includes the Schunk gripper.
        is_visualizing: if True, connect a meshcat visualizer to the diagram.

    Returns:
        Tuple (iiwa_log, controller_iiwa): the logged plant state signal for
        the arm and the internal controller system.
    """
    # Build diagram.
    builder = DiagramBuilder()
    # MultibodyPlant
    plant = MultibodyPlant(time_step)
    plant.mutable_gravity_field().set_gravity_vector(gravity)
    _, scene_graph = AddMultibodyPlantSceneGraph(builder, plant=plant)
    parser = Parser(plant=plant, scene_graph=scene_graph)
    add_package_paths(parser)
    # Load either the arm-plus-gripper or the bare-arm model directives.
    if add_schunk:
        ProcessModelDirectives(
            LoadModelDirectives(
                os.path.join(models_dir, 'iiwa_and_schunk.yml')),
            plant, parser)
        schunk_model = plant.GetModelInstanceByName('schunk')
    else:
        ProcessModelDirectives(
            LoadModelDirectives(os.path.join(models_dir, 'iiwa.yml')),
            plant, parser)
    iiwa_model = plant.GetModelInstanceByName('iiwa')
    plant.Finalize()
    # IIWA controller
    plant_robot, _ = create_iiwa_controller_plant(
        gravity, add_schunk_inertia=add_schunk)
    controller_iiwa = RobotInternalController(
        plant_robot=plant_robot, joint_stiffness=Kp_iiwa,
        controller_mode="impedance")
    builder.AddSystem(controller_iiwa)
    # Controller torques drive the plant; plant state feeds the controller.
    builder.Connect(controller_iiwa.GetOutputPort("joint_torques"),
                    plant.get_actuation_input_port(iiwa_model))
    builder.Connect(plant.get_state_output_port(iiwa_model),
                    controller_iiwa.robot_state_input_port)
    # IIWA Trajectory source
    traj_source_iiwa = TrajectorySource(q_traj_iiwa)
    builder.AddSystem(traj_source_iiwa)
    builder.Connect(
        traj_source_iiwa.get_output_port(0),
        controller_iiwa.joint_angle_commanded_input_port)
    # meshcat visualizer
    if is_visualizing:
        viz = ConnectMeshcatVisualizer(
            builder, scene_graph, frames_to_draw={"iiwa": {"link_ee"}})
    # Logs
    iiwa_log = LogOutput(plant.get_state_output_port(iiwa_model), builder)
    iiwa_log.set_publish_period(0.001)
    diagram = builder.Build()
    # %% Run simulation.
    sim = Simulator(diagram)
    context = sim.get_context()
    context_controller = diagram.GetSubsystemContext(controller_iiwa, context)
    context_plant = diagram.GetSubsystemContext(plant, context)
    # No feedforward torque: the impedance controller does all the work.
    controller_iiwa.tau_feedforward_input_port.FixValue(
        context_controller, np.zeros(7))
    # robot initial configuration.
    q_iiwa_0 = q_traj_iiwa.value(0).squeeze()
    t_final = q_traj_iiwa.end_time()
    plant.SetPositions(context_plant, iiwa_model, q_iiwa_0)
    if add_schunk:
        # Keep the gripper unactuated (zero commanded actuation).
        plant.get_actuation_input_port(schunk_model).FixValue(
            context_plant, np.zeros(2))
    # constant force on link 7.
    easf = ExternallyAppliedSpatialForce()
    easf.F_Bq_W = SpatialForce([0, 0, 0], f_C_W)
    easf.body_index = plant.GetBodyByName("iiwa_link_7").index()
    plant.get_applied_spatial_force_input_port().FixValue(
        context_plant, [easf])
    # %%
    sim.Initialize()
    # Run as fast as possible (no realtime throttling).
    sim.set_target_realtime_rate(0)
    sim.AdvanceTo(t_final)
    return iiwa_log, controller_iiwa
| [
"pydrake.all.ConnectMeshcatVisualizer",
"pydrake.systems.framework.DiagramBuilder",
"pydrake.all.Simulator",
"numpy.zeros",
"pydrake.all.AddMultibodyPlantSceneGraph",
"pydrake.multibody.plant.ExternallyAppliedSpatialForce",
"pydrake.all.SpatialForce",
"pydrake.systems.primitives.TrajectorySource"
] | [((723, 739), 'pydrake.systems.framework.DiagramBuilder', 'DiagramBuilder', ([], {}), '()\n', (737, 739), False, 'from pydrake.systems.framework import DiagramBuilder\n'), ((884, 933), 'pydrake.all.AddMultibodyPlantSceneGraph', 'AddMultibodyPlantSceneGraph', (['builder'], {'plant': 'plant'}), '(builder, plant=plant)\n', (911, 933), False, 'from pydrake.all import AddMultibodyPlantSceneGraph, ConnectMeshcatVisualizer, Simulator, SpatialForce, RigidTransform\n'), ((2088, 2117), 'pydrake.systems.primitives.TrajectorySource', 'TrajectorySource', (['q_traj_iiwa'], {}), '(q_traj_iiwa)\n', (2104, 2117), False, 'from pydrake.systems.primitives import TrajectorySource, LogOutput\n'), ((2635, 2653), 'pydrake.all.Simulator', 'Simulator', (['diagram'], {}), '(diagram)\n', (2644, 2653), False, 'from pydrake.all import AddMultibodyPlantSceneGraph, ConnectMeshcatVisualizer, Simulator, SpatialForce, RigidTransform\n'), ((3273, 3304), 'pydrake.multibody.plant.ExternallyAppliedSpatialForce', 'ExternallyAppliedSpatialForce', ([], {}), '()\n', (3302, 3304), False, 'from pydrake.multibody.plant import ExternallyAppliedSpatialForce\n'), ((3323, 3353), 'pydrake.all.SpatialForce', 'SpatialForce', (['[0, 0, 0]', 'f_C_W'], {}), '([0, 0, 0], f_C_W)\n', (3335, 3353), False, 'from pydrake.all import AddMultibodyPlantSceneGraph, ConnectMeshcatVisualizer, Simulator, SpatialForce, RigidTransform\n'), ((2345, 2434), 'pydrake.all.ConnectMeshcatVisualizer', 'ConnectMeshcatVisualizer', (['builder', 'scene_graph'], {'frames_to_draw': "{'iiwa': {'link_ee'}}"}), "(builder, scene_graph, frames_to_draw={'iiwa': {\n 'link_ee'}})\n", (2369, 2434), False, 'from pydrake.all import AddMultibodyPlantSceneGraph, ConnectMeshcatVisualizer, Simulator, SpatialForce, RigidTransform\n'), ((2915, 2926), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (2923, 2926), True, 'import numpy as np\n'), ((3216, 3227), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3224, 3227), True, 'import numpy as np\n')] |
import algopy
import numpy as np
from scipy.integrate import quad
def quad(f, a, b):
    """Approximate the integral of ``f`` over ``[a, b]`` with a fixed-node sum.

    This deliberately shadows ``scipy.integrate.quad``: algopy can trace this
    pure-Python loop, whereas it cannot differentiate through the compiled
    scipy routine.

    Args:
        f: callable of one variable (may receive algopy tracing objects).
        a: lower integration limit.
        b: upper integration limit.

    Returns:
        The Riemann-sum approximation of the integral of f from a to b.
    """
    n = 1000
    nodes = np.linspace(a, b, n)
    # Each node represents an interval of width (b - a) / n.  The original
    # code weighted each sample by 1 / n, which computes the *mean* of f
    # over [a, b] rather than the integral (off by a factor of b - a).
    weights = (b - a) / n * np.ones(n)
    total = 0
    for w, q in zip(weights, nodes):
        total += f(q) * w
    return total
def eval_f(x):
    """Evaluate the objective: the integral of x[0] * sin(x[1] * t) over [0, 10]."""
    def integrand(t):
        return x[0] * np.sin(x[1] * t)
    return quad(integrand, 0, 10)
# STEP 1: trace the function evaluation
# algopy records every operation applied to the Function object between
# CGraph construction and trace_off(), building a computational graph.
cg = algopy.CGraph()
x = algopy.Function([1, 2])
y = eval_f(x)
cg.trace_off()
# Declare which traced values are the inputs and outputs of the graph.
cg.independentFunctionList = [x]
cg.dependentFunctionList = [y]
# STEP 2: use the computational graph to evaluate derivatives
print('gradient =', cg.gradient([1.0, 2.0]))
print('Jacobian =', cg.jacobian([1.0, 2.0]))
print('Hessian =', cg.hessian([1.0, 2.0]))
print('Hessian vector product =', cg.hess_vec([1.0, 2.0], [1.0, 0.0]))
| [
"algopy.CGraph",
"scipy.integrate.quad",
"numpy.ones",
"numpy.sin",
"numpy.linspace",
"algopy.Function"
] | [((433, 448), 'algopy.CGraph', 'algopy.CGraph', ([], {}), '()\n', (446, 448), False, 'import algopy\n'), ((453, 476), 'algopy.Function', 'algopy.Function', (['[1, 2]'], {}), '([1, 2])\n', (468, 476), False, 'import algopy\n'), ((108, 128), 'numpy.linspace', 'np.linspace', (['a', 'b', 'n'], {}), '(a, b, n)\n', (119, 128), True, 'import numpy as np\n'), ((363, 385), 'scipy.integrate.quad', 'quad', (['integrand', '(0)', '(10)'], {}), '(integrand, 0, 10)\n', (367, 385), False, 'from scipy.integrate import quad\n'), ((145, 155), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (152, 155), True, 'import numpy as np\n'), ((278, 294), 'numpy.sin', 'np.sin', (['(x[1] * t)'], {}), '(x[1] * t)\n', (284, 294), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import cv2
import numpy as np
# Head-pose estimation demo: recover the camera-relative rotation and
# translation of a face from five 2D landmarks and a rigid 3D face model.
# Read the input image.
im = cv2.imread("headPose.jpg");
size = im.shape
# 2D image points. If you change the image, you need to change this vector.
image_points = np.array([
    (359, 391),  # Nose tip
    (467, 311),  # Left eye left corner
    (352, 311),  # Right eye right corner
    (642, 372),  # Left ear corner
    (314, 346)  # Right ear corner
], dtype="double")
# 3D model points (model coordinates, same landmark order as above).
model_points = np.array([
    (0.55592, 6.5629, -25.944448),  # Nose tip
    (28.76499, -35.484287, -1.172675),  # Left eye left corner
    (-28.272964, -35.134495, -0.147273),  # Right eye right corner
    (70.486404, -11.666193, 44.142567),  # Left ear corner
    (-72.77502, -10.949766, 45.909405)  # Right ear corner
])
# Camera internals: approximate the focal length by the image width and put
# the principal point at the image center.
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
    [[focal_length, 0, center[0]],
     [0, focal_length, center[1]],
     [0, 0, 1]], dtype="double"
)
print("Camera Matrix :\n {0}".format(camera_matrix))
dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
# Solve the Perspective-n-Point problem for the head pose.
(success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs)
print("Rotation Vector:\n {0}".format(rotation_vector))
print("Translation Vector:\n {0}".format(translation_vector))
# Project a 3D point (0, 0, 1000.0) onto the image plane.
# We use this to draw a line sticking out of the nose.
# axis = np.float32([[3, 0, 0], [0, 3, 0], [0, 0, -3]]).reshape(-1, 3)
axis = np.array([(0.0, 0.0, 1000.0)])
(nose_end_point2D, jacobian) = cv2.projectPoints(axis, rotation_vector, translation_vector,
                                              camera_matrix, dist_coeffs)
# Mark each landmark with a filled red circle.
for p in image_points:
    cv2.circle(im, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)
# Draw the projected direction line from the nose tip.
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
cv2.line(im, p1, p2, (255, 0, 0), 2)
# Display image
cv2.imshow("Output", im)
cv2.waitKey(0)
| [
"cv2.line",
"cv2.waitKey",
"cv2.solvePnP",
"numpy.zeros",
"cv2.projectPoints",
"cv2.imread",
"numpy.array",
"cv2.imshow"
] | [((72, 98), 'cv2.imread', 'cv2.imread', (['"""headPose.jpg"""'], {}), "('headPose.jpg')\n", (82, 98), False, 'import cv2\n'), ((202, 292), 'numpy.array', 'np.array', (['[(359, 391), (467, 311), (352, 311), (642, 372), (314, 346)]'], {'dtype': '"""double"""'}), "([(359, 391), (467, 311), (352, 311), (642, 372), (314, 346)],\n dtype='double')\n", (210, 292), True, 'import numpy as np\n'), ((446, 639), 'numpy.array', 'np.array', (['[(0.55592, 6.5629, -25.944448), (28.76499, -35.484287, -1.172675), (-\n 28.272964, -35.134495, -0.147273), (70.486404, -11.666193, 44.142567),\n (-72.77502, -10.949766, 45.909405)]'], {}), '([(0.55592, 6.5629, -25.944448), (28.76499, -35.484287, -1.172675),\n (-28.272964, -35.134495, -0.147273), (70.486404, -11.666193, 44.142567),\n (-72.77502, -10.949766, 45.909405)])\n', (454, 639), True, 'import numpy as np\n'), ((850, 951), 'numpy.array', 'np.array', (['[[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0, 1]]'], {'dtype': '"""double"""'}), "([[focal_length, 0, center[0]], [0, focal_length, center[1]], [0, 0,\n 1]], dtype='double')\n", (858, 951), True, 'import numpy as np\n'), ((1033, 1049), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (1041, 1049), True, 'import numpy as np\n'), ((1130, 1198), 'cv2.solvePnP', 'cv2.solvePnP', (['model_points', 'image_points', 'camera_matrix', 'dist_coeffs'], {}), '(model_points, image_points, camera_matrix, dist_coeffs)\n', (1142, 1198), False, 'import cv2\n'), ((1511, 1541), 'numpy.array', 'np.array', (['[(0.0, 0.0, 1000.0)]'], {}), '([(0.0, 0.0, 1000.0)])\n', (1519, 1541), True, 'import numpy as np\n'), ((1573, 1665), 'cv2.projectPoints', 'cv2.projectPoints', (['axis', 'rotation_vector', 'translation_vector', 'camera_matrix', 'dist_coeffs'], {}), '(axis, rotation_vector, translation_vector, camera_matrix,\n dist_coeffs)\n', (1590, 1665), False, 'import cv2\n'), ((1926, 1962), 'cv2.line', 'cv2.line', (['im', 'p1', 'p2', '(255, 0, 0)', '(2)'], {}), '(im, p1, p2, (255, 0, 
0), 2)\n', (1934, 1962), False, 'import cv2\n'), ((1980, 2004), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'im'], {}), "('Output', im)\n", (1990, 2004), False, 'import cv2\n'), ((2005, 2019), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2016, 2019), False, 'import cv2\n')] |
import numpy as np
import tensorflow as tf
from .parse_dict import get_dict
class TextRecognition(object):
    """Text recognizer that runs a frozen TensorFlow attention-OCR graph.

    The graph is loaded from a .pb file; integer class predictions are mapped
    back to characters through a label dictionary.
    """
    def __init__(self, pb_file, dict_file, seq_len):
        """Store paths, then load the frozen graph and the label dictionary.

        Args:
            pb_file: path to the frozen TensorFlow graph (.pb file).
            dict_file: path to the label dictionary file.
            seq_len: maximum output sequence length of the model.
        """
        self.pb_file = pb_file
        self.dict_file = dict_file
        self.seq_len = seq_len
        self.init_model()
        self.init_dict()
    # Load the frozen model file and set up the TensorFlow session.
    def init_model(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            with tf.io.gfile.GFile(self.pb_file, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                _ = tf.import_graph_def(graph_def, name='')
            # Configure TensorFlow GPU settings.
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True  # Grow GPU memory on demand.
            # Create the TensorFlow session.
            self.sess = tf.Session(graph=self.graph, config=config)
            # Look up the graph tensors used for inference.
            self.img_ph = self.sess.graph.get_tensor_by_name('image:0')
            self.label_ph = self.sess.graph.get_tensor_by_name('label:0')
            self.is_training = self.sess.graph.get_tensor_by_name('is_training:0')
            self.dropout = self.sess.graph.get_tensor_by_name('dropout_keep_prob:0')
            self.preds = self.sess.graph.get_tensor_by_name('sequence_preds:0')
            self.probs = self.sess.graph.get_tensor_by_name('sequence_probs:0')
            # These tensor names come from the demo model (text_recognition.pb)
            # shipped in the AttentionOCR author's Docker image.
            ''' self.img_ph = self.sess.graph.get_tensor_by_name('image:0')
            self.is_training = self.sess.graph.get_tensor_by_name('is_training:0')
            self.dropout = self.sess.graph.get_tensor_by_name('dropout:0')
            self.preds = self.sess.graph.get_tensor_by_name('sequence_preds:0')
            self.probs = self.sess.graph.get_tensor_by_name('sequence_probs:0')'''
    # Load the label dictionary file.
    def init_dict(self):
        # NOTE: the attribute name 'lable_dict' is a typo kept for compatibility.
        self.lable_dict = get_dict(self.dict_file)
    # Run the OCR prediction step.
    def predict(self, image_padded, EOS='EOS'):
        """Recognize text in a padded image.

        Builds `results` (one character list per predicted sentence, truncated
        at the EOS marker) and `probabilities` (per-character probability
        arrays).
        """
        results = []
        probabilities = []
        pred_sentences, pred_probs = self.sess.run([self.preds, self.probs], \
            #feed_dict={self.is_training: False, self.dropout: 1.0, self.img_ph: image})
            feed_dict={self.is_training: False, self.dropout: 1.0, self.img_ph: image_padded, self.label_ph: np.ones((1, self.seq_len), np.int32)})
        for pred_sentence in pred_sentences:
            char_list = []
            for char in pred_sentence:
                if self.lable_dict[char] == EOS:  # Stop at the end-of-sentence marker.
                    break
                char_list.append(self.lable_dict[char])
            results.append(char_list)
        for pred_prob in pred_probs:
            # NOTE(review): len(results) is the number of decoded sentences,
            # not the length of the matching sentence; this looks like it was
            # meant to be the per-sentence character count -- verify.
            char_probs = pred_prob[:min(len(results)+1, self.seq_len)]
            probabilities.append(char_probs)
return results, probabilities | [
"tensorflow.Session",
"numpy.ones",
"tensorflow.ConfigProto",
"tensorflow.Graph",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"tensorflow.io.gfile.GFile"
] | [((375, 385), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (383, 385), True, 'import tensorflow as tf\n'), ((694, 735), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (708, 735), True, 'import tensorflow as tf\n'), ((841, 884), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph', 'config': 'config'}), '(graph=self.graph, config=config)\n', (851, 884), True, 'import tensorflow as tf\n'), ((441, 478), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['self.pb_file', '"""rb"""'], {}), "(self.pb_file, 'rb')\n", (458, 478), True, 'import tensorflow as tf\n'), ((513, 526), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (524, 526), True, 'import tensorflow as tf\n'), ((599, 638), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (618, 638), True, 'import tensorflow as tf\n'), ((2321, 2357), 'numpy.ones', 'np.ones', (['(1, self.seq_len)', 'np.int32'], {}), '((1, self.seq_len), np.int32)\n', (2328, 2357), True, 'import numpy as np\n')] |
from __future__ import division
import time
import numpy as np
import pandas as pd
import logging
from memoized_property import memoized_property
# The ZBD object loads a few data files from disk. If this import fails then the functions that use it below will still
# work, but only with default arguments.
try:
from equipment.vdi.zbd import ZBD
zbd = ZBD()
except ImportError:
zbd = None
from kid_readout.measurement import core, basic
logger = logging.getLogger(__file__)
class MMWSweepList(basic.SweepStreamList):
    """A sweep plus a list of streams taken at a set of mm-wave source settings."""

    def __init__(self, sweep, stream_list, state, description=''):
        super(MMWSweepList, self).__init__(sweep=sweep, stream_list=stream_list,
                                           state=state, description=description)

    def single_sweep_stream_list(self, index):
        """Return the MMWResponse for a single resonator channel."""
        streams = core.MeasurementList(sa.stream(index) for sa in self.stream_list)
        return MMWResponse(self.sweep.sweep(index), streams, number=index,
                           state=self.state, description=self.description)

    def to_dataframe(self, add_origin=True):
        """Concatenate the per-channel response DataFrames into one."""
        frames = [self.single_sweep_stream_list(channel).to_dataframe()
                  for channel in range(self.sweep.num_channels)]
        return pd.concat(frames, ignore_index=True)
class MMWResponse(basic.SingleSweepStreamList):
    """One resonator's sweep plus streams taken at several mm-wave frequencies."""
    def __init__(self, single_sweep, stream_list, state, number=0, description=''):
        super(MMWResponse,self).__init__(single_sweep=single_sweep, stream_list=stream_list, state=state, number=number,
                                         description=description)
    @property
    def lockin_rms_voltage(self):
        # Lock-in RMS voltage per stream; NaN when absent from the state.
        try:
            return np.array(self.state_vector('lockin','rms_voltage'),dtype='float')
        except KeyError:
            return np.nan
    def zbd_power(self, linearize=False):
        """ZBD detector power per stream, optionally linearity-corrected."""
        return zbd_voltage_to_power(self.zbd_voltage(linearize=linearize), mmw_frequency=self.mmw_frequency)
    def zbd_voltage(self, linearize=False):
        """ZBD detector voltage derived from the lock-in reading."""
        return lockin_rms_to_zbd_voltage(self.lockin_rms_voltage, linearize=linearize)
    @property
    def hittite_frequency(self):
        # Hittite synthesizer frequency per stream, as a float array.
        return np.array(self.state_vector('hittite','frequency'), dtype='float')
    @property
    def mmw_frequency(self):
        # The mm-wave source multiplies the synthesizer frequency by 12.
        return 12.*self.hittite_frequency
    @memoized_property
    def sweep_stream_list(self):
        # Cached version of get_sweep_stream_list().
        return self.get_sweep_stream_list()
    def get_sweep_stream_list(self, deglitch=False):
        """Pair the single sweep with each stream as a SingleSweepStream.

        NOTE(review): the deglitch argument is currently unused.
        """
        result = []
        for stream in self.stream_list:
            sss = basic.SingleSweepStream(sweep=self.single_sweep, stream=stream, state=stream.state,
                                    description=stream.description)
            result.append(sss)
        return result
    @memoized_property
    def folded_x(self):
        """Per-stream x time series folded at the modulation period and aligned.

        Alignment rolls each folded waveform so that the phase of its first
        FFT harmonic lands at a fixed offset; if that phase is NaN the data
        are left unaligned (roll of zero).
        """
        sweep_stream_list = self.sweep_stream_list
        result = []
        for sss in sweep_stream_list:
            fx = sss.fold(sss.x_raw)
            # TODO: this is a hack
            xfft = np.fft.rfft(fx)
            phase = np.angle(xfft[1])
            roll_by = 0
            try:
                # Convert the fundamental's phase to samples, plus a
                # quarter-period offset.
                roll_by = int(np.round(phase*fx.shape[0]/(2*np.pi))) + fx.shape[0]//4
            except ValueError:
                logger.debug("NaN values encounterd while trying to fold data for stream %r. Data won't be aligned" %
                             sss.number)
            result.append(np.roll(fx,roll_by))
        return np.array(result)
    @memoized_property
    def folded_q(self):
        # Per-stream q time series folded at the modulation period (unaligned).
        sweep_stream_list = self.sweep_stream_list
        result = []
        for sss in sweep_stream_list:
            fq = sss.fold(sss.q_raw)
            result.append(fq)
        return np.array(result)
    @memoized_property
    def folded_normalized_s21(self):
        # Per-stream folded normalized S21 waveforms (unaligned).
        sweep_stream_list = self.sweep_stream_list
        result = []
        for sss in sweep_stream_list:
            fs21 = sss.fold(sss.normalized_s21)
            result.append(fs21)
        return np.array(result)
    @memoized_property
    def folded_s21_raw(self):
        # Per-stream folded raw S21 waveforms (unaligned).
        sweep_stream_list = self.sweep_stream_list
        result = []
        for sss in sweep_stream_list:
            fs21 = sss.fold(sss.stream.s21_raw)
            result.append(fs21)
        return np.array(result)
    @memoized_property
    def fractional_frequency_response(self):
        # Cached version of get_fractional_frequency_response().
        return self.get_fractional_frequency_response()
    def get_fractional_frequency_response(self):
        """Fractional frequency response amplitude per stream.

        Circularly convolves each folded x waveform with a +/-1 square-wave
        template (one half-period at -1, one at +1) via FFT products and
        takes the maximum absolute value, normalized by the period.
        """
        folded = self.folded_x
        period = folded.shape[-1]
        template = np.ones((period,),dtype='float')
        template[:period//2] = -1
        response = np.abs(np.fft.irfft(np.fft.rfft(template)*np.fft.rfft(folded,axis=-1),axis=-1)*2./period).max(-1)
        return response
    def to_dataframe(self, add_origin=True):
        """Summarize this response as a single-row pandas DataFrame.

        The row holds identifying state, resonator fit results, folded
        waveforms, and the mm-wave frequency / power vectors; array-valued
        quantities are wrapped in one-element lists so they fit in a cell.
        """
        data = {'number': self.number, 'analysis_epoch':time.time(), 'start_epoch':self.start_epoch()}
        # Flatten thermometer temperatures and roach settings, when present.
        try:
            for thermometer, temperature in self.state['temperature'].items():
                data['temperature_{}'.format(thermometer)] = temperature
        except KeyError:
            pass
        try:
            for key, value in self.stream_list[0].roach_state.items():
                data['roach_{}'.format(key)] = value
        except KeyError:
            pass
        flat_state = self.state.flatten(wrap_lists=True)
        data.update(flat_state)
        # Record every resonator fit parameter with its standard error.
        for param in self.single_sweep.resonator.current_result.params.values():
            data['res_{}'.format(param.name)] = param.value
            data['res_{}_error'.format(param.name)] = param.stderr
        data['res_redchi'] = self.single_sweep.resonator.current_result.redchi
        data['res_Q_i'] = self.single_sweep.resonator.Q_i
        data['res_Q_e'] = self.single_sweep.resonator.Q_e
        data['res_s21_data'] = [self.single_sweep.resonator.data]
        data['res_frequency_data'] = [self.single_sweep.resonator.frequency]
        data['res_s21_errors'] = [self.single_sweep.resonator.errors]
        # Evaluate the fitted model on a dense frequency grid for plotting.
        modelf = np.linspace(self.single_sweep.resonator.frequency.min(),self.single_sweep.resonator.frequency.max(),1000)
        models21 = self.single_sweep.resonator.model.eval(params=self.single_sweep.resonator.current_params,f=modelf)
        data['res_model_frequency'] = [modelf]
        data['res_model_s21'] = [models21]
        data['fractional_frequency_response'] = [self.fractional_frequency_response]
        data['folded_s21_raw'] = [self.folded_s21_raw]
        data['folded_x'] = [self.folded_x]
        data['mmw_frequency'] = [self.mmw_frequency]
        data['lockin_rms_voltage'] = [self.lockin_rms_voltage]
        data['zbd_power_linearized'] = [self.zbd_power(linearize=True)]
        dataframe = pd.DataFrame(data, index=[0])
        if add_origin:
            self.add_origin(dataframe)
        return dataframe
class MMWSweepOnMod(core.Measurement):
    """Sweep plus streams taken with the mm-wave source on and modulated.

    An optional off-source stream may also be present; when it is falsy it is
    stored as None and off-stream accessors raise AttributeError.
    """
    _version = 0
    def __init__(self, sweep, off_stream, on_stream, mod_stream, state=None, description=''):
        self.sweep = sweep
        self.on_stream = on_stream
        self.mod_stream = mod_stream
        # Normalize a falsy off_stream to None; sweep_stream_set relies on the
        # resulting AttributeError to detect the missing measurement.
        if off_stream:
            self.off_stream = off_stream
        else:
            self.off_stream = None
        super(MMWSweepOnMod, self).__init__(state=state, description=description)
    @property
    def on_sweep_stream_array(self):
        # SweepStreamArray for the source-on data.
        return basic.SweepStreamArray(sweep_array=self.sweep, stream_array=self.on_stream,state=self.state,
                                      description=self.description)
    @property
    def off_sweep_stream_array(self):
        # SweepStreamArray for the source-off data; AttributeError if absent.
        if self.off_stream:
            return basic.SweepStreamArray(sweep_array=self.sweep, stream_array=self.off_stream,state=self.state,
                                          description=self.description)
        else:
            raise AttributeError("No off stream measurement defined")
    @property
    def mod_sweep_stream_array(self):
        # SweepStreamArray for the source-modulated data.
        return basic.SweepStreamArray(sweep_array=self.sweep, stream_array=self.mod_stream,state=self.state,
                                      description=self.description)
    def sweep_stream_set(self,number):
        """Return (off, on, mod) SingleSweepStreams for one channel.

        The first element is None when no off-stream measurement exists.
        """
        sweep = self.sweep.sweep(number)
        on_sweep_stream = self.on_stream.stream(number)
        mod_sweep_stream = self.mod_stream.stream(number)
        try:
            off_sweep_stream = self.off_stream.stream(number)
        except AttributeError:
            # self.off_stream is None when no off measurement was taken.
            off_sweep_stream = None
        if off_sweep_stream:
            return (basic.SingleSweepStream(sweep,off_sweep_stream,number=number,state=self.state,
                                           description=self.description),
                    basic.SingleSweepStream(sweep,on_sweep_stream,number=number,state=self.state,
                                           description=self.description),
                    basic.SingleSweepStream(sweep,mod_sweep_stream,number=number,state=self.state,
                                           description=self.description),
                    )
        else:
            return (None,
                    basic.SingleSweepStream(sweep,on_sweep_stream,number=number,state=self.state,
                                           description=self.description),
                    basic.SingleSweepStream(sweep,mod_sweep_stream,number=number,state=self.state,
                                           description=self.description),
                    )
    def to_dataframe(self, add_origin=True):
        """Build one DataFrame with per-channel rows for the on/mod(/off) data.

        The lock-in voltage measured during modulation is copied onto the on
        (and off) rows so every row carries the mm-wave power information.
        """
        on_rows = []
        mod_rows = []
        off_rows = []
        for n in range(self.sweep.num_channels):
            off_ss, on_ss, mod_ss = self.sweep_stream_set(n)
            on_rows.append(on_ss.to_dataframe())
            mod_rows.append(mod_ss.to_dataframe())
            if off_ss:
                off_rows.append(off_ss.to_dataframe())
        df_on = pd.concat(on_rows,ignore_index=True)
        df_mod = pd.concat(mod_rows,ignore_index=True)
        dfs = [df_on,df_mod]
        if off_rows:
            df_off = pd.concat(off_rows,ignore_index=True)
            dfs.append(df_off)
        else:
            df_off = None
        if add_origin:
            # With no backing IO object (self._io is None) the origin must be
            # recorded per component; otherwise this measurement records its
            # own origin on each frame.
            if self._io is None:
                self.sweep.add_origin(df_on,prefix='sweep_')
                self.on_stream.add_origin(df_on,prefix='stream_')
                self.sweep.add_origin(df_mod,prefix='sweep_')
                self.mod_stream.add_origin(df_mod,prefix='stream_')
                if off_rows:
                    self.sweep.add_origin(df_off,prefix='sweep_')
                    self.off_stream.add_origin(df_off,prefix='stream_')
            else:
                self.add_origin(df_on)
                self.add_origin(df_mod)
                if off_rows:
                    self.add_origin(df_off)
        # Copy the modulated lock-in voltage onto the on/off rows so all rows
        # share the same mm-wave power reference.
        df_on['lockin_rms_voltage'] = df_mod['lockin_rms_voltage']
        if df_off is not None:
            df_off['lockin_rms_voltage'] = df_mod['lockin_rms_voltage']
        return pd.concat(dfs,ignore_index=True)
def lockin_rms_to_zbd_voltage(lockin_rms_voltage, linearize=False):
    """Recover the ZBD detector voltage from the lock-in RMS reading.

    The reading is scaled by pi / sqrt(2); with linearize=True it is further
    corrected by the measured ZBD linearity curve, which requires the
    equipment.vdi.zbd import to have succeeded (zbd is not None).
    """
    scale = np.pi / np.sqrt(2)
    voltage = scale * lockin_rms_voltage
    if linearize:
        voltage = voltage / zbd.linearity(voltage)
    return voltage
def zbd_voltage_to_power(zbd_voltage, mmw_frequency=None):
    """Convert a ZBD detector voltage to incident millimeter-wave power.

    When mmw_frequency is None a nominal responsivity of 2200 V/W is used;
    otherwise the frequency-dependent responsivity is looked up through the
    zbd calibration object (requires the equipment.vdi.zbd import to have
    succeeded).
    """
    # 2200 V/W is the approximate flat responsivity of the detector.
    responsivity = 2200 if mmw_frequency is None else zbd.responsivity(mmw_frequency)
    return zbd_voltage / responsivity
| [
"pandas.DataFrame",
"numpy.fft.rfft",
"numpy.roll",
"numpy.angle",
"numpy.ones",
"kid_readout.measurement.basic.SingleSweepStream",
"time.time",
"numpy.array",
"equipment.vdi.zbd.ZBD",
"kid_readout.measurement.basic.SweepStreamArray",
"numpy.round",
"pandas.concat",
"logging.getLogger",
"n... | [((461, 488), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (478, 488), False, 'import logging\n'), ((361, 366), 'equipment.vdi.zbd.ZBD', 'ZBD', ([], {}), '()\n', (364, 366), False, 'from equipment.vdi.zbd import ZBD\n'), ((1294, 1328), 'pandas.concat', 'pd.concat', (['rows'], {'ignore_index': '(True)'}), '(rows, ignore_index=True)\n', (1303, 1328), True, 'import pandas as pd\n'), ((3471, 3487), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (3479, 3487), True, 'import numpy as np\n'), ((3727, 3743), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (3735, 3743), True, 'import numpy as np\n'), ((4009, 4025), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (4017, 4025), True, 'import numpy as np\n'), ((4284, 4300), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (4292, 4300), True, 'import numpy as np\n'), ((4560, 4593), 'numpy.ones', 'np.ones', (['(period,)'], {'dtype': '"""float"""'}), "((period,), dtype='float')\n", (4567, 4593), True, 'import numpy as np\n'), ((6736, 6765), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': '[0]'}), '(data, index=[0])\n', (6748, 6765), True, 'import pandas as pd\n'), ((7369, 7496), 'kid_readout.measurement.basic.SweepStreamArray', 'basic.SweepStreamArray', ([], {'sweep_array': 'self.sweep', 'stream_array': 'self.on_stream', 'state': 'self.state', 'description': 'self.description'}), '(sweep_array=self.sweep, stream_array=self.on_stream,\n state=self.state, description=self.description)\n', (7391, 7496), False, 'from kid_readout.measurement import core, basic\n'), ((7946, 8074), 'kid_readout.measurement.basic.SweepStreamArray', 'basic.SweepStreamArray', ([], {'sweep_array': 'self.sweep', 'stream_array': 'self.mod_stream', 'state': 'self.state', 'description': 'self.description'}), '(sweep_array=self.sweep, stream_array=self.mod_stream,\n state=self.state, description=self.description)\n', (7968, 8074), False, 'from kid_readout.measurement 
import core, basic\n'), ((9841, 9878), 'pandas.concat', 'pd.concat', (['on_rows'], {'ignore_index': '(True)'}), '(on_rows, ignore_index=True)\n', (9850, 9878), True, 'import pandas as pd\n'), ((9895, 9933), 'pandas.concat', 'pd.concat', (['mod_rows'], {'ignore_index': '(True)'}), '(mod_rows, ignore_index=True)\n', (9904, 9933), True, 'import pandas as pd\n'), ((10950, 10983), 'pandas.concat', 'pd.concat', (['dfs'], {'ignore_index': '(True)'}), '(dfs, ignore_index=True)\n', (10959, 10983), True, 'import pandas as pd\n'), ((2579, 2699), 'kid_readout.measurement.basic.SingleSweepStream', 'basic.SingleSweepStream', ([], {'sweep': 'self.single_sweep', 'stream': 'stream', 'state': 'stream.state', 'description': 'stream.description'}), '(sweep=self.single_sweep, stream=stream, state=\n stream.state, description=stream.description)\n', (2602, 2699), False, 'from kid_readout.measurement import core, basic\n'), ((3038, 3053), 'numpy.fft.rfft', 'np.fft.rfft', (['fx'], {}), '(fx)\n', (3049, 3053), True, 'import numpy as np\n'), ((3074, 3091), 'numpy.angle', 'np.angle', (['xfft[1]'], {}), '(xfft[1])\n', (3082, 3091), True, 'import numpy as np\n'), ((4870, 4881), 'time.time', 'time.time', ([], {}), '()\n', (4879, 4881), False, 'import time\n'), ((7629, 7757), 'kid_readout.measurement.basic.SweepStreamArray', 'basic.SweepStreamArray', ([], {'sweep_array': 'self.sweep', 'stream_array': 'self.off_stream', 'state': 'self.state', 'description': 'self.description'}), '(sweep_array=self.sweep, stream_array=self.off_stream,\n state=self.state, description=self.description)\n', (7651, 7757), False, 'from kid_readout.measurement import core, basic\n'), ((10004, 10042), 'pandas.concat', 'pd.concat', (['off_rows'], {'ignore_index': '(True)'}), '(off_rows, ignore_index=True)\n', (10013, 10042), True, 'import pandas as pd\n'), ((11096, 11106), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11103, 11106), True, 'import numpy as np\n'), ((3435, 3455), 'numpy.roll', 'np.roll', (['fx', 
'roll_by'], {}), '(fx, roll_by)\n', (3442, 3455), True, 'import numpy as np\n'), ((8494, 8610), 'kid_readout.measurement.basic.SingleSweepStream', 'basic.SingleSweepStream', (['sweep', 'off_sweep_stream'], {'number': 'number', 'state': 'self.state', 'description': 'self.description'}), '(sweep, off_sweep_stream, number=number, state=self.\n state, description=self.description)\n', (8517, 8610), False, 'from kid_readout.measurement import core, basic\n'), ((8668, 8783), 'kid_readout.measurement.basic.SingleSweepStream', 'basic.SingleSweepStream', (['sweep', 'on_sweep_stream'], {'number': 'number', 'state': 'self.state', 'description': 'self.description'}), '(sweep, on_sweep_stream, number=number, state=self.\n state, description=self.description)\n', (8691, 8783), False, 'from kid_readout.measurement import core, basic\n'), ((8841, 8957), 'kid_readout.measurement.basic.SingleSweepStream', 'basic.SingleSweepStream', (['sweep', 'mod_sweep_stream'], {'number': 'number', 'state': 'self.state', 'description': 'self.description'}), '(sweep, mod_sweep_stream, number=number, state=self.\n state, description=self.description)\n', (8864, 8957), False, 'from kid_readout.measurement import core, basic\n'), ((9077, 9192), 'kid_readout.measurement.basic.SingleSweepStream', 'basic.SingleSweepStream', (['sweep', 'on_sweep_stream'], {'number': 'number', 'state': 'self.state', 'description': 'self.description'}), '(sweep, on_sweep_stream, number=number, state=self.\n state, description=self.description)\n', (9100, 9192), False, 'from kid_readout.measurement import core, basic\n'), ((9250, 9366), 'kid_readout.measurement.basic.SingleSweepStream', 'basic.SingleSweepStream', (['sweep', 'mod_sweep_stream'], {'number': 'number', 'state': 'self.state', 'description': 'self.description'}), '(sweep, mod_sweep_stream, number=number, state=self.\n state, description=self.description)\n', (9273, 9366), False, 'from kid_readout.measurement import core, basic\n'), ((3163, 3206), 'numpy.round', 
'np.round', (['(phase * fx.shape[0] / (2 * np.pi))'], {}), '(phase * fx.shape[0] / (2 * np.pi))\n', (3171, 3206), True, 'import numpy as np\n'), ((4666, 4687), 'numpy.fft.rfft', 'np.fft.rfft', (['template'], {}), '(template)\n', (4677, 4687), True, 'import numpy as np\n'), ((4688, 4716), 'numpy.fft.rfft', 'np.fft.rfft', (['folded'], {'axis': '(-1)'}), '(folded, axis=-1)\n', (4699, 4716), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 28 08:55:59 2020
@author: heiko
"""
import numpy as np;
import json;
from scipy.integrate import dblquad as int2d;
from scipy.special import jv as bessel;
from scipy.signal import hilbert as KramerKronig;
from scipy.optimize import fsolve;
#from scipy.fftpack import hilbert as KramerKronig;
from scipy.interpolate import interp1d;
pi=np.pi;  # shorthand used throughout the hydrostatics formulas below
#wave=wavefield.wavefield(np.zeros(20),np.linspace(1,20),2)
class Cone:
    """One truncated-cone segment of an axisymmetric floating body.

    Geometry is the frustum between (z1, r1) and (z2, r2); the radius varies
    linearly as r(z) = -q + m*z.  Provides hydrostatic force, radius, area
    and displaced-volume queries for that segment.
    """

    # class-level fallbacks; instances overwrite these in __init__
    m = -1
    q = -1
    z1 = -1
    z2 = 0
    I = 0
    g = 9.81
    zCoG = -1
    depth = 30

    def __init__(self, z1, r1, z2, r2, zCoG, xi, g, h):
        """Store geometry, wave numbers ``xi``, gravity ``g`` and water depth ``h``."""
        # frequency parameters
        self.xi = xi
        # geometric parameters
        self.m = (r1 - r2) / (z1 - z2)    # radius slope dr/dz
        self.z1 = z1
        self.z2 = z2
        self.g = g
        self.rho = 1000                   # water density [kg/m^3]
        self.q = -r1 + self.m * self.z1   # radius intercept: r(z) = -q + m*z
        self.omega = np.sqrt(self.xi * g)
        self.zCoG = zCoG
        self.z0 = 0
        self.x0 = 0
        self.delta0 = 0
        self.depth = h

    def Calculate(self, z0, x0, delta0, eta):
        """Forces for body position (z0, x0, delta0) at the design frequencies self.xi."""
        return self.Calculate_xi(z0, x0, delta0, eta, self.xi)

    def Calculate_xi(self, z0, x0, delta0, eta, xi0):
        """Hydrostatic force of this segment for surface elevation ``eta``.

        Returns [[surge, heave, pitch], [surge, heave, pitch]] as an ndarray
        when the segment is wetted, otherwise a pair of zero lists.
        """
        z0 = -z0
        # integration limits move with the buoy position
        lo = self.z1 + z0
        hi = np.min([self.z2 + z0, eta])
        q = -self.q - z0 * self.m
        empty = [0, 0, 0]
        empty2 = [xi0 * 0, xi0 * 0 * 0, xi0 * 0 * 0]
        # segment completely out of the water -> zero force
        if lo > hi:
            return [empty, empty2]
        if lo > eta:
            return [empty, empty2]
        # buoyancy force (heave component only)
        F_st_h = -self.g * self.rho * (2 * np.pi * self.m * ((self.m * np.power(hi, 3) / 3 + q * np.power(hi, 2) / 2) - (self.m * np.power(lo, 3) / 3 + q * np.power(lo, 2) / 2)))
        F_st = [0, F_st_h, 0]
        return np.array([F_st, F_st])

    def Radius(self, z0):
        """Segment radius at height ``z0`` (0 outside [z1, z2])."""
        if (z0 >= self.z1) and (z0 <= self.z2):
            return -self.q + z0 * self.m
        return 0

    def max_Radius(self, z0):
        """Maximum radius of the part of this segment that lies below ``z0``."""
        if z0 > self.z2:
            # whole segment is below z0: take the larger end radius
            return np.max([-self.q + self.z1 * self.m, -self.q + self.z2 * self.m])
        if (z0 >= self.z1) and (z0 <= self.z2):
            return np.max([np.abs(-self.q + z0 * self.m), -self.q + self.z1 * self.m])
        return 0

    def Area(self, z0):
        """Horizontal cross-section area pi*r^2 at height ``z0``."""
        rad = self.Radius(z0)
        return rad * rad * np.pi

    def AreaSurge(self, z0):
        """Projected lateral (surge) area of the wetted trapezoid below ``z0``."""
        upper_r = self.Radius(z0)
        lower_r = self.Radius(self.z1)
        height = np.max([z0 - self.z1, 0])
        return (upper_r + lower_r) * height

    def Volume(self, z):
        """Displaced volume of this segment below waterline ``z`` (frustum formula)."""
        za = np.min([self.z1, z])
        zb = np.min([self.z2, z])
        # heights measured from the cone apex z = q/m
        ha = za - self.q / self.m
        hb = zb - self.q / self.m
        rb = hb * self.m
        ra = ha * self.m
        return 1 / 3 * np.pi * (rb * rb * hb - ra * ra * ha)
idname="test.csv";
class Floater:
    """Axisymmetric floating body assembled from Cone segments.

    Aggregates per-segment hydrostatics (areas, volumes, forces) over
    self.elements and opens a CSV force log in __init__ (closed by clear()).
    """
    # class-level defaults; instances overwrite most of these in __init__
    volume=0;
    mode=0;
    g=9.81;
    d=300;
    Cog=0;    # NOTE(review): never read? __init__ sets self.CoG (different capitalization)
    rho=1000;
    xi=np.array([]);
    def __init__ (self, xi, g, depth, CoG, *args):
        """Set up the floater; optional args[0] is a JSON geometry file with a "geo" list."""
        self.xi=xi;
        self.g=g;
        self.omega=np.sqrt(self.xi*self.g)
        self.elements=[];
        self.d=depth;
        self.CoG=CoG;
        # force log file; "idname" is a module-level constant
        self.file = open(idname+"_f.csv", "w")
        self.file.write("time,wave,buoyancy,FK,rad\r\n");
        if len(args)>0:
            with open(args[0]) as file:
                geo=json.load(file);
            # NOTE(review): loop variable g shadows the gravity parameter g
            for g in geo["geo"]:
                if g["type"] == "cone":
                    self.addCone(g["coord"][0],g["coord"][1],g["coord"][2],g["coord"][3])
    def addCone(self, z1, r1, z2, r2):
        """Append a cone segment from (z1, r1) to (z2, r2) and add its volume to self.volume."""
        self.elements.append(Cone(z1, r1, z2, r2,self.CoG,self.xi,self.g,self.d))
        self.volume=self.volume+self.elements[-1].Volume(z2);
    def set_mode(self, mode):
        # select the calculation mode flag (stored, not interpreted here)
        self.mode=mode;
    def Calc_CoG(self):
        """Approximate the area-weighted vertical centroid over 10 sampled heights."""
        mi=0;
        ms=0;
        rge=np.linspace(np.min([e.z1 for e in self.elements]),np.max([e.z2 for e in self.elements]),10);
        for i in rge:
            ai=self.Area(i);
            mi=mi+ai*i;
            ms=ms+ai;
        return mi/ms;
    def get_parameters(self, z0, x0, delta0, eta):
        # NOTE(review): looks broken/dead code — Calculate() requires 4 arguments,
        # a[9] is out of range for its 4-element result, and nothing is returned.
        a=self.Calculate();
        a=[a[9],a[1],a[2],a[3]]
    def Calculate(self, z0, x0, delta0, eta):
        """Sum the per-segment forces; returns [force_vector, 0, placeholder, 0]."""
        forces=np.array([[0,0,0],[0,0,0]]);
        for e in self.elements:
            forces = forces + e.Calculate(z0, x0, delta0, eta);
        if np.sum(np.abs(forces[0]))==0:
            return [forces[0],0,0,0]
        return [forces[0],0,[0,0,0],0];
    def get_forces(self, t, wave, z0, x0, delta0, v, a):
        # placeholder — not implemented
        return;
    def get_force_lin(self, t, wave, z0, x0, delta0, v, a):
        # placeholder — not implemented
        return;
    def Area(self, z0):
        """Cross-section area at z0; returns the first segment's positive area.

        NOTE(review): assumes at most one segment spans z0 — confirm, since the
        value is overwritten (not summed) on each iteration.
        """
        area=0;
        for e in self.elements:
            area = e.Area(z0);
            if area >0:
                return area;
        return area;
    def AreaProjectedHeave(self, z0):
        """Projected heave area: full max-radius disc above z_r_max, else summed segment areas."""
        area=0;
        res=self.getGeoBox();
        zrmax=res[2];
        rmax=res[3];
        if (z0>zrmax):
            return rmax**2*np.pi;
        for e in self.elements:
            area = area + e.Area(z0);
        return area;
    def AreaProjectedSurge(self, z0):
        """Projected surge area: sum of the wetted lateral areas of all segments."""
        area=0;
        for e in self.elements:
            area = area + e.AreaSurge(z0);
        return area;
    def getGeoBox(self,z0=np.NaN):
        #gets the max and min heave position (z_min and z_max) and the maximal radius r_max and its vertical position z_r_max
        # If z0 is given, the geometry is clipped at that waterline first.
        z_min=np.NaN;
        z_max=np.NaN;
        r_max=0;
        z_r_max=0;
        for e in self.elements:
            z1=e.z1;
            z2=e.z2;
            if (not np.isnan(z0)):
                z1=np.min([z1,z0]);
                z2=np.min([z2,z0]);
            if np.isnan(z_min):
                # first element initializes the bounds (corrected by the checks below)
                z_min=z2;
                z_max=z1;
            if z_min>z1:
                z_min=z1;
            if z_max<z2:
                z_max=z2;
            if e.Radius(z1)>r_max:
                r_max=e.Radius(z1);
                z_r_max=z1;
            if e.Radius(z2)>r_max:
                r_max=e.Radius(z2);
                z_r_max=z2;
        return (z_min,z_max,z_r_max,r_max)
    def max_radius(self,z0):
        #get maximal radius for the body below z0
        return np.max([e.max_Radius(z0) for e in self.elements]);
    def radius(self,z0):
        #get radius for the body at z0
        return np.max([e.Radius(z0) for e in self.elements]);
    def Volume(self, z0):
        """Total displaced volume of the body below waterline z0."""
        vol=0;
        for e in self.elements:
            vol = vol + e.Volume(z0);
        return vol;
    def clear(self):
        # close the force log and drop the geometry
        self.file.close();
        self.elements.clear();
| [
"json.load",
"numpy.abs",
"numpy.power",
"numpy.isnan",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.sqrt"
] | [((3088, 3100), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3096, 3100), True, 'import numpy as np\n'), ((865, 885), 'numpy.sqrt', 'np.sqrt', (['(self.xi * g)'], {}), '(self.xi * g)\n', (872, 885), True, 'import numpy as np\n'), ((1333, 1360), 'numpy.min', 'np.min', (['[self.z2 + z0, eta]'], {}), '([self.z2 + z0, eta])\n', (1339, 1360), True, 'import numpy as np\n'), ((1843, 1865), 'numpy.array', 'np.array', (['[F_st, F_st]'], {}), '([F_st, F_st])\n', (1851, 1865), True, 'import numpy as np\n'), ((2678, 2703), 'numpy.max', 'np.max', (['[z0 - self.z1, 0]'], {}), '([z0 - self.z1, 0])\n', (2684, 2703), True, 'import numpy as np\n'), ((2770, 2790), 'numpy.min', 'np.min', (['[self.z1, z]'], {}), '([self.z1, z])\n', (2776, 2790), True, 'import numpy as np\n'), ((2800, 2820), 'numpy.min', 'np.min', (['[self.z2, z]'], {}), '([self.z2, z])\n', (2806, 2820), True, 'import numpy as np\n'), ((3215, 3240), 'numpy.sqrt', 'np.sqrt', (['(self.xi * self.g)'], {}), '(self.xi * self.g)\n', (3222, 3240), True, 'import numpy as np\n'), ((4450, 4482), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0]])\n', (4458, 4482), True, 'import numpy as np\n'), ((2188, 2252), 'numpy.max', 'np.max', (['[-self.q + self.z1 * self.m, -self.q + self.z2 * self.m]'], {}), '([-self.q + self.z1 * self.m, -self.q + self.z2 * self.m])\n', (2194, 2252), True, 'import numpy as np\n'), ((4068, 4105), 'numpy.min', 'np.min', (['[e.z1 for e in self.elements]'], {}), '([e.z1 for e in self.elements])\n', (4074, 4105), True, 'import numpy as np\n'), ((4106, 4143), 'numpy.max', 'np.max', (['[e.z2 for e in self.elements]'], {}), '([e.z2 for e in self.elements])\n', (4112, 4143), True, 'import numpy as np\n'), ((5978, 5993), 'numpy.isnan', 'np.isnan', (['z_min'], {}), '(z_min)\n', (5986, 5993), True, 'import numpy as np\n'), ((3498, 3513), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3507, 3513), False, 'import json\n'), ((4602, 4619), 'numpy.abs', 'np.abs', 
(['forces[0]'], {}), '(forces[0])\n', (4608, 4619), True, 'import numpy as np\n'), ((5859, 5871), 'numpy.isnan', 'np.isnan', (['z0'], {}), '(z0)\n', (5867, 5871), True, 'import numpy as np\n'), ((5893, 5909), 'numpy.min', 'np.min', (['[z1, z0]'], {}), '([z1, z0])\n', (5899, 5909), True, 'import numpy as np\n'), ((5929, 5945), 'numpy.min', 'np.min', (['[z2, z0]'], {}), '([z2, z0])\n', (5935, 5945), True, 'import numpy as np\n'), ((2316, 2345), 'numpy.abs', 'np.abs', (['(-self.q + z0 * self.m)'], {}), '(-self.q + z0 * self.m)\n', (2322, 2345), True, 'import numpy as np\n'), ((1706, 1721), 'numpy.power', 'np.power', (['z2', '(3)'], {}), '(z2, 3)\n', (1714, 1721), True, 'import numpy as np\n'), ((1725, 1740), 'numpy.power', 'np.power', (['z2', '(2)'], {}), '(z2, 2)\n', (1733, 1740), True, 'import numpy as np\n'), ((1752, 1767), 'numpy.power', 'np.power', (['z1', '(3)'], {}), '(z1, 3)\n', (1760, 1767), True, 'import numpy as np\n'), ((1771, 1786), 'numpy.power', 'np.power', (['z1', '(2)'], {}), '(z1, 2)\n', (1779, 1786), True, 'import numpy as np\n')] |
import json
import math
import numpy as np
import pandas as pd
from scipy import signal
def _fir_rand_input(signal_length, response_length, num_channels=1):
print("Generating FIR random data with ", signal_length, " samples and ", num_channels, "channels...")
t = np.zeros((signal_length, 1))
t[:, 0] = [i for i in range(signal_length)]
x = np.random.randn(num_channels, signal_length)
b = [1 / (i + 1) for i in range(response_length)]
a = [1]
y = signal.lfilter(b, a, x, zi=None)
data_id = np.zeros((signal_length, 1))
for i in range(0, signal_length, int(signal_length / 100)):
data_id[i:i + int(signal_length / 100)].fill(i)
return t, np.transpose(x), np.transpose(y), data_id
def _generate_fir_response(signal_input, response_length):
b = [np.random.rand() * (1 - i / response_length) for i in range(response_length)]
a = [1]
return signal.lfilter(b, a, signal_input, zi=None, axis=0)
def _sine(signal_length, frequency, amp, offset, delta_t):
print("Generating Sine data with {} samples, frequency {} Hz, Amplitude {}, offset {}, delta_t {}".format(
signal_length, frequency, amp, offset, delta_t))
t = np.zeros((signal_length + delta_t, 1))
t[:, 0] = np.arange(0, signal_length + delta_t, 1)
x = np.sin(frequency * 2 * math.pi * t) * amp + offset
y = x[delta_t:]
x = x[:-delta_t]
t = t[:-delta_t]
data_id = np.zeros((signal_length, 1))
for i in range(0, signal_length, int(signal_length / 100)):
data_id[i:i + int(signal_length / 100)].fill(i)
return t, x, y, data_id
def _rand_sine_sum(signal_length, ground_frequency, amp, offset, num_superpos, delta_t):
signal_params = {}
signal_params['signal_length'] = signal_length
signal_params['ground_frequency'] = ground_frequency
signal_params['amp'] = amp
signal_params['offset'] = offset
signal_params['num_superpos'] = num_superpos
signal_params['delta_t'] = delta_t
print("Generating Sine data with {} samples, ground frequency {} Hz, Amplitude {}, offset {}, delta_t {}".format(
signal_length, ground_frequency, amp, offset, delta_t))
t = np.zeros((signal_length + delta_t, 1))
t[:, 0] = np.arange(0, signal_length + delta_t, 1)
# x = np.sin(ground_frequency*2*math.pi*t)*amp + offset
x = np.zeros((signal_length + delta_t, 1))
f_max = 0.5
base = math.pow(f_max * signal_length, 1 / (num_superpos - 1))
for i in range(num_superpos):
f_i = 1 / (signal_length / math.pow(base, i))
superpos_freq = np.random.normal(f_i, (1 / 3) * f_i)
superpos_amp = np.random.normal(0, 1 / 3 * amp)
x = x + np.sin(superpos_freq * 2 * math.pi * t) * superpos_amp + np.random.rand(signal_length + delta_t,
1) * 0.05
print("Superimposed frequency:", superpos_freq, "Hz and Amplitude", superpos_amp)
signal_params['freq_' + str(i)] = superpos_freq
signal_params['amp_' + str(i)] = superpos_amp
y = x[delta_t:]
x = x[:-delta_t]
t = t[:-delta_t]
data_id = np.zeros((signal_length, 1))
for i in range(0, signal_length, int(signal_length / 100)):
data_id[i:i + int(signal_length / 100)].fill(i)
return t, x, y, data_id, signal_params
if __name__ == "__main__":
    # Build two independent random sine-sum datasets (A and B), optionally stack
    # random FIR responses on the targets, and save pickled DataFrames plus the
    # drawn signal parameters.
    output_path = "./data/Synthetic/"
    signal_length_ = 10000
    ground_frequency_ = 1e-5
    amp_ = 1
    offset_ = 0
    num_superpos_ = 1000
    delta_t_ = 1
    response_length_ = 50
    # 0 -> the FIR-filtering loop below is skipped; targets stay the raw shifted sines
    num_filterings_ = 0
    t1, x1, y1, id1, signal_params_1 = _rand_sine_sum(signal_length=signal_length_, ground_frequency=ground_frequency_,
                                                      amp=amp_, offset=offset_, num_superpos=num_superpos_,
                                                      delta_t=delta_t_)
    t2, x2, y2, id2, signal_params_2 = _rand_sine_sum(signal_length=signal_length_, ground_frequency=ground_frequency_,
                                                      amp=amp_, offset=offset_, num_superpos=num_superpos_,
                                                      delta_t=delta_t_)
    y1_filtered = y1
    y2_filtered = y2
    for filtering in range(num_filterings_):
        y1_filtered = y1_filtered + _generate_fir_response(y1, response_length_)
        y2_filtered = y2_filtered + _generate_fir_response(y2, response_length_)
    # "reverse" variant swaps the channel order of inputs and targets
    x = np.concatenate((x1, x2), axis=1)
    x_reverse = np.concatenate((x2, x1), axis=1)
    y = np.concatenate((y1_filtered, y2_filtered), axis=1)
    y_reverse = np.concatenate((y2_filtered, y1_filtered), axis=1)
    columns = ['t[s]'] + ['Ist'] * x.shape[1] + ['Soll'] * y.shape[1] + ['data_id']
    df = pd.DataFrame(np.concatenate((t1, x, y, id1), axis=1), columns=columns)
    df.to_pickle(output_path + "synthetic_data.p")
    df_reverse = pd.DataFrame(np.concatenate((t1, x_reverse, y_reverse, id1), axis=1), columns=columns)
    df_reverse.to_pickle(output_path + "synthetic_data_reverse.p")
    # save the random draw parameters alongside each dataset
    with open(output_path + "config_Dataset_A.txt", "w") as config_file:
        json.dump(signal_params_1, config_file)
        config_file.close()
    with open(output_path + "config_Dataset_B.txt", "w") as config_file:
        json.dump(signal_params_2, config_file)
        config_file.close()
| [
"json.dump",
"math.pow",
"numpy.random.randn",
"scipy.signal.lfilter",
"numpy.zeros",
"numpy.transpose",
"numpy.sin",
"numpy.arange",
"numpy.random.normal",
"numpy.random.rand",
"numpy.concatenate"
] | [((275, 303), 'numpy.zeros', 'np.zeros', (['(signal_length, 1)'], {}), '((signal_length, 1))\n', (283, 303), True, 'import numpy as np\n'), ((360, 404), 'numpy.random.randn', 'np.random.randn', (['num_channels', 'signal_length'], {}), '(num_channels, signal_length)\n', (375, 404), True, 'import numpy as np\n'), ((479, 511), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'x'], {'zi': 'None'}), '(b, a, x, zi=None)\n', (493, 511), False, 'from scipy import signal\n'), ((526, 554), 'numpy.zeros', 'np.zeros', (['(signal_length, 1)'], {}), '((signal_length, 1))\n', (534, 554), True, 'import numpy as np\n'), ((902, 953), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'signal_input'], {'zi': 'None', 'axis': '(0)'}), '(b, a, signal_input, zi=None, axis=0)\n', (916, 953), False, 'from scipy import signal\n'), ((1191, 1229), 'numpy.zeros', 'np.zeros', (['(signal_length + delta_t, 1)'], {}), '((signal_length + delta_t, 1))\n', (1199, 1229), True, 'import numpy as np\n'), ((1244, 1284), 'numpy.arange', 'np.arange', (['(0)', '(signal_length + delta_t)', '(1)'], {}), '(0, signal_length + delta_t, 1)\n', (1253, 1284), True, 'import numpy as np\n'), ((1420, 1448), 'numpy.zeros', 'np.zeros', (['(signal_length, 1)'], {}), '((signal_length, 1))\n', (1428, 1448), True, 'import numpy as np\n'), ((2165, 2203), 'numpy.zeros', 'np.zeros', (['(signal_length + delta_t, 1)'], {}), '((signal_length + delta_t, 1))\n', (2173, 2203), True, 'import numpy as np\n'), ((2218, 2258), 'numpy.arange', 'np.arange', (['(0)', '(signal_length + delta_t)', '(1)'], {}), '(0, signal_length + delta_t, 1)\n', (2227, 2258), True, 'import numpy as np\n'), ((2327, 2365), 'numpy.zeros', 'np.zeros', (['(signal_length + delta_t, 1)'], {}), '((signal_length + delta_t, 1))\n', (2335, 2365), True, 'import numpy as np\n'), ((2393, 2448), 'math.pow', 'math.pow', (['(f_max * signal_length)', '(1 / (num_superpos - 1))'], {}), '(f_max * signal_length, 1 / (num_superpos - 1))\n', (2401, 2448), False, 'import 
math\n'), ((3141, 3169), 'numpy.zeros', 'np.zeros', (['(signal_length, 1)'], {}), '((signal_length, 1))\n', (3149, 3169), True, 'import numpy as np\n'), ((4434, 4466), 'numpy.concatenate', 'np.concatenate', (['(x1, x2)'], {'axis': '(1)'}), '((x1, x2), axis=1)\n', (4448, 4466), True, 'import numpy as np\n'), ((4483, 4515), 'numpy.concatenate', 'np.concatenate', (['(x2, x1)'], {'axis': '(1)'}), '((x2, x1), axis=1)\n', (4497, 4515), True, 'import numpy as np\n'), ((4524, 4574), 'numpy.concatenate', 'np.concatenate', (['(y1_filtered, y2_filtered)'], {'axis': '(1)'}), '((y1_filtered, y2_filtered), axis=1)\n', (4538, 4574), True, 'import numpy as np\n'), ((4591, 4641), 'numpy.concatenate', 'np.concatenate', (['(y2_filtered, y1_filtered)'], {'axis': '(1)'}), '((y2_filtered, y1_filtered), axis=1)\n', (4605, 4641), True, 'import numpy as np\n'), ((689, 704), 'numpy.transpose', 'np.transpose', (['x'], {}), '(x)\n', (701, 704), True, 'import numpy as np\n'), ((706, 721), 'numpy.transpose', 'np.transpose', (['y'], {}), '(y)\n', (718, 721), True, 'import numpy as np\n'), ((2561, 2595), 'numpy.random.normal', 'np.random.normal', (['f_i', '(1 / 3 * f_i)'], {}), '(f_i, 1 / 3 * f_i)\n', (2577, 2595), True, 'import numpy as np\n'), ((2621, 2653), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1 / 3 * amp)'], {}), '(0, 1 / 3 * amp)\n', (2637, 2653), True, 'import numpy as np\n'), ((4748, 4787), 'numpy.concatenate', 'np.concatenate', (['(t1, x, y, id1)'], {'axis': '(1)'}), '((t1, x, y, id1), axis=1)\n', (4762, 4787), True, 'import numpy as np\n'), ((4887, 4942), 'numpy.concatenate', 'np.concatenate', (['(t1, x_reverse, y_reverse, id1)'], {'axis': '(1)'}), '((t1, x_reverse, y_reverse, id1), axis=1)\n', (4901, 4942), True, 'import numpy as np\n'), ((5109, 5148), 'json.dump', 'json.dump', (['signal_params_1', 'config_file'], {}), '(signal_params_1, config_file)\n', (5118, 5148), False, 'import json\n'), ((5254, 5293), 'json.dump', 'json.dump', (['signal_params_2', 'config_file'], 
{}), '(signal_params_2, config_file)\n', (5263, 5293), False, 'import json\n'), ((801, 817), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (815, 817), True, 'import numpy as np\n'), ((1293, 1328), 'numpy.sin', 'np.sin', (['(frequency * 2 * math.pi * t)'], {}), '(frequency * 2 * math.pi * t)\n', (1299, 1328), True, 'import numpy as np\n'), ((2518, 2535), 'math.pow', 'math.pow', (['base', 'i'], {}), '(base, i)\n', (2526, 2535), False, 'import math\n'), ((2727, 2769), 'numpy.random.rand', 'np.random.rand', (['(signal_length + delta_t)', '(1)'], {}), '(signal_length + delta_t, 1)\n', (2741, 2769), True, 'import numpy as np\n'), ((2670, 2709), 'numpy.sin', 'np.sin', (['(superpos_freq * 2 * math.pi * t)'], {}), '(superpos_freq * 2 * math.pi * t)\n', (2676, 2709), True, 'import numpy as np\n')] |
import numpy as np
from .base import WeightComputer
class UnconditionalWeightComputer(WeightComputer):
    """Trivial weight computer that ignores its input and yields uniform weights."""

    def __init__(self, n_candidates: int):
        """Remember how many candidates the uniform weight is spread over."""
        self.n_candidates = n_candidates

    def __call__(self, _=None) -> np.ndarray:
        """Return a (1, n_candidates) array with every entry equal to 1/n_candidates.

        An argument is accepted (and ignored) so the call signature matches
        conditional weight computers.
        """
        count = self.n_candidates
        return np.ones((1, count)) / count
| [
"numpy.ones"
] | [((500, 531), 'numpy.ones', 'np.ones', (['(1, self.n_candidates)'], {}), '((1, self.n_candidates))\n', (507, 531), True, 'import numpy as np\n')] |
from social_epi import sampling_social_networks as ssn
from social_epi import nx_conversion as nxc
from social_epi import CCMnet_constr_py as ccm
import numpy as np
import matplotlib.pyplot as plt
import json,time
def determine_burnin(social_config, contact_network, transmission_network, burnin_list):
    """Run the social-network sampler once per burn-in value and print diagnostics.

    Args:
        social_config: path to a JSON config file for the sampler.
        contact_network, transmission_network: networks forwarded to ssn.run.
        burnin_list: iterable of burn-in values to evaluate.
    """
    # Load the config once; each iteration only overrides the burn-in value.
    # (The original json.load(open(...)) leaked the file handle.)
    with open(social_config) as cfg_file:
        config = json.load(cfg_file)
    for bu in burnin_list:
        starttime = time.time()
        config["burnin"] = bu
        print("Burnin: {}".format(bu))
        social_network, overlap_network = ssn.run(config, contact_network, transmission_network)
        print("The overlap network is conserved: {}".format(check_overlap(social_network,overlap_network)))
        print("Error in degree distribution: {}".format(compare_dists(config,social_network)))
        print("Time taken: {}".format(time.time()-starttime))
def check_overlap(SN, ON):
    """True when every edge of the overlap network ON also appears in the social network SN."""
    social_edges = set(SN.edges())
    return all(edge in social_edges for edge in ON.edges())
def compare_dists(config, SN):
    """Return the L1 distance between the target degree distribution in *config*
    and the empirical degree distribution of the sampled network *SN*.
    """
    from collections import Counter
    N = SN.number_of_nodes()
    deg_dict = config["degree_distribution"]
    deg_dist = np.asarray(nxc.pad_deg_dist(deg_dict, config["small_prob"], N))
    # Counter makes this O(N) instead of the O(N^2) list.count() loop;
    # Counter[i] is 0 for degrees that never occur, matching count() == 0.
    degree_counts = Counter(deg for _, deg in SN.degree())
    sn_deg_dist = np.array([degree_counts[i] / N for i in range(N)])
    return sum(np.abs(deg_dist - sn_deg_dist))
def assess_burnin_L1(burnin_config_file, contact_network_file, transmission_network_file=None):
    """Run CCM once and track the L1 error of the degree distribution over the chain.

    Accepts either file paths or already-loaded objects for the network and
    config arguments.  Writes "deg_hist_results.csv" and "L1_results.json",
    and saves a convergence scatter plot to "convergence.png".
    """
    if isinstance(contact_network_file, str):
        contact_network, _ = nxc.favitescontacttransmission2nx(contact_network_file, transmission_network_file)
    else:
        contact_network = contact_network_file
    if isinstance(burnin_config_file, str):
        # was json.load(open(...)): leaked the file handle
        with open(burnin_config_file) as cfg_file:
            config = json.load(cfg_file)
    else:
        config = burnin_config_file
    N = int(contact_network.number_of_nodes())
    # make CCM config dictionary and then run CCM
    ccmc = ssn.gen_config(config, N)
    _, dd_stats = ccm.CCMnet_constr_py(**ccmc)
    dd_stats.to_csv("deg_hist_results.csv")
    # get desired deg dist
    deg_dict = config["degree_distribution"]
    deg_dist = np.asarray(nxc.pad_deg_dist(deg_dict, config["small_prob"], N))
    L1 = []
    for dd in dd_stats.iterrows():
        dd = np.array(dd[1])
        L1.append(sum(np.abs(deg_dist - dd / N)))
    # with-block guarantees the JSON is flushed/closed (the original handle
    # from json.dump(L1, open(..., "w")) was never closed)
    with open("L1_results.json", "w") as out_file:
        json.dump(L1, out_file)
    plt.scatter(range(len(L1)), L1)
    plt.savefig("convergence.png")
    plt.show()
if __name__ == "__main__":
    # Example driver: paths are relative to a FAVITES output directory,
    # e.g. cd tests/chain_test/FAVITES_output_20220524141705
    contact_network = "contact_network.txt"
    transmission_network = "error_free_files/transmission_network.txt"
    # social_config = "../../../src/configs/sampling_social_networks_config.json"
    # burnin_list = np.arange(1000,10000,1000)
    # determine_burnin2(social_config,contact_network,transmission_network,burnin_list)
    bc = "../../../src/configs/burnin_config.json"
    assess_burnin_L1(bc,contact_network,transmission_network)
| [
"matplotlib.pyplot.show",
"numpy.abs",
"social_epi.sampling_social_networks.run",
"time.time",
"social_epi.sampling_social_networks.gen_config",
"social_epi.CCMnet_constr_py.CCMnet_constr_py",
"social_epi.nx_conversion.pad_deg_dist",
"numpy.array",
"social_epi.nx_conversion.favitescontacttransmissio... | [((1816, 1841), 'social_epi.sampling_social_networks.gen_config', 'ssn.gen_config', (['config', 'N'], {}), '(config, N)\n', (1830, 1841), True, 'from social_epi import sampling_social_networks as ssn\n'), ((1859, 1887), 'social_epi.CCMnet_constr_py.CCMnet_constr_py', 'ccm.CCMnet_constr_py', ([], {}), '(**ccmc)\n', (1879, 1887), True, 'from social_epi import CCMnet_constr_py as ccm\n'), ((2288, 2318), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""convergence.png"""'], {}), "('convergence.png')\n", (2299, 2318), True, 'import matplotlib.pyplot as plt\n'), ((2323, 2333), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2331, 2333), True, 'import matplotlib.pyplot as plt\n'), ((396, 407), 'time.time', 'time.time', ([], {}), '()\n', (405, 407), False, 'import json, time\n'), ((519, 573), 'social_epi.sampling_social_networks.run', 'ssn.run', (['config', 'contact_network', 'transmission_network'], {}), '(config, contact_network, transmission_network)\n', (526, 573), True, 'from social_epi import sampling_social_networks as ssn\n'), ((1052, 1103), 'social_epi.nx_conversion.pad_deg_dist', 'nxc.pad_deg_dist', (['deg_dict', "config['small_prob']", 'N'], {}), "(deg_dict, config['small_prob'], N)\n", (1068, 1103), True, 'from social_epi import nx_conversion as nxc\n'), ((1231, 1261), 'numpy.abs', 'np.abs', (['(deg_dist - sn_deg_dist)'], {}), '(deg_dist - sn_deg_dist)\n', (1237, 1261), True, 'import numpy as np\n'), ((1430, 1516), 'social_epi.nx_conversion.favitescontacttransmission2nx', 'nxc.favitescontacttransmission2nx', (['contact_network_file', 'transmission_network_file'], {}), '(contact_network_file,\n transmission_network_file)\n', (1463, 1516), True, 'from social_epi import nx_conversion as nxc\n'), ((2030, 2081), 'social_epi.nx_conversion.pad_deg_dist', 'nxc.pad_deg_dist', (['deg_dict', "config['small_prob']", 'N'], {}), "(deg_dict, config['small_prob'], N)\n", (2046, 2081), True, 'from social_epi 
import nx_conversion as nxc\n'), ((2141, 2156), 'numpy.array', 'np.array', (['dd[1]'], {}), '(dd[1])\n', (2149, 2156), True, 'import numpy as np\n'), ((2179, 2204), 'numpy.abs', 'np.abs', (['(deg_dist - dd / N)'], {}), '(deg_dist - dd / N)\n', (2185, 2204), True, 'import numpy as np\n'), ((815, 826), 'time.time', 'time.time', ([], {}), '()\n', (824, 826), False, 'import json, time\n')] |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == 'nt':
    # Add CUDA_PATH env variable
    # NOTE(review): os.environ["CUDA_PATH"] raises KeyError when the variable
    # is missing entirely; the else-branch only fires for an empty string.
    cuda_path = os.environ["CUDA_PATH"]
    if cuda_path:
        os.add_dll_directory(cuda_path)
    else:
        print("CUDA_PATH environment variable is not set.", file = sys.stderr)
        print("Can't set CUDA DLLs search path.", file = sys.stderr)
        exit(1)
    # Add PATH as well for minor CUDA releases
    sys_path = os.environ["PATH"]
    if sys_path:
        # register every existing directory from PATH for DLL resolution
        paths = sys_path.split(';')
        for path in paths:
            if os.path.isdir(path):
                os.add_dll_directory(path)
    else:
        print("PATH environment variable is not set.", file = sys.stderr)
        exit(1)
import pycuda.driver as cuda
import PyNvCodec as nvc
import numpy as np
def decode(gpuID, encFilePath, decFilePath):
    """Decode encFilePath on GPU gpuID and write raw YUV420 frames to decFilePath.

    Pipeline: demux -> NVDEC decode (NV12) -> convert to YUV420 -> download to
    host -> append to the output file.  The decoder is flushed at EOF so no
    queued frames are lost.
    """
    cuda.init()
    cuda_ctx = cuda.Device(gpuID).retain_primary_context()
    cuda_ctx.push()
    cuda_str = cuda.Stream()
    cuda_ctx.pop()

    nvDmx = nvc.PyFFmpegDemuxer(encFilePath)
    nvDec = nvc.PyNvDecoder(nvDmx.Width(), nvDmx.Height(), nvDmx.Format(), nvDmx.Codec(), cuda_ctx.handle, cuda_str.handle)
    nvCvt = nvc.PySurfaceConverter(nvDmx.Width(), nvDmx.Height(), nvDmx.Format(), nvc.PixelFormat.YUV420, cuda_ctx.handle, cuda_str.handle)
    nvDwn = nvc.PySurfaceDownloader(nvDmx.Width(), nvDmx.Height(), nvCvt.Format(), cuda_ctx.handle, cuda_str.handle)

    packet = np.ndarray(shape=(0), dtype=np.uint8)
    frameSize = int(nvDmx.Width() * nvDmx.Height() * 3 / 2)  # YUV420: 1.5 bytes per pixel
    rawFrame = np.ndarray(shape=(frameSize), dtype=np.uint8)
    pdata_in, pdata_out = nvc.PacketData(), nvc.PacketData()

    # Determine colorspace conversion parameters.
    # Some video streams don't specify these parameters so default values
    # are most widespread bt601 and mpeg.
    cspace, crange = nvDmx.ColorSpace(), nvDmx.ColorRange()
    if nvc.ColorSpace.UNSPEC == cspace:
        cspace = nvc.ColorSpace.BT_601
    if nvc.ColorRange.UDEF == crange:
        crange = nvc.ColorRange.MPEG
    cc_ctx = nvc.ColorspaceConversionContext(cspace, crange)
    print('Color space: ', str(cspace))
    print('Color range: ', str(crange))

    # "with" guarantees the output file is closed/flushed even on error
    # (the original opened decFile and never closed it).
    with open(decFilePath, "wb") as decFile:
        while True:
            # Demuxer has sync design, it returns packet every time it's called.
            # If demuxer can't return packet it usually means EOF.
            if not nvDmx.DemuxSinglePacket(packet):
                break
            # Get last packet data to obtain frame timestamp
            nvDmx.LastPacketData(pdata_in)
            # Decoder is async by design: it may consume several packets before
            # it can return a decoded surface.
            surface_nv12 = nvDec.DecodeSurfaceFromPacket(pdata_in, packet, pdata_out)
            if not surface_nv12.Empty():
                surface_yuv420 = nvCvt.Execute(surface_nv12, cc_ctx)
                if surface_yuv420.Empty():
                    break
                if not nvDwn.DownloadSingleSurface(surface_yuv420, rawFrame):
                    break
                bits = bytearray(rawFrame)
                decFile.write(bits)
        # Now we flush decoder to empty decoded frames queue.
        while True:
            surface_nv12 = nvDec.FlushSingleSurface()
            if surface_nv12.Empty():
                break
            surface_yuv420 = nvCvt.Execute(surface_nv12, cc_ctx)
            if surface_yuv420.Empty():
                break
            if not nvDwn.DownloadSingleSurface(surface_yuv420, rawFrame):
                break
            bits = bytearray(rawFrame)
            decFile.write(bits)
if __name__ == "__main__":
    print("This sample decodes input video to raw YUV420 file on given GPU.")
    print("Usage: SampleDecode.py $gpu_id $input_file $output_file.")
    # require all three positional arguments
    if(len(sys.argv) < 4):
        print("Provide gpu ID, path to input and output files")
        exit(1)
    gpuID = int(sys.argv[1])
    encFilePath = sys.argv[2]
    decFilePath = sys.argv[3]
    decode(gpuID, encFilePath, decFilePath)
| [
"pycuda.driver.Stream",
"numpy.ndarray",
"os.path.isdir",
"PyNvCodec.PacketData",
"os.add_dll_directory",
"pycuda.driver.Device",
"PyNvCodec.PyFFmpegDemuxer",
"pycuda.driver.init",
"PyNvCodec.ColorspaceConversionContext"
] | [((1502, 1513), 'pycuda.driver.init', 'cuda.init', ([], {}), '()\n', (1511, 1513), True, 'import pycuda.driver as cuda\n'), ((1608, 1621), 'pycuda.driver.Stream', 'cuda.Stream', ([], {}), '()\n', (1619, 1621), True, 'import pycuda.driver as cuda\n'), ((1693, 1725), 'PyNvCodec.PyFFmpegDemuxer', 'nvc.PyFFmpegDemuxer', (['encFilePath'], {}), '(encFilePath)\n', (1712, 1725), True, 'import PyNvCodec as nvc\n'), ((2121, 2156), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(0)', 'dtype': 'np.uint8'}), '(shape=0, dtype=np.uint8)\n', (2131, 2156), True, 'import numpy as np\n'), ((2234, 2277), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'frameSize', 'dtype': 'np.uint8'}), '(shape=frameSize, dtype=np.uint8)\n', (2244, 2277), True, 'import numpy as np\n'), ((2735, 2782), 'PyNvCodec.ColorspaceConversionContext', 'nvc.ColorspaceConversionContext', (['cspace', 'crange'], {}), '(cspace, crange)\n', (2766, 2782), True, 'import PyNvCodec as nvc\n'), ((832, 863), 'os.add_dll_directory', 'os.add_dll_directory', (['cuda_path'], {}), '(cuda_path)\n', (852, 863), False, 'import os\n'), ((2306, 2322), 'PyNvCodec.PacketData', 'nvc.PacketData', ([], {}), '()\n', (2320, 2322), True, 'import PyNvCodec as nvc\n'), ((2324, 2340), 'PyNvCodec.PacketData', 'nvc.PacketData', ([], {}), '()\n', (2338, 2340), True, 'import PyNvCodec as nvc\n'), ((1215, 1234), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1228, 1234), False, 'import os\n'), ((1529, 1547), 'pycuda.driver.Device', 'cuda.Device', (['gpuID'], {}), '(gpuID)\n', (1540, 1547), True, 'import pycuda.driver as cuda\n'), ((1252, 1278), 'os.add_dll_directory', 'os.add_dll_directory', (['path'], {}), '(path)\n', (1272, 1278), False, 'import os\n')] |
# This script takes velocity data from within the Gulf Stream and makes
# a stream plot.
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio

########## Prepare data ##########
data = sio.loadmat('data/AVISO_uv_1993_24yrs.mat')
# reorder to (lat, lon, time) -- assumes source arrays are (lon, lat, time)
u = np.transpose(data['u_gulf'], (1, 0, 2))
v = np.transpose(data['v_gulf'], (1, 0, 2))
lon, lat = np.linspace(-80, -50, 121), np.linspace(30, 50, 81)
# BUG FIX: the original called np.square(u, u) / np.square(v, v); the second
# argument is the ufunc `out` buffer, so u and v were squared IN PLACE and the
# later quiver/streamplot panels silently plotted u^2, v^2 instead of u, v.
speed = np.sqrt(np.square(u) + np.square(v))
# make a land mask (from nan values)
landMask = np.squeeze(speed[:, :, 100])
landMask[~np.isnan(landMask)] = 0
landMask[np.isnan(landMask)] = 1
landMask[landMask == 0] = np.nan
lonGrid, latGrid = np.meshgrid(lon, lat)

########## Plotting ##########
# prepare subplots
fig, axArr = plt.subplots(2, 2, sharex=True, sharey=True)
fig.set_size_inches((11, 8))
# sub-sampling rate for quiver
n = 1
# choose which day to plot
t0 = 1000
# use each method
im0 = axArr[0, 0].pcolor(lon, lat, speed[:, :, t0], cmap='hot', vmin=0, vmax=1.6)
im1 = axArr[0, 1].contourf(lon, lat, speed[:, :, t0], cmap='hot', vmin=0, vmax=1.6)
im2 = axArr[1, 0].quiver(lon[::n], lat[::n], u[::n, ::n, t0], v[::n, ::n, t0],
                         speed[::n, ::n, t0], cmap='hot', clim=(0, 1.6))
im3 = axArr[1, 1].streamplot(lon, lat, u[:, :, t0], v[:, :, t0], color=speed[:, :, t0],
                             density=[7, 7], cmap='hot', linewidth=2 * speed[:, :, t0], arrowsize=0.5)
# add land and set common limits on every panel
for ax in axArr.flat:
    ax.contourf(lonGrid, latGrid, landMask, cmap='Greys')
    ax.set_ylim((30, 50))
    ax.set_xlim((-80, -50))
# titles
axArr[0, 0].set_title('pcolor of $\sqrt{u^2+v^2}$', weight='bold')
axArr[0, 1].set_title('contourf of $\sqrt{u^2+v^2}$', weight='bold')
axArr[1, 0].set_title('quiver of $(u,v)$', weight='bold')
axArr[1, 1].set_title('streamplot of $(u,v)$', weight='bold')
# latitude ticks on the left-hand panels
latLabels = ['32$^\circ$', '36$^\circ$', '40$^\circ$', '44$^\circ$', '48$^\circ$']
for ax in (axArr[0, 0], axArr[1, 0]):
    ax.set_yticks([32, 36, 40, 44, 48])
    ax.set_yticklabels(latLabels)
# longitude ticks on the bottom panels
lonLabels = ['75$^\circ$', '70$^\circ$', '65$^\circ$', '60$^\circ$', '55$^\circ$']
for ax in (axArr[1, 0], axArr[1, 1]):
    ax.set_xticks([-75, -70, -65, -60, -55])
    ax.set_xticklabels(lonLabels)
# adjust spacing and add colorbar
plt.subplots_adjust(wspace=0, hspace=0.15)
plt.subplots_adjust(right=0.88)
cbarAx = fig.add_axes([0.9, 0.11, 0.03, 0.77])
cbar = fig.colorbar(im0, cax=cbarAx)
cbar.set_label('Speed (ms$^{-1}$)', weight='bold', fontsize=11)
cbar.ax.tick_params(labelsize=12)
# save the figure
plt.savefig('gulfStreamUV_snapshots.png', format='png', dpi=600)
plt.show() | [
"numpy.meshgrid",
"matplotlib.pyplot.show",
"scipy.io.loadmat",
"numpy.square",
"numpy.transpose",
"numpy.isnan",
"numpy.linspace",
"numpy.squeeze",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots_adjust"
] | [((209, 252), 'scipy.io.loadmat', 'sio.loadmat', (['"""data/AVISO_uv_1993_24yrs.mat"""'], {}), "('data/AVISO_uv_1993_24yrs.mat')\n", (220, 252), True, 'import scipy.io as sio\n'), ((258, 297), 'numpy.transpose', 'np.transpose', (["data['u_gulf']", '(1, 0, 2)'], {}), "(data['u_gulf'], (1, 0, 2))\n", (270, 297), True, 'import numpy as np\n'), ((302, 341), 'numpy.transpose', 'np.transpose', (["data['v_gulf']", '(1, 0, 2)'], {}), "(data['v_gulf'], (1, 0, 2))\n", (314, 341), True, 'import numpy as np\n'), ((517, 545), 'numpy.squeeze', 'np.squeeze', (['speed[:, :, 100]'], {}), '(speed[:, :, 100])\n', (527, 545), True, 'import numpy as np\n'), ((676, 697), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (687, 697), True, 'import numpy as np\n'), ((902, 946), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, sharex=True, sharey=True)\n', (914, 946), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2998), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0.15)'}), '(wspace=0, hspace=0.15)\n', (2975, 2998), True, 'import matplotlib.pyplot as plt\n'), ((3001, 3032), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.88)'}), '(right=0.88)\n', (3020, 3032), True, 'import matplotlib.pyplot as plt\n'), ((3241, 3305), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""gulfStreamUV_snapshots.png"""'], {'format': '"""png"""', 'dpi': '(600)'}), "('gulfStreamUV_snapshots.png', format='png', dpi=600)\n", (3252, 3305), True, 'import matplotlib.pyplot as plt\n'), ((3309, 3319), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3317, 3319), True, 'import matplotlib.pyplot as plt\n'), ((354, 380), 'numpy.linspace', 'np.linspace', (['(-80)', '(-50)', '(121)'], {}), '(-80, -50, 121)\n', (365, 380), True, 'import numpy as np\n'), ((384, 407), 'numpy.linspace', 'np.linspace', (['(30)', '(50)', '(81)'], {}), '(30, 50, 
81)\n', (395, 407), True, 'import numpy as np\n'), ((594, 612), 'numpy.isnan', 'np.isnan', (['landMask'], {}), '(landMask)\n', (602, 612), True, 'import numpy as np\n'), ((428, 443), 'numpy.square', 'np.square', (['u', 'u'], {}), '(u, u)\n', (437, 443), True, 'import numpy as np\n'), ((448, 463), 'numpy.square', 'np.square', (['v', 'v'], {}), '(v, v)\n', (457, 463), True, 'import numpy as np\n'), ((557, 575), 'numpy.isnan', 'np.isnan', (['landMask'], {}), '(landMask)\n', (565, 575), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import os
from skimage import morphology
from matplotlib import pyplot as plt
# For every image under ./test: binarize the bright regions, clean the mask,
# keep only sufficiently large connected components, and show each mask.
for plot_idx, fname in enumerate(os.listdir('./test')):
    bgr = cv2.imread(os.path.join('./test', fname))
    # two-step grayscale conversion via RGB (matches the source material)
    gray = cv2.cvtColor(cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB), cv2.COLOR_RGB2GRAY)
    _, binary = cv2.threshold(src=gray, thresh=220, maxval=255, type=cv2.THRESH_BINARY)
    denoised = cv2.medianBlur(binary, 5)
    struct_elem = np.ones((2, 2), dtype=np.uint8)
    grown = cv2.dilate(src=denoised, kernel=struct_elem, iterations=1)
    # drop connected components smaller than 50 pixels
    keep = morphology.remove_small_objects(
        np.where(grown > 0.1, 1, 0).astype(bool), min_size=50, connectivity=1
    ).astype(int)
    rows, cols = np.where(keep == 0)
    grown[rows, cols] = 0
    plt.subplot(3, 4, plot_idx + 1)
    plt.imshow(grown, cmap='gray')
    # Copy the target region from the original image onto the mask
    img_fg = cv2.bitwise_and(bgr, bgr, mask=grown)
    # plt.imshow(img_fg)
plt.show()
| [
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.cvtColor",
"cv2.dilate",
"cv2.threshold",
"matplotlib.pyplot.imshow",
"numpy.ones",
"numpy.where",
"os.path.join",
"os.listdir"
] | [((969, 979), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (977, 979), True, 'from matplotlib import pyplot as plt\n'), ((138, 158), 'os.listdir', 'os.listdir', (['"""./test"""'], {}), "('./test')\n", (148, 158), False, 'import os\n'), ((223, 259), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (235, 259), False, 'import cv2\n'), ((271, 309), 'cv2.cvtColor', 'cv2.cvtColor', (['img1', 'cv2.COLOR_RGB2GRAY'], {}), '(img1, cv2.COLOR_RGB2GRAY)\n', (283, 309), False, 'import cv2\n'), ((331, 402), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'img1', 'thresh': '(220)', 'maxval': '(255)', 'type': 'cv2.THRESH_BINARY'}), '(src=img1, thresh=220, maxval=255, type=cv2.THRESH_BINARY)\n', (344, 402), False, 'import cv2\n'), ((416, 444), 'cv2.medianBlur', 'cv2.medianBlur', (['threshold', '(5)'], {}), '(threshold, 5)\n', (430, 444), False, 'import cv2\n'), ((458, 489), 'numpy.ones', 'np.ones', (['(2, 2)'], {'dtype': 'np.uint8'}), '((2, 2), dtype=np.uint8)\n', (465, 489), True, 'import numpy as np\n'), ((505, 556), 'cv2.dilate', 'cv2.dilate', ([], {'src': 'median', 'kernel': 'kernel', 'iterations': '(1)'}), '(src=median, kernel=kernel, iterations=1)\n', (515, 556), False, 'import cv2\n'), ((573, 603), 'numpy.where', 'np.where', (['(dilation > 0.1)', '(1)', '(0)'], {}), '(dilation > 0.1, 1, 0)\n', (581, 603), True, 'import numpy as np\n'), ((738, 762), 'numpy.where', 'np.where', (['(processed == 0)'], {}), '(processed == 0)\n', (746, 762), True, 'import numpy as np\n'), ((801, 829), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(index + 1)'], {}), '(3, 4, index + 1)\n', (812, 829), True, 'from matplotlib import pyplot as plt\n'), ((832, 865), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dilation'], {'cmap': '"""gray"""'}), "(dilation, cmap='gray')\n", (842, 865), True, 'from matplotlib import pyplot as plt\n'), ((903, 943), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'dilation'}), 
'(img, img, mask=dilation)\n', (918, 943), False, 'import cv2\n'), ((182, 210), 'os.path.join', 'os.path.join', (['"""./test"""', 'path'], {}), "('./test', path)\n", (194, 210), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so.
import bisect
import numpy as np
import xarray as xr
from ..pyrf import resample, avg_4sc, time_clip, wavelet
def fk_power_spectrum_4sc(e, r, b, tints, cav: int = 8, num_k: int = 500,
                          num_f: int = 200, df: float = None,
                          w_width: int = 1, f_range: list = None):
    """Calculates the frequency-wave number power spectrum using the four
    MMS spacecraft. Uses a generalization of mms.fk_powerspectrum. Wavelet
    based cross-spectral analysis is used to calculate the phase difference
    each spacecraft pair and determine 3D wave vector. A generalization of
    the method used in mms.fk_powerspectrum to four point measurements.

    Parameters
    ----------
    e : list of xarray.DataArray
        Fields to apply 4SC cross-spectral analysis to. e.g., e or b fields
        (if multiple components only the first is used).
    r : list of xarray.DataArray
        Positions of the four spacecraft.
    b : list of xarray.DataArray
        background magnetic field in the same coordinates as r. Used to
        determine the parallel and perpendicular wave numbers using 4SC
        average.
    tints : list of str
        Time interval over which the power spectrum is calculated. To avoid
        boundary effects use a longer time interval for e and b.
    cav : int, Optional
        Number of points in time series used to estimate phase.
        Default ``cav`` = 8.
    num_k : int, Optional
        Number of wave numbers used in spectrogram. Default ``num_k`` = 500.
    df : float, Optional
        Linear spacing of frequencies. Default ``df`` = None (log spacing).
    num_f : int, Optional
        Number of frequencies used in spectrogram. Default ``num_f`` = 200.
    w_width : float, Optional
        Multiplier for Morlet wavelet width. Default ``w_width`` = 1.
    f_range : list of float, Optional
        Frequency range for k-k plots. [minf maxf].

    Returns
    -------
    out : xarray.Dataset
        Dataset of array of powers as a function of frequency and
        wavenumber. Power is normalized to the maximum value.

    Notes
    -----
    Wavelength must be larger than twice the spacecraft separations,
    otherwise spatial aliasing will occur.
    """

    ic = np.arange(1, 5)

    # Resample everything onto the time line of the first field.
    e = [resample(e[i - 1], e[0]) for i in ic]
    r = [resample(r[i - 1], e[0]) for i in ic]
    b = [resample(b[i - 1], e[0]) for i in ic]

    b_avg = avg_4sc(b)

    times = e[0].time
    use_linear = df is not None

    idx = time_clip(e[0].time, tints)

    # If odd, remove last data point (as is done in irf_wavelet)
    if len(idx) % 2:
        idx = idx[:-1]

    if use_linear:
        cwt_options = dict(linear=df, returnpower=False,
                           wavelet_width=5.36 * w_width)
    else:
        cwt_options = dict(nf=num_f, returnpower=False,
                           wavelet_width=5.36 * w_width)

    w = [wavelet(e[i], **cwt_options) for i in range(4)]

    num_f = len(w[0].frequency)

    times = time_clip(times, tints)
    nt = len(times)

    w = [time_clip(w[i], tints) for i in range(4)]

    # Four-spacecraft averaged wavelet power.
    fk_power = 0
    for i in range(4):
        fk_power += w[i].data * np.conj(w[i].data) / 4

    # Number of phase-estimation windows of length ``cav``.
    n = int(np.floor(nt / cav) - 1)
    pos_av = cav / 2 + np.arange(n) * cav
    av_times = times[pos_av.astype(int)]

    b_avg = resample(b_avg, av_times)
    r = [resample(r[i], av_times) for i in range(4)]

    # Cross-spectra of the six spacecraft pairs, averaged over each window.
    cx12, cx13, cx14 = [np.zeros((n, num_f), dtype="complex128") for _ in range(3)]
    cx23, cx24, cx34 = [np.zeros((n, num_f), dtype="complex128") for _ in range(3)]
    power_avg = np.zeros((n, num_f), dtype="complex128")

    for m, pos_avm in enumerate(pos_av):
        lb, ub = [int(pos_avm - cav / 2 + 1), int(pos_avm + cav / 2)]

        cx12[m, :] = np.nanmean(w[0].data[lb:ub, :] * np.conj(w[1].data[lb:ub, :]), axis=0)
        cx13[m, :] = np.nanmean(w[0].data[lb:ub, :] * np.conj(w[2].data[lb:ub, :]), axis=0)
        cx14[m, :] = np.nanmean(w[0].data[lb:ub, :] * np.conj(w[3].data[lb:ub, :]), axis=0)
        cx23[m, :] = np.nanmean(w[1].data[lb:ub, :] * np.conj(w[2].data[lb:ub, :]), axis=0)
        cx24[m, :] = np.nanmean(w[1].data[lb:ub, :] * np.conj(w[3].data[lb:ub, :]), axis=0)
        cx34[m, :] = np.nanmean(w[2].data[lb:ub, :] * np.conj(w[3].data[lb:ub, :]), axis=0)

        power_avg[m, :] = np.nanmean(fk_power[lb:ub, :], axis=0)

    # Compute phase differences between each spacecraft pair
    th12 = np.arctan2(np.imag(cx12), np.real(cx12))
    th13 = np.arctan2(np.imag(cx13), np.real(cx13))
    th14 = np.arctan2(np.imag(cx14), np.real(cx14))
    th23 = np.arctan2(np.imag(cx23), np.real(cx23))
    th24 = np.arctan2(np.imag(cx24), np.real(cx24))
    th34 = np.arctan2(np.imag(cx34), np.real(cx34))

    w_mat = 2 * np.pi * np.tile(w[0].frequency.data, (n, 1))

    # Convert phase difference to time delay
    dt12, dt13, dt14, dt23, dt24, dt34 = [
        th / w_mat for th in [th12, th13, th14, th23, th24, th34]]

    # Weighted averaged time delay using all spacecraft pairs
    dt2 = 0.5 * dt12 + 0.2 * (dt13 - dt23) + 0.2 * (dt14 - dt24) + 0.1 * (dt14 - dt34 - dt23)
    dt3 = 0.5 * dt13 + 0.2 * (dt12 + dt23) + 0.2 * (dt14 - dt34) + 0.1 * (dt12 + dt24 - dt34)
    dt4 = 0.5 * dt14 + 0.2 * (dt12 + dt24) + 0.2 * (dt13 + dt34) + 0.1 * (dt12 + dt23 + dt34)

    # Compute phase speeds
    r = [r[i].data for i in range(4)]

    k_x, k_y, k_z = [np.zeros((n, num_f)) for _ in range(3)]

    # Volumetric tensor with SC1 as center.
    dr = np.reshape(np.hstack(r[1:]), (n, 3, 3)) - np.reshape(np.tile(r[0], (1, 3)), (n, 3, 3))
    dr = np.transpose(dr, [0, 2, 1])

    # Delay tensor with SC1 as center.
    tau = np.dstack([dt2, dt3, dt4])

    # Solve dr . m = tau for the slowness vector m at every frequency.
    for ii in range(num_f):
        m = np.linalg.solve(dr, np.squeeze(tau[:, ii, :]))

        k_x[:, ii] = 2 * np.pi * w[0].frequency[ii].data * m[:, 0]
        k_y[:, ii] = 2 * np.pi * w[0].frequency[ii].data * m[:, 1]
        k_z[:, ii] = 2 * np.pi * w[0].frequency[ii].data * m[:, 2]

    k_x, k_y, k_z = [k / 1e3 for k in [k_x, k_y, k_z]]
    k_mag = np.linalg.norm(np.array([k_x, k_y, k_z]), axis=0)

    # Field-aligned decomposition of the wave vector using the 4SC-average B.
    b_avg_x_mat = np.tile(b_avg.data[:, 0], (num_f, 1)).T
    b_avg_y_mat = np.tile(b_avg.data[:, 1], (num_f, 1)).T
    b_avg_z_mat = np.tile(b_avg.data[:, 2], (num_f, 1)).T
    b_avg_abs = np.linalg.norm(b_avg, axis=1)
    b_avg_abs_mat = np.tile(b_avg_abs, (num_f, 1)).T

    k_par = (k_x * b_avg_x_mat + k_y * b_avg_y_mat + k_z * b_avg_z_mat) / b_avg_abs_mat
    k_perp = np.sqrt(k_mag ** 2 - k_par ** 2)

    k_max = np.max(k_mag) * 1.1
    k_min = -k_max
    k_vec = np.linspace(-k_max, k_max, num_k)
    k_mag_vec = np.linspace(0, k_max, num_k)
    dk_mag = k_max / num_k
    dk = 2 * k_max / num_k

    # Sort power into frequency and wave vector.
    # BUG FIX: the accumulation must use np.add.at; the previous fancy-indexed
    # ``+=`` is buffered and silently keeps only ONE contribution when several
    # time samples fall into the same wavenumber bin.
    print("notice : Computing power versus kx,f; ky,f, kz,f")

    power_k_x_f, power_k_y_f, power_k_z_f = [np.zeros((num_f, num_k)) for _ in range(3)]
    power_k_mag_f = np.zeros((num_f, num_k))

    for nn in range(num_f):
        k_x_number = np.floor((k_x[:, nn] - k_min) / dk).astype(int)
        k_y_number = np.floor((k_y[:, nn] - k_min) / dk).astype(int)
        k_z_number = np.floor((k_z[:, nn] - k_min) / dk).astype(int)
        k_number = np.floor(k_mag[:, nn] / dk_mag).astype(int)

        p_nn = np.real(power_avg[:, nn])
        np.add.at(power_k_x_f, (nn, k_x_number), p_nn)
        np.add.at(power_k_y_f, (nn, k_y_number), p_nn)
        np.add.at(power_k_z_f, (nn, k_z_number), p_nn)
        np.add.at(power_k_mag_f, (nn, k_number), p_nn)

    # Normalize each map to its maximum.
    power_k_x_f /= np.max(power_k_x_f)
    power_k_y_f /= np.max(power_k_y_f)
    power_k_z_f /= np.max(power_k_z_f)
    power_k_mag_f /= np.max(power_k_mag_f)

    frequencies = w[0].frequency.data
    idx_f = np.arange(num_f)

    # Optionally restrict the k-k maps to a frequency band.
    if f_range is not None:
        idx_min_freq = bisect.bisect_left(frequencies, np.min(f_range))
        idx_max_freq = bisect.bisect_left(frequencies, np.max(f_range))
        idx_f = idx_f[idx_min_freq:idx_max_freq]

    print("notice : Computing power versus kx,ky; kx,kz; ky,kz\n")

    power_k_x_k_y = np.zeros((num_k, num_k))
    power_k_x_k_z = np.zeros((num_k, num_k))
    power_k_y_k_z = np.zeros((num_k, num_k))
    power_k_perp_k_par = np.zeros((num_k, num_k))

    for nn in idx_f:
        k_x_number = np.floor((k_x[:, nn] - k_min) / dk).astype(int)
        k_y_number = np.floor((k_y[:, nn] - k_min) / dk).astype(int)
        k_z_number = np.floor((k_z[:, nn] - k_min) / dk).astype(int)
        k_par_number = np.floor((k_par[:, nn] - k_min) / dk).astype(int)
        k_perp_number = np.floor(k_perp[:, nn] / dk_mag).astype(int)

        p_nn = np.real(power_avg[:, nn])
        np.add.at(power_k_x_k_y, (k_y_number, k_x_number), p_nn)
        np.add.at(power_k_x_k_z, (k_z_number, k_x_number), p_nn)
        np.add.at(power_k_y_k_z, (k_z_number, k_y_number), p_nn)
        np.add.at(power_k_perp_k_par, (k_par_number, k_perp_number), p_nn)

    power_k_x_k_y /= np.max(power_k_x_k_y)
    power_k_x_k_z /= np.max(power_k_x_k_z)
    power_k_y_k_z /= np.max(power_k_y_k_z)
    power_k_perp_k_par /= np.max(power_k_perp_k_par)

    # NOTE: "k_y_f"/"k_z_f" keep their historical "k_x" dimension label (the
    # coordinate values are identical); the bare "kx"/"kz" labels previously
    # attached to "k_x_k_z" were a typo and are fixed to "k_x"/"k_z" so the
    # array shares the dataset's coordinates.
    out_dict = {"k_x_f": (["k_x", "f"], power_k_x_f.T),
                "k_y_f": (["k_x", "f"], power_k_y_f.T),
                "k_z_f": (["k_x", "f"], power_k_z_f.T),
                "k_mag_f": (["k_mag", "f"], power_k_mag_f.T),
                "k_x_k_y": (["k_x", "k_y"], power_k_x_k_y.T),
                "k_x_k_z": (["k_x", "k_z"], power_k_x_k_z.T),
                "k_y_k_z": (["k_y", "k_z"], power_k_y_k_z.T),
                "k_perp_k_par": (["k_perp", "k_par"], power_k_perp_k_par.T),
                "k_x": k_vec, "k_y": k_vec, "k_z": k_vec,
                "k_mag": k_mag_vec, "k_perp": k_mag_vec, "k_par": k_vec,
                "f": frequencies}

    out = xr.Dataset(out_dict)

    return out
| [
"numpy.floor",
"numpy.imag",
"numpy.linalg.norm",
"numpy.arange",
"numpy.tile",
"numpy.nanmean",
"numpy.transpose",
"numpy.max",
"numpy.linspace",
"numpy.real",
"numpy.dstack",
"numpy.conj",
"xarray.Dataset",
"numpy.hstack",
"numpy.min",
"numpy.squeeze",
"numpy.zeros",
"numpy.array... | [((3773, 3788), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (3782, 3788), True, 'import numpy as np\n'), ((5107, 5147), 'numpy.zeros', 'np.zeros', (['(n, num_f)'], {'dtype': '"""complex128"""'}), "((n, num_f), dtype='complex128')\n", (5115, 5147), True, 'import numpy as np\n'), ((7085, 7112), 'numpy.transpose', 'np.transpose', (['dr', '[0, 2, 1]'], {}), '(dr, [0, 2, 1])\n', (7097, 7112), True, 'import numpy as np\n'), ((7223, 7249), 'numpy.dstack', 'np.dstack', (['[dt2, dt3, dt4]'], {}), '([dt2, dt3, dt4])\n', (7232, 7249), True, 'import numpy as np\n'), ((7851, 7880), 'numpy.linalg.norm', 'np.linalg.norm', (['b_avg'], {'axis': '(1)'}), '(b_avg, axis=1)\n', (7865, 7880), True, 'import numpy as np\n'), ((8036, 8068), 'numpy.sqrt', 'np.sqrt', (['(k_mag ** 2 - k_par ** 2)'], {}), '(k_mag ** 2 - k_par ** 2)\n', (8043, 8068), True, 'import numpy as np\n'), ((8133, 8166), 'numpy.linspace', 'np.linspace', (['(-k_max)', 'k_max', 'num_k'], {}), '(-k_max, k_max, num_k)\n', (8144, 8166), True, 'import numpy as np\n'), ((8183, 8211), 'numpy.linspace', 'np.linspace', (['(0)', 'k_max', 'num_k'], {}), '(0, k_max, num_k)\n', (8194, 8211), True, 'import numpy as np\n'), ((8487, 8511), 'numpy.zeros', 'np.zeros', (['(num_f, num_k)'], {}), '((num_f, num_k))\n', (8495, 8511), True, 'import numpy as np\n'), ((9283, 9302), 'numpy.max', 'np.max', (['power_k_x_f'], {}), '(power_k_x_f)\n', (9289, 9302), True, 'import numpy as np\n'), ((9322, 9341), 'numpy.max', 'np.max', (['power_k_y_f'], {}), '(power_k_y_f)\n', (9328, 9341), True, 'import numpy as np\n'), ((9361, 9380), 'numpy.max', 'np.max', (['power_k_z_f'], {}), '(power_k_z_f)\n', (9367, 9380), True, 'import numpy as np\n'), ((9402, 9423), 'numpy.max', 'np.max', (['power_k_mag_f'], {}), '(power_k_mag_f)\n', (9408, 9423), True, 'import numpy as np\n'), ((9675, 9691), 'numpy.arange', 'np.arange', (['num_f'], {}), '(num_f)\n', (9684, 9691), True, 'import numpy as np\n'), ((10002, 10026), 'numpy.zeros', 
'np.zeros', (['(num_k, num_k)'], {}), '((num_k, num_k))\n', (10010, 10026), True, 'import numpy as np\n'), ((10047, 10071), 'numpy.zeros', 'np.zeros', (['(num_k, num_k)'], {}), '((num_k, num_k))\n', (10055, 10071), True, 'import numpy as np\n'), ((10092, 10116), 'numpy.zeros', 'np.zeros', (['(num_k, num_k)'], {}), '((num_k, num_k))\n', (10100, 10116), True, 'import numpy as np\n'), ((10142, 10166), 'numpy.zeros', 'np.zeros', (['(num_k, num_k)'], {}), '((num_k, num_k))\n', (10150, 10166), True, 'import numpy as np\n'), ((11082, 11103), 'numpy.max', 'np.max', (['power_k_x_k_y'], {}), '(power_k_x_k_y)\n', (11088, 11103), True, 'import numpy as np\n'), ((11125, 11146), 'numpy.max', 'np.max', (['power_k_x_k_z'], {}), '(power_k_x_k_z)\n', (11131, 11146), True, 'import numpy as np\n'), ((11168, 11189), 'numpy.max', 'np.max', (['power_k_y_k_z'], {}), '(power_k_y_k_z)\n', (11174, 11189), True, 'import numpy as np\n'), ((11216, 11242), 'numpy.max', 'np.max', (['power_k_perp_k_par'], {}), '(power_k_perp_k_par)\n', (11222, 11242), True, 'import numpy as np\n'), ((12123, 12143), 'xarray.Dataset', 'xr.Dataset', (['out_dict'], {}), '(out_dict)\n', (12133, 12143), True, 'import xarray as xr\n'), ((4946, 4986), 'numpy.zeros', 'np.zeros', (['(n, num_f)'], {'dtype': '"""complex128"""'}), "((n, num_f), dtype='complex128')\n", (4954, 4986), True, 'import numpy as np\n'), ((5030, 5070), 'numpy.zeros', 'np.zeros', (['(n, num_f)'], {'dtype': '"""complex128"""'}), "((n, num_f), dtype='complex128')\n", (5038, 5070), True, 'import numpy as np\n'), ((5840, 5878), 'numpy.nanmean', 'np.nanmean', (['fk_power[lb:ub, :]'], {'axis': '(0)'}), '(fk_power[lb:ub, :], axis=0)\n', (5850, 5878), True, 'import numpy as np\n'), ((5963, 5976), 'numpy.imag', 'np.imag', (['cx12'], {}), '(cx12)\n', (5970, 5976), True, 'import numpy as np\n'), ((5978, 5991), 'numpy.real', 'np.real', (['cx12'], {}), '(cx12)\n', (5985, 5991), True, 'import numpy as np\n'), ((6015, 6028), 'numpy.imag', 'np.imag', (['cx13'], {}), 
'(cx13)\n', (6022, 6028), True, 'import numpy as np\n'), ((6030, 6043), 'numpy.real', 'np.real', (['cx13'], {}), '(cx13)\n', (6037, 6043), True, 'import numpy as np\n'), ((6067, 6080), 'numpy.imag', 'np.imag', (['cx14'], {}), '(cx14)\n', (6074, 6080), True, 'import numpy as np\n'), ((6082, 6095), 'numpy.real', 'np.real', (['cx14'], {}), '(cx14)\n', (6089, 6095), True, 'import numpy as np\n'), ((6119, 6132), 'numpy.imag', 'np.imag', (['cx23'], {}), '(cx23)\n', (6126, 6132), True, 'import numpy as np\n'), ((6134, 6147), 'numpy.real', 'np.real', (['cx23'], {}), '(cx23)\n', (6141, 6147), True, 'import numpy as np\n'), ((6171, 6184), 'numpy.imag', 'np.imag', (['cx24'], {}), '(cx24)\n', (6178, 6184), True, 'import numpy as np\n'), ((6186, 6199), 'numpy.real', 'np.real', (['cx24'], {}), '(cx24)\n', (6193, 6199), True, 'import numpy as np\n'), ((6223, 6236), 'numpy.imag', 'np.imag', (['cx34'], {}), '(cx34)\n', (6230, 6236), True, 'import numpy as np\n'), ((6238, 6251), 'numpy.real', 'np.real', (['cx34'], {}), '(cx34)\n', (6245, 6251), True, 'import numpy as np\n'), ((6278, 6314), 'numpy.tile', 'np.tile', (['w[0].frequency.data', '(n, 1)'], {}), '(w[0].frequency.data, (n, 1))\n', (6285, 6314), True, 'import numpy as np\n'), ((6895, 6915), 'numpy.zeros', 'np.zeros', (['(n, num_f)'], {}), '((n, num_f))\n', (6903, 6915), True, 'import numpy as np\n'), ((7624, 7649), 'numpy.array', 'np.array', (['[k_x, k_y, k_z]'], {}), '([k_x, k_y, k_z])\n', (7632, 7649), True, 'import numpy as np\n'), ((7678, 7715), 'numpy.tile', 'np.tile', (['b_avg.data[:, 0]', '(num_f, 1)'], {}), '(b_avg.data[:, 0], (num_f, 1))\n', (7685, 7715), True, 'import numpy as np\n'), ((7736, 7773), 'numpy.tile', 'np.tile', (['b_avg.data[:, 1]', '(num_f, 1)'], {}), '(b_avg.data[:, 1], (num_f, 1))\n', (7743, 7773), True, 'import numpy as np\n'), ((7794, 7831), 'numpy.tile', 'np.tile', (['b_avg.data[:, 2]', '(num_f, 1)'], {}), '(b_avg.data[:, 2], (num_f, 1))\n', (7801, 7831), True, 'import numpy as np\n'), ((7901, 
7931), 'numpy.tile', 'np.tile', (['b_avg_abs', '(num_f, 1)'], {}), '(b_avg_abs, (num_f, 1))\n', (7908, 7931), True, 'import numpy as np\n'), ((8082, 8095), 'numpy.max', 'np.max', (['k_mag'], {}), '(k_mag)\n', (8088, 8095), True, 'import numpy as np\n'), ((8423, 8447), 'numpy.zeros', 'np.zeros', (['(num_f, num_k)'], {}), '((num_f, num_k))\n', (8431, 8447), True, 'import numpy as np\n'), ((8853, 8878), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (8860, 8878), True, 'import numpy as np\n'), ((8918, 8943), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (8925, 8943), True, 'import numpy as np\n'), ((8983, 9008), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (8990, 9008), True, 'import numpy as np\n'), ((9049, 9074), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (9056, 9074), True, 'import numpy as np\n'), ((10591, 10616), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (10598, 10616), True, 'import numpy as np\n'), ((10666, 10691), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (10673, 10691), True, 'import numpy as np\n'), ((10741, 10766), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (10748, 10766), True, 'import numpy as np\n'), ((10827, 10852), 'numpy.real', 'np.real', (['power_avg[:, nn]'], {}), '(power_avg[:, nn])\n', (10834, 10852), True, 'import numpy as np\n'), ((4725, 4743), 'numpy.floor', 'np.floor', (['(nt / cav)'], {}), '(nt / cav)\n', (4733, 4743), True, 'import numpy as np\n'), ((4768, 4780), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4777, 4780), True, 'import numpy as np\n'), ((7000, 7016), 'numpy.hstack', 'np.hstack', (['r[1:]'], {}), '(r[1:])\n', (7009, 7016), True, 'import numpy as np\n'), ((7042, 7063), 'numpy.tile', 'np.tile', (['r[0]', '(1, 3)'], {}), '(r[0], (1, 3))\n', (7049, 7063), True, 'import numpy as np\n'), ((7311, 
7336), 'numpy.squeeze', 'np.squeeze', (['tau[:, ii, :]'], {}), '(tau[:, ii, :])\n', (7321, 7336), True, 'import numpy as np\n'), ((9776, 9791), 'numpy.min', 'np.min', (['f_range'], {}), '(f_range)\n', (9782, 9791), True, 'import numpy as np\n'), ((9848, 9863), 'numpy.max', 'np.max', (['f_range'], {}), '(f_range)\n', (9854, 9863), True, 'import numpy as np\n'), ((4689, 4707), 'numpy.conj', 'np.conj', (['w[i].data'], {}), '(w[i].data)\n', (4696, 4707), True, 'import numpy as np\n'), ((5315, 5343), 'numpy.conj', 'np.conj', (['w[1].data[lb:ub, :]'], {}), '(w[1].data[lb:ub, :])\n', (5322, 5343), True, 'import numpy as np\n'), ((5407, 5435), 'numpy.conj', 'np.conj', (['w[2].data[lb:ub, :]'], {}), '(w[2].data[lb:ub, :])\n', (5414, 5435), True, 'import numpy as np\n'), ((5499, 5527), 'numpy.conj', 'np.conj', (['w[3].data[lb:ub, :]'], {}), '(w[3].data[lb:ub, :])\n', (5506, 5527), True, 'import numpy as np\n'), ((5591, 5619), 'numpy.conj', 'np.conj', (['w[2].data[lb:ub, :]'], {}), '(w[2].data[lb:ub, :])\n', (5598, 5619), True, 'import numpy as np\n'), ((5683, 5711), 'numpy.conj', 'np.conj', (['w[3].data[lb:ub, :]'], {}), '(w[3].data[lb:ub, :])\n', (5690, 5711), True, 'import numpy as np\n'), ((5775, 5803), 'numpy.conj', 'np.conj', (['w[3].data[lb:ub, :]'], {}), '(w[3].data[lb:ub, :])\n', (5782, 5803), True, 'import numpy as np\n'), ((8562, 8597), 'numpy.floor', 'np.floor', (['((k_x[:, nn] - k_min) / dk)'], {}), '((k_x[:, nn] - k_min) / dk)\n', (8570, 8597), True, 'import numpy as np\n'), ((8631, 8666), 'numpy.floor', 'np.floor', (['((k_y[:, nn] - k_min) / dk)'], {}), '((k_y[:, nn] - k_min) / dk)\n', (8639, 8666), True, 'import numpy as np\n'), ((8700, 8735), 'numpy.floor', 'np.floor', (['((k_z[:, nn] - k_min) / dk)'], {}), '((k_z[:, nn] - k_min) / dk)\n', (8708, 8735), True, 'import numpy as np\n'), ((8767, 8798), 'numpy.floor', 'np.floor', (['(k_mag[:, nn] / dk_mag)'], {}), '(k_mag[:, nn] / dk_mag)\n', (8775, 8798), True, 'import numpy as np\n'), ((10210, 10245), 
'numpy.floor', 'np.floor', (['((k_x[:, nn] - k_min) / dk)'], {}), '((k_x[:, nn] - k_min) / dk)\n', (10218, 10245), True, 'import numpy as np\n'), ((10279, 10314), 'numpy.floor', 'np.floor', (['((k_y[:, nn] - k_min) / dk)'], {}), '((k_y[:, nn] - k_min) / dk)\n', (10287, 10314), True, 'import numpy as np\n'), ((10348, 10383), 'numpy.floor', 'np.floor', (['((k_z[:, nn] - k_min) / dk)'], {}), '((k_z[:, nn] - k_min) / dk)\n', (10356, 10383), True, 'import numpy as np\n'), ((10420, 10457), 'numpy.floor', 'np.floor', (['((k_par[:, nn] - k_min) / dk)'], {}), '((k_par[:, nn] - k_min) / dk)\n', (10428, 10457), True, 'import numpy as np\n'), ((10494, 10526), 'numpy.floor', 'np.floor', (['(k_perp[:, nn] / dk_mag)'], {}), '(k_perp[:, nn] / dk_mag)\n', (10502, 10526), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from doubtlab.reason import RelativeDifferenceReason
@pytest.mark.parametrize("t, s", [(0.05, 4), (0.2, 3), (0.4, 2), (0.6, 1)])
def test_from_predict(t, s):
    """Check that `from_predict` flags exactly `s` rows at threshold `t`."""
    y = np.ones(5)
    preds = np.array([1.0, 1.1, 1.3, 1.5, 1.7])
    flagged = RelativeDifferenceReason.from_predict(pred=preds, y=y, threshold=t)
    assert np.sum(flagged) == s
def test_zero_error():
    """Ensure a ValueError is raised when the target contains `y=0`."""
    with pytest.raises(ValueError):
        RelativeDifferenceReason.from_predict(
            pred=np.array([1.0]), y=np.array([0.0]), threshold=0.1
        )
| [
"numpy.sum",
"doubtlab.reason.RelativeDifferenceReason.from_predict",
"pytest.raises",
"numpy.array",
"pytest.mark.parametrize"
] | [((90, 164), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""t, s"""', '[(0.05, 4), (0.2, 3), (0.4, 2), (0.6, 1)]'], {}), "('t, s', [(0.05, 4), (0.2, 3), (0.4, 2), (0.6, 1)])\n", (113, 164), False, 'import pytest\n'), ((255, 290), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0, 1.0])\n', (263, 290), True, 'import numpy as np\n'), ((303, 338), 'numpy.array', 'np.array', (['[1.0, 1.1, 1.3, 1.5, 1.7]'], {}), '([1.0, 1.1, 1.3, 1.5, 1.7])\n', (311, 338), True, 'import numpy as np\n'), ((356, 423), 'doubtlab.reason.RelativeDifferenceReason.from_predict', 'RelativeDifferenceReason.from_predict', ([], {'pred': 'preds', 'y': 'y', 'threshold': 't'}), '(pred=preds, y=y, threshold=t)\n', (393, 423), False, 'from doubtlab.reason import RelativeDifferenceReason\n'), ((534, 549), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (542, 549), True, 'import numpy as np\n'), ((562, 577), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (570, 577), True, 'import numpy as np\n'), ((435, 452), 'numpy.sum', 'np.sum', (['predicate'], {}), '(predicate)\n', (441, 452), True, 'import numpy as np\n'), ((587, 612), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (600, 612), False, 'import pytest\n'), ((622, 691), 'doubtlab.reason.RelativeDifferenceReason.from_predict', 'RelativeDifferenceReason.from_predict', ([], {'pred': 'preds', 'y': 'y', 'threshold': '(0.1)'}), '(pred=preds, y=y, threshold=0.1)\n', (659, 691), False, 'from doubtlab.reason import RelativeDifferenceReason\n')] |
import pickle
import numpy as np
import matplotlib.pyplot as plt
def load_label_names():
    """Return the ten CIFAR-10 class names, indexed by label id."""
    return [
        'airplane',
        'automobile',
        'bird',
        'cat',
        'deer',
        'dog',
        'frog',
        'horse',
        'ship',
        'truck',
    ]
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    """Load one CIFAR-10 training batch and return ``(features, labels)``.

    Features come back as an (N, 32, 32, 3) array in HWC channel order.
    """
    batch_path = cifar10_dataset_folder_path + '/data_batch_' + str(batch_id)
    # The CIFAR-10 pickles were written by Python 2, hence the latin1 decoding.
    with open(batch_path, mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')

    raw = batch['data']
    # Each row is a flat 3072-element vector laid out channel-first
    # (3 x 32 x 32); convert to channel-last images.
    features = raw.reshape((len(raw), 3, 32, 32)).transpose(0, 2, 3, 1)
    return features, batch['labels']
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
    """Print summary statistics for one batch and show one sample image."""
    features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)

    # Guard against an out-of-range sample index before touching the data.
    if not (0 <= sample_id < len(features)):
        print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
        return None

    print('\nStats of batch #{}:'.format(batch_id))
    print('# of Samples: {}\n'.format(len(features)))

    label_names = load_label_names()
    # Count how many samples carry each label id.
    ids, counts = np.unique(labels, return_counts=True)
    label_counts = dict(zip(ids, counts))
    for key, value in label_counts.items():
        print('Label Counts of [{}]({}) : {}'.format(key, label_names[key].upper(), value))

    sample_image = features[sample_id]
    sample_label = labels[sample_id]
    print('\nExample of Image {}:'.format(sample_id))
    print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
    print('Image - Shape: {}'.format(sample_image.shape))
    print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
    plt.imshow(sample_image)
"matplotlib.pyplot.imshow",
"pickle.load",
"numpy.unique"
] | [((1689, 1713), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sample_image'], {}), '(sample_image)\n', (1699, 1713), True, 'import matplotlib.pyplot as plt\n'), ((412, 448), 'pickle.load', 'pickle.load', (['file'], {'encoding': '"""latin1"""'}), "(file, encoding='latin1')\n", (423, 448), False, 'import pickle\n'), ((1119, 1156), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (1128, 1156), True, 'import numpy as np\n')] |
import numpy as np
from OpenGL.GL import *
from obj_loader import Obj
class Mesh:
    """GPU-side mesh: uploads an OBJ model's vertex positions and texture
    coordinates into two OpenGL array buffers."""

    def __init__(self, filename):
        """Parse ``filename`` as an OBJ file and create the two VBOs."""
        self.obj = Obj(filename)

        # Vertex positions -> first buffer object.
        self.verticeBufferId = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.verticeBufferId)
        vertex_data = np.array(self.obj.plain_vertecies, dtype='float32')
        glBufferData(GL_ARRAY_BUFFER, vertex_data, GL_STATIC_DRAW)
        # NOTE(review): assumes one list entry per vertex (hence * 3 scalar
        # components) — confirm against the Obj loader's output layout.
        self.length = len(vertex_data) * 3

        # Texture coordinates -> second buffer object.
        self.textureCoordBufferId = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.textureCoordBufferId)
        texcoord_data = np.array(self.obj.plain_texcoords, dtype='float32')
        glBufferData(GL_ARRAY_BUFFER, texcoord_data, GL_STATIC_DRAW)
| [
"numpy.array",
"obj_loader.Obj"
] | [((137, 150), 'obj_loader.Obj', 'Obj', (['filename'], {}), '(filename)\n', (140, 150), False, 'from obj_loader import Obj\n'), ((278, 329), 'numpy.array', 'np.array', (['self.obj.plain_vertecies'], {'dtype': '"""float32"""'}), "(self.obj.plain_vertecies, dtype='float32')\n", (286, 329), True, 'import numpy as np\n'), ((569, 620), 'numpy.array', 'np.array', (['self.obj.plain_texcoords'], {'dtype': '"""float32"""'}), "(self.obj.plain_texcoords, dtype='float32')\n", (577, 620), True, 'import numpy as np\n')] |
from numpy import random,zeros,ones,divide
from math import pow,sqrt
class Legendre:
    """Legendre-polynomial helpers used to synthesise noisy target functions."""

    def legendreIterativo(self, grau, x):
        """Evaluate the Legendre polynomial of degree ``grau`` at ``x``.

        For degrees >= 2, ``x`` must be a numpy array (the recurrence is
        seeded with ``ones(x.shape)``).
        """
        if grau == 0:
            return 1.0
        if grau == 1:
            return x
        # Bonnet's recurrence: n*P_n(x) = (2n-1)*x*P_{n-1}(x) - (n-1)*P_{n-2}(x)
        p_prev, p_curr = ones(x.shape), x
        for n in range(2, grau + 1):
            p_next = ((2 * n - 1) / n) * x * p_curr - ((n - 1) / n) * p_prev
            p_prev, p_curr = p_curr, p_next
        return p_curr

    def geraCoeficientes(self, Qf):
        """Draw Gaussian coefficients up to degree ``Qf`` and rescale them so
        that ``sum_l c_l^2 / (2l + 1)`` equals 0.5."""
        coeficientes = random.randn(Qf + 1)
        k = 0
        for l in range(Qf + 1):
            k = k + pow(coeficientes[l], 2) / (2 * l + 1)
        return divide(coeficientes, sqrt(2 * k))

    def gerandoFuncaoAlvo(self, xArray, sigma, nlist, Qf, i, coeficientes):
        """Build target values: a Legendre expansion evaluated at ``xArray``
        plus Gaussian noise scaled by ``sqrt(sigma)``."""
        eArray = random.randn(nlist[i])
        yArray = zeros(nlist[i])
        for l in range(nlist[i]):
            expansion = 0
            for m in range(Qf + 1):
                expansion = expansion + coeficientes[m] * self.legendreIterativo(m, xArray[l])
            yArray[l] = expansion + sqrt(sigma) * eArray[l]
        return yArray
| [
"math.pow",
"math.sqrt",
"numpy.random.randn",
"numpy.zeros",
"numpy.ones"
] | [((620, 640), 'numpy.random.randn', 'random.randn', (['(Qf + 1)'], {}), '(Qf + 1)\n', (632, 640), False, 'from numpy import random, zeros, ones, divide\n'), ((876, 898), 'numpy.random.randn', 'random.randn', (['nlist[i]'], {}), '(nlist[i])\n', (888, 898), False, 'from numpy import random, zeros, ones, divide\n'), ((916, 931), 'numpy.zeros', 'zeros', (['nlist[i]'], {}), '(nlist[i])\n', (921, 931), False, 'from numpy import random, zeros, ones, divide\n'), ((777, 788), 'math.sqrt', 'sqrt', (['(2 * k)'], {}), '(2 * k)\n', (781, 788), False, 'from math import pow, sqrt\n'), ((279, 292), 'numpy.ones', 'ones', (['x.shape'], {}), '(x.shape)\n', (283, 292), False, 'from numpy import random, zeros, ones, divide\n'), ((704, 727), 'math.pow', 'pow', (['coeficientes[l]', '(2)'], {}), '(coeficientes[l], 2)\n', (707, 727), False, 'from math import pow, sqrt\n'), ((1143, 1154), 'math.sqrt', 'sqrt', (['sigma'], {}), '(sigma)\n', (1147, 1154), False, 'from math import pow, sqrt\n')] |
import os
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
import lip.directories as dirs
from lip import my_config
import lip.network.utils as utils
from lip.network import mnist
# setup
device = my_config.device
mode = 'mnist'
main_dir = dirs.mnist_dir
net = mnist.mnist_net()
net = net.to(device)
net.eval()
# load image
#filename = os.path.join(main_dir, '2.png')
filename = os.path.join(main_dir, '8.png')
x = Image.open(filename)
x = mnist.transform_test(x)
x = torch.unsqueeze(x, 0)
x = x.to(device)
layers = mnist.get_layers(net)
# evaluate the nominal input
y = net(x)
# gradient ascent
n_step = 30
step_size = 10**-1
ind = 4 # ind of output to ascend
xc = x
eps_step = np.full(n_step, np.nan)
lb_step = np.full(n_step, np.nan)
for i in range(n_step):
J = utils.jacobian(net, xc)
J0 = J[ind,:]
pert = J0.view(x.shape)
xc = xc + step_size*pert # "xc += pert" throws an error
yc = net(xc)
sm = nn.functional.softmax(yc, dim=1)
print('pred', sm[0,ind].item())
eps_step[i] = torch.norm(x - xc)
lb_step[i] = torch.norm(y - yc)/torch.norm(x - xc)
print('eps ', eps_step[i].item())
print('lb ', lb_step[i].item())
| [
"numpy.full",
"lip.network.utils.jacobian",
"torch.norm",
"lip.network.mnist.transform_test",
"PIL.Image.open",
"lip.network.mnist.get_layers",
"torch.nn.functional.softmax",
"torch.unsqueeze",
"os.path.join",
"lip.network.mnist.mnist_net"
] | [((292, 309), 'lip.network.mnist.mnist_net', 'mnist.mnist_net', ([], {}), '()\n', (307, 309), False, 'from lip.network import mnist\n'), ((411, 442), 'os.path.join', 'os.path.join', (['main_dir', '"""8.png"""'], {}), "(main_dir, '8.png')\n", (423, 442), False, 'import os\n'), ((447, 467), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (457, 467), False, 'from PIL import Image\n'), ((472, 495), 'lip.network.mnist.transform_test', 'mnist.transform_test', (['x'], {}), '(x)\n', (492, 495), False, 'from lip.network import mnist\n'), ((500, 521), 'torch.unsqueeze', 'torch.unsqueeze', (['x', '(0)'], {}), '(x, 0)\n', (515, 521), False, 'import torch\n'), ((549, 570), 'lip.network.mnist.get_layers', 'mnist.get_layers', (['net'], {}), '(net)\n', (565, 570), False, 'from lip.network import mnist\n'), ((714, 737), 'numpy.full', 'np.full', (['n_step', 'np.nan'], {}), '(n_step, np.nan)\n', (721, 737), True, 'import numpy as np\n'), ((748, 771), 'numpy.full', 'np.full', (['n_step', 'np.nan'], {}), '(n_step, np.nan)\n', (755, 771), True, 'import numpy as np\n'), ((804, 827), 'lip.network.utils.jacobian', 'utils.jacobian', (['net', 'xc'], {}), '(net, xc)\n', (818, 827), True, 'import lip.network.utils as utils\n'), ((960, 992), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['yc'], {'dim': '(1)'}), '(yc, dim=1)\n', (981, 992), True, 'import torch.nn as nn\n'), ((1047, 1065), 'torch.norm', 'torch.norm', (['(x - xc)'], {}), '(x - xc)\n', (1057, 1065), False, 'import torch\n'), ((1083, 1101), 'torch.norm', 'torch.norm', (['(y - yc)'], {}), '(y - yc)\n', (1093, 1101), False, 'import torch\n'), ((1102, 1120), 'torch.norm', 'torch.norm', (['(x - xc)'], {}), '(x - xc)\n', (1112, 1120), False, 'import torch\n')] |
from tkinter import *
from tkinter import ttk # 导入ttk模块,因为下拉菜单控件在ttk中
import numpy as np
from tkinter import messagebox
# Window size and position
root = Tk()
root.title("calculator")
root.geometry("360x180+600+200")
# Uniform widget width
width1_set = 10
# Labels: first row (stress) and third row (strength)
label1 = Label(root,text = "应力")   # "stress"
label1.grid(row = 0,column = 1)
label2 = Label(root,text = "强度")   # "strength"
label2.grid(row = 2,column = 1)
label11 = Label(root,text = "参数1")  # "parameter 1"
label11.grid(row = 0,column = 2)
label12 = Label(root,text = "参数2")  # "parameter 2"
label12.grid(row = 0,column = 3)
label13 = Label(root,text = "参数3")  # "parameter 3"
label13.grid(row = 0,column = 4)
label21 = Label(root,text = "参数1")
label21.grid(row = 2,column = 2)
label22 = Label(root,text = "参数2")
label22.grid(row = 2,column = 3)
label23 = Label(root,text = "参数3")
label23.grid(row = 2,column = 4)
# Comboboxes selecting the stress/strength distribution types
choose_type1 = ttk.Combobox(root,textvariable=StringVar(),width = width1_set,state = 'readonly')
choose_type1["values"] = ("正态分布","对数正态分布","指数分布","威布尔分布")  # normal / lognormal / exponential / Weibull
choose_type1.grid(row = 1,column = 1)
choose_type1.current(0)
choose_type2 = ttk.Combobox(root,textvariable=StringVar(),width = width1_set,state = 'readonly')
choose_type2["values"] = ("正态分布","对数正态分布","指数分布","威布尔分布")
choose_type2.grid(row = 3,column = 1)
choose_type2.current(0)
# Entry widgets for the distribution parameters and the result output
entry11 = Entry(root,width = width1_set,text = 0)
entry11.grid(row = 1,column = 2)
entry11.insert(0,"0")
entry12 = Entry(root,width = width1_set,text = 1)
entry12.grid(row = 1,column = 3)
entry12.insert(0,"1")
entry13 = Entry(root,width = width1_set)
entry13.grid(row = 1,column = 4)
entry21 = Entry(root,width = width1_set)
entry21.insert(0,"0")
entry21.grid(row = 3,column = 2)
entry22 = Entry(root,width = width1_set)
entry22.insert(0,"1")
entry22.grid(row = 3,column = 3)
entry23 = Entry(root,width = width1_set)
entry23.grid(row = 3,column = 4)
entry41 = Entry(root,width = width1_set,state = 'normal')  # result display
entry41.grid(row = 4,column = 2)
# Update parameter labels / entry states when a dropdown selection changes
def choosing_type1(event):
    """Relabel the stress-parameter row to match the selected distribution."""
    print("choosing type 1")
    # distribution -> (label for param 1, label for param 2, param 2 enabled?)
    layouts = {
        "正态分布": ("均值μ", "方差σ^2", True),
        "对数正态分布": ("均值μ", "方差σ^2", True),
        "指数分布": ("参数λ", "", False),
        "威布尔分布": ("形状参数k", "比例参数λ", True),
    }
    selection = choose_type1.get()
    if selection in layouts:
        text1, text2, two_params = layouts[selection]
        label11["text"] = text1
        label12["text"] = text2
        label13["text"] = ""
        entry12["state"] = "normal" if two_params else "disable"
        entry13["state"] = "disable"
def choosing_type2(event):
    """Relabel the strength-parameter row to match the selected distribution."""
    print("choosing type 2")
    # distribution -> (label for param 1, label for param 2, param 2 enabled?)
    layouts = {
        "正态分布": ("均值μ", "方差σ^2", True),
        "对数正态分布": ("均值μ", "方差σ^2", True),
        "指数分布": ("参数λ", "", False),
        "威布尔分布": ("形状参数k", "比例参数λ", True),
    }
    selection = choose_type2.get()
    if selection in layouts:
        text1, text2, two_params = layouts[selection]
        label21["text"] = text1
        label22["text"] = text2
        label23["text"] = ""
        entry22["state"] = "normal" if two_params else "disable"
        entry23["state"] = "disable"
# Wrap numpy's Weibull sampler so a scale parameter can be applied
def Nweibull(a, scale, size):
    """Draw ``size`` samples from a Weibull(a) distribution scaled by ``scale``.

    ``np.random.weibull`` only accepts the shape parameter, so the scale
    (lambda) is applied by multiplication.
    """
    samples = np.random.weibull(a, size)
    return scale * samples
# Monte-Carlo reliability calculation
def calculate():
    """Estimate P(strength >= stress) by Monte-Carlo: draw 1000 samples from
    each configured distribution and report the fraction of trials in which
    the strength sample covers the stress sample. The result is written to
    ``entry41``.
    """
    print("calculate")
    test_number = 1000
    # Read the four parameter entry fields
    a = entry11.get()
    b = entry12.get()
    m = entry21.get()
    n = entry22.get()
    # Validate inputs and draw the stress samples.
    # BUGFIX: return after each warning dialog — previously execution fell
    # through to the comparison loop with `list_of_stress` /
    # `list_of_strength` undefined, raising NameError.
    if choose_type1.get() == "正态分布":
        if (a == '')|(b == ''):
            messagebox.showinfo('提示', '请输入正态分布的两个参数')
            return
        # NOTE(review): the UI labels the second parameter as the variance,
        # but np.random.normal expects the standard deviation — confirm.
        list_of_stress = np.random.normal(float(a),float(b),test_number)
    elif choose_type1.get() == "对数正态分布":
        if (a == '')|(b == ''):
            messagebox.showinfo('提示', '请输入对数正态分布的两个参数')
            return
        list_of_stress = np.random.lognormal(float(a),float(b),test_number)
    elif choose_type1.get() == "指数分布":
        if (a == ''):
            messagebox.showinfo('提示', '请输入指数分布的参数')
            return
        # NOTE(review): np.random.exponential takes the *scale* (1/lambda)
        # while the UI labels this parameter lambda — confirm the meaning.
        list_of_stress = np.random.exponential(float(a),test_number)
    elif choose_type1.get() == "威布尔分布":
        if (a == '')|(b == ''):
            messagebox.showinfo('提示', '请输入威布尔分布的两个参数')
            return
        list_of_stress = Nweibull(float(a),float(b),test_number)
    # Validate inputs and draw the strength samples.
    if choose_type2.get() == "正态分布":
        if (m == '')|(n == ''):
            messagebox.showinfo('提示', '请输入正态分布的两个参数')
            return
        list_of_strength = np.random.normal(float(m),float(n),test_number)
    elif choose_type2.get() == "对数正态分布":
        if (m == '')|(n == ''):
            messagebox.showinfo('提示', '请输入对数正态分布的两个参数')
            return
        list_of_strength = np.random.lognormal(float(m),float(n),test_number)
    elif choose_type2.get() == "指数分布":
        if (m == ''):
            messagebox.showinfo('提示', '请输入指数分布的参数')
            return
        list_of_strength = np.random.exponential(float(m),test_number)
    elif choose_type2.get() == "威布尔分布":
        if (m == '')|( n == ''):
            messagebox.showinfo('提示', '请输入威布尔分布的两个参数')
            return
        list_of_strength = Nweibull(float(m),float(n),test_number)
    # Compare sample pairs and count the trials where strength holds
    j = 0
    for i in range(test_number):
        if list_of_strength[i] >= list_of_stress[i]:
            j += 1
    # Report the estimated reliability
    print(j/test_number)
    entry41.delete(0,END)
    entry41.insert(0,str(j/test_number))
# Wire the compute button to the calculation routine
button1 = Button(root,width = width1_set,text = "计算",command = calculate)
button1.grid(row = 4,column = 1)
# Re-label the parameter fields whenever a distribution type is selected
choose_type1.bind("<<ComboboxSelected>>",choosing_type1)
choose_type2.bind("<<ComboboxSelected>>",choosing_type2)
mainloop()
"numpy.random.weibull",
"tkinter.messagebox.showinfo"
] | [((3857, 3883), 'numpy.random.weibull', 'np.random.weibull', (['a', 'size'], {}), '(a, size)\n', (3874, 3883), True, 'import numpy as np\n'), ((4195, 4236), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入正态分布的两个参数"""'], {}), "('提示', '请输入正态分布的两个参数')\n", (4214, 4236), False, 'from tkinter import messagebox\n'), ((5066, 5107), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入正态分布的两个参数"""'], {}), "('提示', '请输入正态分布的两个参数')\n", (5085, 5107), False, 'from tkinter import messagebox\n'), ((4418, 4461), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入对数正态分布的两个参数"""'], {}), "('提示', '请输入对数正态分布的两个参数')\n", (4437, 4461), False, 'from tkinter import messagebox\n'), ((5291, 5334), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入对数正态分布的两个参数"""'], {}), "('提示', '请输入对数正态分布的两个参数')\n", (5310, 5334), False, 'from tkinter import messagebox\n'), ((4634, 4673), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入指数分布的参数"""'], {}), "('提示', '请输入指数分布的参数')\n", (4653, 4673), False, 'from tkinter import messagebox\n'), ((5509, 5548), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入指数分布的参数"""'], {}), "('提示', '请输入指数分布的参数')\n", (5528, 5548), False, 'from tkinter import messagebox\n'), ((4850, 4892), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入威布尔分布的两个参数"""'], {}), "('提示', '请输入威布尔分布的两个参数')\n", (4869, 4892), False, 'from tkinter import messagebox\n'), ((5728, 5770), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""提示"""', '"""请输入威布尔分布的两个参数"""'], {}), "('提示', '请输入威布尔分布的两个参数')\n", (5747, 5770), False, 'from tkinter import messagebox\n')] |
import numpy as np
import matplotlib.pyplot as plt
from random import choice
import matplotlib as mpl
class Layer:
    """A single network layer holding pre- and post-activation values.

    node - an int specifying the size of the layer
    """
    def __init__(self, node = 1):
        # Post-activation outputs start at zero; pre-activation inputs are
        # initialised with uniform random samples in [0, 1).
        self.outputs = np.zeros(node)
        self.inputs = np.random.random_sample(node)
class NeuralNet:
    """A minimal fully-connected feed-forward network with sigmoid
    activations, trained by stochastic gradient descent.

    layers - a list of ints specifying the shape of the layer at the
    corresponding index, e.g. [2, 2, 1] for a 2-2-1 network.
    """
    def __init__(self, layers):
        # Ensure layers is a list of layer sizes
        assert isinstance(layers, list)
        # Layers of neural network
        self.layers = [Layer(x) for x in layers]
        # Index 0 is a placeholder so that weights[i]/bias[i] feed layer i.
        self.weights = [np.array([0])]
        self.bias = [np.array([0])]
        # Initialize random values for weights -- [0, 1) -- and zeros for biases
        for i in range(len(layers) - 1):
            self.weights.append(np.random.random_sample((layers[i + 1], layers[i])))
            self.bias.append(np.zeros((self.weights[i + 1].shape[0], 1)))

    def forwardProp(self, xTrain):
        """Propagate input values forward through every layer."""
        self.layers[0].outputs = xTrain
        for i in range(1, len(self.layers)):
            prevLayer = self.layers[i - 1]
            layer = self.layers[i]
            layer.inputs = NeuralNet.weightedSum(self.weights[i], prevLayer.outputs, self.bias[i])
            layer.outputs = NeuralNet.sigmoid(layer.inputs)

    def backProp(self, xTrain, yTrain, learnRate):
        """Propagate the error backwards and update weights/biases in place."""
        # Get inputs and outputs for layers
        self.forwardProp(xTrain)
        # Derivative of cost function w.r.t. weighted input for each node
        errorSignal = []
        for i in reversed(range(len(self.layers) - 1)):
            layer = self.layers[i + 1]
            m, n, o = i, i + 1, i + 2
            if i == (len(self.layers) - 2):
                # Error signal of the output layer seeds the backward pass
                errorSignal.insert(0, NeuralNet.costDerWeightedSum(layer.outputs, yTrain, layer.inputs))
            else:
                # Hidden layers receive the signal through the next layer's weights
                weightsNext = self.weights[o]
                errorSignal.insert(0, NeuralNet.errorSignalHidden(weightsNext, errorSignal[0], layer.inputs))
            # Derivatives used to update bias and weights
            prevLayerOut = self.layers[m].outputs
            deltaBias = errorSignal[0]
            deltaWeights = NeuralNet.costDerWeights(errorSignal[0], prevLayerOut)
            # Gradient-descent step for this layer
            self.weights[n] = self.weights[n] - (learnRate * deltaWeights)
            self.bias[n] = self.bias[n] - (learnRate * deltaBias)

    def train(self, xTrain, yTrain, epochs, learnRate):
        """Train with SGD: each epoch draws len(xTrain) random samples."""
        for epoch in range(epochs):
            for _ in range(len(xTrain)):
                i = choice(range(len(xTrain)))
                input, output = np.array(xTrain[i]), np.array(yTrain[i])
                self.backProp(input, output, learnRate)

    def predict(self, input):
        """Run a forward pass and return the output-layer activations."""
        self.forwardProp(input)
        return self.layers[-1].outputs

    def printNet(self):
        """Print output values of each node in each layer."""
        for i in range(len(self.layers)):
            print(str(i) + " : " + str(self.layers[i].outputs))

    @staticmethod
    def sigmoid(x):
        """Sigmoid activation 1 / (1 + e^-x)."""
        # float128 avoids overflow in exp for large-magnitude inputs
        x = np.array(x, dtype=np.float128)
        return 1 / (1 + np.exp(-x))

    @staticmethod
    def sigmoidDerWeight(x):
        """Derivative of the sigmoid: f(x) * (1 - f(x))."""
        return NeuralNet.sigmoid(x) * (1 - NeuralNet.sigmoid(x))

    @staticmethod
    def weightedSum(weights, inputs, bias):
        """W * I + b, reshaped to the bias' column-vector shape."""
        return weights.dot(inputs.reshape(weights.shape[-1])).reshape(bias.shape) + bias

    @staticmethod
    def costDerActivation(output, expected):
        """Derivative of the quadratic cost w.r.t. the activation: O - E."""
        return output - expected.reshape(output.shape)

    @staticmethod
    def costDerWeightedSum(output, expected, inputs):
        """Error signal of the last layer: (O - E) * f'(z)."""
        temp = NeuralNet.costDerActivation(output, expected)
        return temp * NeuralNet.sigmoidDerWeight(inputs).reshape(temp.shape)

    @staticmethod
    def costDerWeights(errorSignals, prevLayerOut):
        """Derivative of cost w.r.t. the weights feeding this layer."""
        return np.dot(errorSignals, np.atleast_2d(prevLayerOut.T))

    @staticmethod
    def errorSignalHidden(weightsNext, errorNext, inputs):
        """Error signal for a hidden layer, back-propagated from the next one."""
        return weightsNext.T.dot(errorNext) * NeuralNet.sigmoidDerWeight(inputs)

    def error(self, inputs, expected):
        """Return half the summed squared error of the net over a data set.

        BUGFIX: the forward pass must run *before* the output layer is read;
        previously stale outputs from the preceding sample were scored.
        """
        cost = np.array([])
        for i in range(len(inputs)):
            self.forwardProp(inputs[i])
            outputs = np.array(self.layers[-1].outputs, dtype=np.float128)
            cost = np.append(cost, (outputs - expected[i]) ** 2)
        return np.sum(cost) / 2
if __name__ == '__main__':
    # 2x2x1 neural network trained on all possible inputs of the XOR function
    network = NeuralNet([2, 2, 1])
    xTrain, yTrain = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]), np.array([[0], [1], [1], [0]])
    # Empirically reasonable starting values for epochs and learning rate
    epochs = 2500
    learnRate = 0.8
    # Train for XOR function
    network.train(xTrain, yTrain, epochs, learnRate)

    # Grid of test inputs in [0, 1]^2
    x1 = np.linspace(0, 1, 11) * np.ones((11, 1))
    x2 = x1.T
    x1, x2 = x1.flatten(), x2.flatten()
    out = np.array([])
    # Sub plots
    fig, axs = plt.subplots(2, 1, constrained_layout=True)
    fig.suptitle('Neural Networks', fontsize=16)
    # XOR scatter plot
    for x, y in zip(x1, x2):
        out = np.append(out, network.predict(np.array([x, y])))
    axs[0].set_title("XOR Function")
    axs[0].set_xlabel("Input 1")
    axs[0].set_ylabel("Input 2")
    axs[0].scatter(x1, x2, c=out, s=10, cmap="gray_r")

    # Random training data around the parabola y = x^2
    x = 20 * (2 * np.random.random_sample(1000) - 1)
    temp = 2 * np.random.random_sample(1000) - 1
    y = (x + temp) ** 2
    xTrain = np.vstack((x, y)).T
    yTrain = np.array([])
    # Labels: [1, 0] for points below the parabola, [0, 1] for points above
    for x in xTrain:
        if (x[0] ** 2) > x[1]:
            yTrain = np.append(yTrain, [1, 0])
        else:
            yTrain = np.append(yTrain, [0, 1])
    yTrain = yTrain.reshape(1000, 2)

    # A fairly good architecture found by experimentation
    network1 = NeuralNet([2, 8, 2])
    # Values producing a reasonable fit to the x^2 function
    epochs = 2400
    learnRates = [0.001 * (2 ** x) for x in range(4)]
    accumulated = None
    for learnRate in learnRates:
        network1.train(xTrain, yTrain, epochs, learnRate)
        # Test data to plot
        x1 = np.arange(-10, 10, 0.25) * np.ones((80, 1))
        x2 = x1.T
        x1, x2 = x1.flatten(), x2.flatten()
        out = np.array([])
        for x, y in zip(x1, x2):
            # Convert the 2-d output to a scalar and spread the values
            out = np.append(out, np.std(network1.predict(np.array([x, y]))))
        # Normalize the output to [0, 1] (avoid shadowing builtins min/max)
        lo, hi = out.min(), out.max()
        out = (out - lo) / (hi - lo)
        # BUGFIX: accumulate each learning rate's output exactly once —
        # previously the first iteration's output was counted twice.
        accumulated = out if accumulated is None else accumulated + out
    # Average over the tested learning rates
    out = accumulated / len(learnRates)
    # Graph
    axs[1].set_title("X^2 Function")
    axs[1].set_xlabel("X")
    axs[1].set_ylabel("Y")
    axs[1].scatter(x1, x2, c=out, s=5, cmap="Blues_r")
    axs[1].set_xlim([-10, 10])
    axs[1].set_ylim([-10, 10])
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.random.random_sample",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.append",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.exp",
"matplotlib.pyplot.subplots",
"numpy.vstack",
"numpy.atleast_2d"
] | [((5599, 5611), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5607, 5611), True, 'import numpy as np\n'), ((5643, 5686), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'constrained_layout': '(True)'}), '(2, 1, constrained_layout=True)\n', (5655, 5686), True, 'import matplotlib.pyplot as plt\n'), ((6223, 6235), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6231, 6235), True, 'import numpy as np\n'), ((7683, 7693), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7691, 7693), True, 'import matplotlib.pyplot as plt\n'), ((239, 268), 'numpy.random.random_sample', 'np.random.random_sample', (['node'], {}), '(node)\n', (262, 268), True, 'import numpy as np\n'), ((292, 306), 'numpy.zeros', 'np.zeros', (['node'], {}), '(node)\n', (300, 306), True, 'import numpy as np\n'), ((3428, 3458), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float128'}), '(x, dtype=np.float128)\n', (3436, 3458), True, 'import numpy as np\n'), ((4745, 4757), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4753, 4757), True, 'import numpy as np\n'), ((5166, 5208), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 0], [1, 1]]'], {}), '([[0, 0], [0, 1], [1, 0], [1, 1]])\n', (5174, 5208), True, 'import numpy as np\n'), ((5210, 5240), 'numpy.array', 'np.array', (['[[0], [1], [1], [0]]'], {}), '([[0], [1], [1], [0]])\n', (5218, 5240), True, 'import numpy as np\n'), ((5496, 5517), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (5507, 5517), True, 'import numpy as np\n'), ((5518, 5534), 'numpy.ones', 'np.ones', (['(11, 1)'], {}), '((11, 1))\n', (5525, 5534), True, 'import numpy as np\n'), ((6190, 6207), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (6199, 6207), True, 'import numpy as np\n'), ((6987, 6999), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6995, 6999), True, 'import numpy as np\n'), ((649, 662), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (657, 662), True, 'import numpy as np\n'), 
((685, 698), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (693, 698), True, 'import numpy as np\n'), ((4447, 4476), 'numpy.atleast_2d', 'np.atleast_2d', (['prevLayerOut.T'], {}), '(prevLayerOut.T)\n', (4460, 4476), True, 'import numpy as np\n'), ((4817, 4869), 'numpy.array', 'np.array', (['self.layers[-1].outputs'], {'dtype': 'np.float128'}), '(self.layers[-1].outputs, dtype=np.float128)\n', (4825, 4869), True, 'import numpy as np\n'), ((4929, 4974), 'numpy.append', 'np.append', (['cost', '((outputs - expected[i]) ** 2)'], {}), '(cost, (outputs - expected[i]) ** 2)\n', (4938, 4974), True, 'import numpy as np\n'), ((4990, 5002), 'numpy.sum', 'np.sum', (['cost'], {}), '(cost)\n', (4996, 5002), True, 'import numpy as np\n'), ((6119, 6148), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000)'], {}), '(1000)\n', (6142, 6148), True, 'import numpy as np\n'), ((6340, 6365), 'numpy.append', 'np.append', (['yTrain', '[1, 0]'], {}), '(yTrain, [1, 0])\n', (6349, 6365), True, 'import numpy as np\n'), ((6402, 6427), 'numpy.append', 'np.append', (['yTrain', '[0, 1]'], {}), '(yTrain, [0, 1])\n', (6411, 6427), True, 'import numpy as np\n'), ((6869, 6893), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.25)'], {}), '(-10, 10, 0.25)\n', (6878, 6893), True, 'import numpy as np\n'), ((6894, 6910), 'numpy.ones', 'np.ones', (['(80, 1)'], {}), '((80, 1))\n', (6901, 6910), True, 'import numpy as np\n'), ((855, 906), 'numpy.random.random_sample', 'np.random.random_sample', (['(layers[i + 1], layers[i])'], {}), '((layers[i + 1], layers[i]))\n', (878, 906), True, 'import numpy as np\n'), ((937, 980), 'numpy.zeros', 'np.zeros', (['(self.weights[i + 1].shape[0], 1)'], {}), '((self.weights[i + 1].shape[0], 1))\n', (945, 980), True, 'import numpy as np\n'), ((3483, 3493), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3489, 3493), True, 'import numpy as np\n'), ((5837, 5853), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (5845, 5853), True, 'import numpy 
as np\n'), ((6069, 6098), 'numpy.random.random_sample', 'np.random.random_sample', (['(1000)'], {}), '(1000)\n', (6092, 6098), True, 'import numpy as np\n'), ((2896, 2915), 'numpy.array', 'np.array', (['xTrain[i]'], {}), '(xTrain[i])\n', (2904, 2915), True, 'import numpy as np\n'), ((2917, 2936), 'numpy.array', 'np.array', (['yTrain[i]'], {}), '(yTrain[i])\n', (2925, 2936), True, 'import numpy as np\n'), ((7162, 7178), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (7170, 7178), True, 'import numpy as np\n')] |
import os
from joblib import Parallel, delayed
import json
import argparse
import numpy as np
from pathlib import Path
import yaml
from argparse import Namespace
from tqdm import tqdm
from feature_melspectrogram_essentia import feature_melspectrogram_essentia
from feature_melspectrogram_vggish import feature_melspectrogram_vggish
from feature_ol3 import feature_ol3
from feature_spleeter import feature_spleeter
from feature_tempocnn import feature_tempocnn
config_file = Namespace(**yaml.load(open('config_file.yaml'), Loader=yaml.SafeLoader))
DEBUG = False
def compute_audio_repr(audio_file, audio_repr_file, force=False):
    """Compute the configured audio representation for ``audio_file`` and
    memmap it to ``audio_repr_file`` as float16.

    Returns the number of frames written, or 0 when the target file already
    exists and ``force`` is False. Relies on the module-level ``config``
    dict for the feature type/name.
    """
    if not force:
        if os.path.exists(audio_repr_file):
            print('{} exists. skipping!'.format(audio_file))
            return 0

    if config['type'] == 'waveform':
        # NOTE(review): `librosa` is not imported anywhere in this module,
        # so this branch would raise NameError if taken — confirm the
        # configs used in practice never select type == 'waveform'.
        audio, sr = librosa.load(audio_file, sr=config['resample_sr'])
        audio_repr = audio
        audio_repr = np.expand_dims(audio_repr, axis=1)
    elif config['feature_name'] == 'melspectrogram':
        audio_repr = feature_melspectrogram_essentia(audio_file)
    elif config['feature_name'] == 'vggish':
        audio_repr = feature_melspectrogram_vggish(audio_file)
    elif config['feature_name'] == 'ol3':
        audio_repr = feature_ol3(audio_file)
    elif config['feature_name'] == 'spleeter':
        audio_repr = feature_spleeter(audio_file)
    elif config['feature_name'] == 'tempocnn':
        audio_repr = feature_tempocnn(audio_file)
    else:
        # NOTE(review): the message reports config['type'] although the
        # branches above match config['feature_name'].
        raise Exception('Feature {} not implemented.'.format(config['type']))
    # Compute length (number of time frames)
    length = audio_repr.shape[0]
    # Transform to float16 (to save storage, and works the same)
    audio_repr = audio_repr.astype(np.float16)
    # Write results to disk through a memory-mapped array:
    fp = np.memmap(audio_repr_file, dtype='float16', mode='w+', shape=audio_repr.shape)
    fp[:] = audio_repr[:]
    del fp
    return length
def do_process(files, index):
    """Process one ``(id, audio_file, audio_repr_file)`` entry: compute its
    audio representation and append a line to this machine's index file.
    Failures are appended to an errors file instead of raising, so one bad
    file does not abort the whole batch.
    """
    audio_file = None  # defined up-front so the except block can report it
    try:
        [track_id, audio_file, audio_repr_file] = files[index]
        # Ensure the output directory exists
        target_dir = audio_repr_file[:audio_repr_file.rfind('/') + 1]
        if not os.path.exists(target_dir):
            Path(target_dir).mkdir(parents=True, exist_ok=True)
        # compute audio representation (pre-processing)
        length = compute_audio_repr(audio_file, audio_repr_file)
        # index.tsv writing — `with` guarantees the handle is closed even on error
        with open(audio_representation_folder + "index_" + str(config['machine_i']) + ".tsv", "a") as fw:
            fw.write("%s\t%s\t%s\n" % (track_id, audio_repr_file[len(config_file.DATA_FOLDER):], audio_file[len(config_file.DATA_FOLDER):]))
        print(str(index) + '/' + str(len(files)) + ' Computed: %s' % audio_file)
    except Exception as e:
        # Record the failure and keep going; str() guards against audio_file
        # still being None when the unpacking itself failed.
        with open(audio_representation_folder + "errors" + str(config['machine_i']) + ".txt", "a") as ferrors:
            ferrors.write(str(audio_file) + "\n")
            ferrors.write(str(e))
        print('Error computing audio representation: ', audio_file)
        print(str(e))
def process_files(files):
    """Run ``do_process`` over every entry, serially when DEBUG is set,
    otherwise through a joblib thread pool."""
    total = len(files)
    if DEBUG:
        # Sequential path keeps tracebacks readable while debugging.
        print('WARNING: Parallelization is not used!')
        for position in tqdm(range(total)):
            do_process(files, position)
    else:
        jobs = (delayed(do_process)(files, position) for position in range(total))
        runner = Parallel(n_jobs=config['num_processing_units'], prefer="threads")
        runner(jobs)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('feature_name', help='the feature type')
    args = parser.parse_args()

    # Merge the feature-specific settings into the base preprocess config.
    config = config_file.config_preprocess
    feature_name = args.feature_name
    config.update(config_file.config_preprocess[feature_name])
    config['feature_name'] = feature_name

    audio_representation_folder = config_file.config_train['audio_representation_folder']

    # set audio representations folder
    if not os.path.exists(audio_representation_folder):
        os.makedirs(audio_representation_folder)
    else:
        print("WARNING: already exists a folder with this name!"
              "\nThis is expected if you are splitting computations into different machines.."
              "\n..because all these machines are writing to this folder. Otherwise, check your config_file!")

    # list audios to process: according to 'index_file'
    files_to_convert = []
    # BUGFIX: context manager so the index file handle is always closed.
    with open(config_file.DATA_FOLDER + config["index_audio_file"]) as f:
        for line in f.readlines():
            track_id, audio = line.strip().split("\t")
            audio_repr = audio[:audio.rfind(".")] + ".dat"  # .npy or .pk
            files_to_convert.append((track_id, config['audio_folder'] + audio,
                                     audio_representation_folder + audio_repr))

    # compute audio representation; the work is split across n_machines
    if config['machine_i'] == config['n_machines'] - 1:
        process_files(files_to_convert[int(len(files_to_convert) / config['n_machines']) * (config['machine_i']):])
        # we just save parameters once! In the last thread run by n_machine-1!
        # BUGFIX: close the config dump handle via a context manager.
        with open(audio_representation_folder + "config.json", "w") as config_out:
            json.dump(config, config_out)
    else:
        first_index = int(len(files_to_convert) / config['n_machines']) * (config['machine_i'])
        second_index = int(len(files_to_convert) / config['n_machines']) * (config['machine_i'] + 1)
        assigned_files = files_to_convert[first_index:second_index]
        process_files(assigned_files)
    print("Audio representation folder: " + audio_representation_folder)
| [
"feature_melspectrogram_vggish.feature_melspectrogram_vggish",
"feature_spleeter.feature_spleeter",
"feature_ol3.feature_ol3",
"os.makedirs",
"argparse.ArgumentParser",
"feature_melspectrogram_essentia.feature_melspectrogram_essentia",
"os.path.exists",
"numpy.expand_dims",
"feature_tempocnn.feature... | [((1763, 1841), 'numpy.memmap', 'np.memmap', (['audio_repr_file'], {'dtype': '"""float16"""', 'mode': '"""w+"""', 'shape': 'audio_repr.shape'}), "(audio_repr_file, dtype='float16', mode='w+', shape=audio_repr.shape)\n", (1772, 1841), True, 'import numpy as np\n'), ((3390, 3415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3413, 3415), False, 'import argparse\n'), ((661, 692), 'os.path.exists', 'os.path.exists', (['audio_repr_file'], {}), '(audio_repr_file)\n', (675, 692), False, 'import os\n'), ((933, 967), 'numpy.expand_dims', 'np.expand_dims', (['audio_repr'], {'axis': '(1)'}), '(audio_repr, axis=1)\n', (947, 967), True, 'import numpy as np\n'), ((3839, 3882), 'os.path.exists', 'os.path.exists', (['audio_representation_folder'], {}), '(audio_representation_folder)\n', (3853, 3882), False, 'import os\n'), ((3892, 3932), 'os.makedirs', 'os.makedirs', (['audio_representation_folder'], {}), '(audio_representation_folder)\n', (3903, 3932), False, 'import os\n'), ((1043, 1086), 'feature_melspectrogram_essentia.feature_melspectrogram_essentia', 'feature_melspectrogram_essentia', (['audio_file'], {}), '(audio_file)\n', (1074, 1086), False, 'from feature_melspectrogram_essentia import feature_melspectrogram_essentia\n'), ((3199, 3264), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': "config['num_processing_units']", 'prefer': '"""threads"""'}), "(n_jobs=config['num_processing_units'], prefer='threads')\n", (3207, 3264), False, 'from joblib import Parallel, delayed\n'), ((1153, 1194), 'feature_melspectrogram_vggish.feature_melspectrogram_vggish', 'feature_melspectrogram_vggish', (['audio_file'], {}), '(audio_file)\n', (1182, 1194), False, 'from feature_melspectrogram_vggish import feature_melspectrogram_vggish\n'), ((1258, 1281), 'feature_ol3.feature_ol3', 'feature_ol3', (['audio_file'], {}), '(audio_file)\n', (1269, 1281), False, 'from feature_ol3 import feature_ol3\n'), ((3278, 3297), 'joblib.delayed', 'delayed', 
(['do_process'], {}), '(do_process)\n', (3285, 3297), False, 'from joblib import Parallel, delayed\n'), ((1350, 1378), 'feature_spleeter.feature_spleeter', 'feature_spleeter', (['audio_file'], {}), '(audio_file)\n', (1366, 1378), False, 'from feature_spleeter import feature_spleeter\n'), ((1447, 1475), 'feature_tempocnn.feature_tempocnn', 'feature_tempocnn', (['audio_file'], {}), '(audio_file)\n', (1463, 1475), False, 'from feature_tempocnn import feature_tempocnn\n')] |
import math
import oneflow
import oneflow as flow
import oneflow.nn as nn
import numpy as np
class SparseDispatcher(object):
    """Helper for implementing a mixture of experts.
    The purpose of this class is to create input minibatches for the
    experts and to combine the results of the experts to form a unified
    output tensor.
    There are two functions:
    dispatch - take an input Tensor and create input Tensors for each expert.
    combine - take output Tensors from each expert and form a combined output
      Tensor. Outputs from different experts for the same batch element are
      summed together, weighted by the provided "gates".
    The class is initialized with a "gates" Tensor, which specifies which
    batch elements go to which experts, and the weights to use when combining
    the outputs. Batch element b is sent to expert e iff gates[b, e] != 0.
    The inputs and outputs are all two-dimensional [batch, depth].
    Caller is responsible for collapsing additional dimensions prior to
    calling this class and reshaping the output to the original shape.
    See common_layers.reshape_like().
    Example use:
    gates: a float32 `Tensor` with shape `[batch_size, num_experts]`
    inputs: a float32 `Tensor` with shape `[batch_size, input_size]`
    experts: a list of length `num_experts` containing sub-networks.
    dispatcher = SparseDispatcher(num_experts, gates)
    expert_inputs = dispatcher.dispatch(inputs)
    expert_outputs = [experts[i](expert_inputs[i]) for i in range(num_experts)]
    outputs = dispatcher.combine(expert_outputs)
    The preceding code sets the output for a particular example b to:
    output[b] = Sum_i(gates[b, i] * experts[i](inputs[b]))
    This class takes advantage of sparsity in the gate matrix by including in the
    `Tensor`s for expert i only the batch elements for which `gates[b, i] > 0`.
    """
    def __init__(self, num_experts, gates):
        """Create a SparseDispatcher.

        Args:
            num_experts: int, number of experts.
            gates: a [batch_size, num_experts] Tensor; gates[b, e] != 0 means
                batch element b is routed to expert e with that weight.
        """
        self._gates = gates
        self._num_experts = num_experts
        # sort experts
        # nonzero() yields (row, col) pairs of active gates; sorting column 0
        # groups the pairs by expert index.
        sorted_experts, index_sorted_experts = flow.nonzero(gates).sort(0)
        # drop indices
        _, self._expert_index = sorted_experts.split(1, dim=1)
        # get according batch index for each expert
        self._batch_index = sorted_experts[index_sorted_experts[:, 1], 0]
        # calculate num samples that each expert gets
        self._part_sizes = list((gates > 0).sum(0).numpy())
        # TODO workaround
        # convert numpy scalar entries into plain Python ints — presumably
        # because flow.split() rejects numpy scalars; confirm when upgrading.
        for i in range(len(self._part_sizes)):
            self._part_sizes[i] = self._part_sizes[i].item()
        # expand gates to match with self._batch_index
        gates_exp = gates[self._batch_index.flatten()]
        # per-assignment gate weight, aligned with the dispatch order
        self._nonzero_gates = flow.gather(gates_exp, 1, self._expert_index)
    def dispatch(self, inp):
        """Create one input Tensor for each expert.
        The `Tensor` for a expert `i` contains the slices of `inp` corresponding
        to the batch elements `b` where `gates[b, i] > 0`.
        Args:
          inp: a `Tensor` of shape "[batch_size, <extra_input_dims>]`
        Returns:
          a list of `num_experts` `Tensor`s with shapes
            `[expert_batch_size_i, <extra_input_dims>]`.
        """
        # assigns samples to experts whose gate is nonzero
        # expand according to batch index so we can just split by _part_sizes
        inp_exp = inp[self._batch_index].squeeze(1)
        return flow.split(inp_exp, self._part_sizes, dim=0)
    def combine(self, expert_out, multiply_by_gates=True):
        """Sum together the expert output, weighted by the gates.
        The slice corresponding to a particular batch element `b` is computed
        as the sum over all experts `i` of the expert output, weighted by the
        corresponding gate values. If `multiply_by_gates` is set to False, the
        gate values are ignored.
        Args:
          expert_out: a list of `num_experts` `Tensor`s, each with shape
            `[expert_batch_size_i, <extra_output_dims>]`.
          multiply_by_gates: a boolean
        Returns:
          a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
        """
        # apply exp to expert outputs, so we are not longer in log space
        # NOTE(review): this assumes expert outputs are log-probabilities
        # (exp here, log at the end) — confirm against the expert networks.
        stitched = flow.cat(expert_out, 0).exp()
        if multiply_by_gates:
            stitched = stitched.mul(self._nonzero_gates)
        zeros = flow.zeros(
            self._gates.size(0),
            expert_out[-1].size(1),
            requires_grad=True,
            device=stitched.device,
        )
        # spanning a index matrix
        # each row i of the index matrix repeats the destination batch row for
        # stitched row i, as required by scatter_add below
        batch_index = np.zeros([stitched.shape[0], stitched.shape[1]])
        for i in range(stitched.shape[0]):
            batch_index[i, :] = (
                np.ones(batch_index.shape[1]) * self._batch_index[i].item()
            )
        batch_index_ = flow.Tensor(batch_index, device=stitched.device)
        batch_index_ = batch_index_.int()
        batch_index_.requires_grad = False
        # accumulate each expert's rows back into their original batch slots
        combined = flow.scatter_add(
            zeros, dim=0, index=batch_index_, src=stitched.float()
        )
        # add eps to all zero values in order to avoid nans when going back to log space
        combined[combined == 0] = np.finfo(float).eps
        # back to log space
        return combined.log()
    def expert_to_gates(self):
        """Gate values corresponding to the examples in the per-expert `Tensor`s.
        Returns:
          a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
              and shapes `[expert_batch_size_i]`
        """
        # split nonzero gates for each expert
        return flow.split(self._nonzero_gates, self._part_sizes, dim=0)
# should be aware of the placement
def cdf(value, loc=None, scale=None):
    """Normal cumulative distribution function, evaluated elementwise.

    Computes Phi((value - loc) / scale) via the error function.

    Args:
        value: input tensor.
        loc: mean of the distribution; defaults to a scalar 0.0 tensor.
        scale: standard deviation; defaults to a scalar 1.0 tensor.

    Returns:
        a tensor of the same shape as `value`.

    Note:
        The defaults are created lazily per call. The previous version used
        `loc=flow.tensor([0.0])` / `scale=flow.tensor([1.0])` directly in the
        signature, which allocated tensors at import time and shared the same
        tensor objects across every call (the mutable-default-argument trap).
    """
    if loc is None:
        loc = flow.tensor([0.0])
    if scale is None:
        scale = flow.tensor([1.0])
    # move parameters onto the same device as the input before computing
    loc = loc.to(value.device)
    scale = scale.to(value.device)
    return 0.5 * (1 + oneflow.erf((value - loc) * scale.reciprocal() / math.sqrt(2)))
class MoE(nn.Module):
    """Call a Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.
    Args:
    model: an nn.Module used as the expert network (see NOTE in __init__
        about instance sharing)
    input_size: integer - size of the input
    output_size: integer - size of the input
    num_experts: an integer - number of experts
    noisy_gating: a boolean
    k: an integer - how many experts to use for each batch element
    """
    def __init__(
        self, model, input_size, output_size, num_experts, noisy_gating=True, k=4
    ):
        super(MoE, self).__init__()
        self.noisy_gating = noisy_gating
        self.num_experts = num_experts
        self.output_size = output_size
        self.input_size = input_size
        self.k = k
        # instantiate experts
        # NOTE(review): every entry of the list is the *same* `model` object,
        # so all experts share parameters — confirm this is intended (use
        # copies of `model` if independent experts are wanted).
        self.experts = nn.ModuleList([model for i in range(self.num_experts)])
        # learned projection from inputs to per-expert gating logits
        self.w_gate = nn.Parameter(
            flow.zeros(input_size, num_experts), requires_grad=True
        )
        # learned projection controlling per-expert noise magnitude
        self.w_noise = nn.Parameter(
            flow.zeros(input_size, num_experts), requires_grad=True
        )
        self.softplus = nn.Softplus()
        self.softmax = nn.Softmax(1)
        assert self.k <= self.num_experts
    def cv_squared(self, x):
        """The squared coefficient of variation of a sample.
        Useful as a loss to encourage a positive distribution to be more uniform.
        Epsilons added for numerical stability.  Returns 0 for an empty Tensor.
        Args:
        x: a `Tensor`.
        Returns:
        a `Scalar`.
        """
        eps = 1e-10
        # if only num_experts = 1
        if x.shape[0] == 1:
            return flow.Tensor([0])
        return x.float().var() / (x.float().mean() ** 2 + eps)
    def _gates_to_load(self, gates):
        """Compute the true load per expert, given the gates.
        The load is the number of examples for which the corresponding gate is >0.
        Args:
        gates: a `Tensor` of shape [batch_size, n]
        Returns:
        a float32 `Tensor` of shape [n]
        """
        return (gates > 0).sum(0)
    def _prob_in_top_k(
        self, clean_values, noisy_values, noise_stddev, noisy_top_values
    ):
        """Helper function to NoisyTopKGating.
        Computes the probability that value is in top k, given different random noise.
        This gives us a way of backpropagating from a loss that balances the number
        of times each expert is in the top k experts per example.
        In the case of no noise, pass in None for noise_stddev, and the result will
        not be differentiable.
        Args:
        clean_values: a `Tensor` of shape [batch, n].
        noisy_values: a `Tensor` of shape [batch, n].  Equal to clean values plus
          normally distributed noise with standard deviation noise_stddev.
        noise_stddev: a `Tensor` of shape [batch, n], or None
        noisy_top_values: a `Tensor` of shape [batch, m].
           "values" Output of tf.top_k(noisy_top_values, m).  m >= k+1
        Returns:
        a `Tensor` of shape [batch, n].
        """
        batch = clean_values.size(0)
        m = noisy_top_values.size(1)
        top_values_flat = noisy_top_values.flatten()
        # flat index of the (k+1)-th top value for each batch row
        threshold_positions_if_in = (
            flow.arange(batch, device=noisy_values.device) * m + self.k
        )
        threshold_if_in = flow.unsqueeze(
            flow.gather(top_values_flat, 0, threshold_positions_if_in), 1
        )
        is_in = flow.gt(noisy_values, threshold_if_in)
        # threshold shifts to the k-th top value when the candidate itself is in
        threshold_positions_if_out = threshold_positions_if_in - 1
        threshold_if_out = flow.unsqueeze(
            flow.gather(top_values_flat, 0, threshold_positions_if_out), 1
        )
        # is each value currently in the top k.
        # NOTE(review): divides by noise_stddev — a zero stddev (see
        # noisy_top_k_gating with train=False) would divide by zero here.
        prob_if_in = cdf((clean_values - threshold_if_in) / noise_stddev)
        prob_if_out = cdf((clean_values - threshold_if_out) / noise_stddev)
        prob = flow.where(is_in, prob_if_in, prob_if_out)
        return prob
    def noisy_top_k_gating(self, x, train, noise_epsilon=1e-2):
        """Noisy top-k gating.
        See paper: https://arxiv.org/abs/1701.06538.
        Args:
        x: input Tensor with shape [batch_size, input_size]
        train: a boolean - we only add noise at training time.
        noise_epsilon: a float
        Returns:
        gates: a Tensor with shape [batch_size, num_experts]
        load: a Tensor with shape [num_experts]
        """
        clean_logits = oneflow.matmul(x, self.w_gate)
        if self.noisy_gating:
            # multiplying by `train` zeroes the noise at evaluation time
            raw_noise_stddev = oneflow.matmul(x, self.w_noise)
            noise_stddev = (self.softplus(raw_noise_stddev) + noise_epsilon) * train
            # noisy_logits = clean_logits + ( torch.randn(clean_logits.size()) * noise_stddev)
            # TODO, fix this after torch randn argument fixed
            noisy_logits = clean_logits + (
                flow.randn(
                    clean_logits.size()[0],
                    clean_logits.size()[1],
                    device=clean_logits.device,
                )
                * noise_stddev
            )
            logits = noisy_logits
        else:
            logits = clean_logits
        # calculate topk + 1 that will be needed for the noisy gates
        top_logits, top_indices = logits.topk(min(self.k + 1, self.num_experts), dim=1)
        top_k_logits = top_logits[:, : self.k]
        top_k_indices = top_indices[:, : self.k]
        top_k_gates = self.softmax(top_k_logits)
        top_k_logits = top_k_logits.to(logits.device)
        top_indices = top_indices.to(logits.device)
        top_logits = top_logits.to(logits.device)
        # scatter the k soft weights back into a dense [batch, num_experts] map
        zeros = flow.zeros(
            logits.shape, dtype=logits.dtype, requires_grad=True, device=logits.device
        )
        gates = oneflow.scatter(zeros, 1, top_k_indices, top_k_gates)
        if self.noisy_gating and self.k < self.num_experts:
            # NOTE(review): when train=False, noise_stddev is 0 here and
            # _prob_in_top_k divides by it — confirm callers always pass
            # train=True while noisy_gating is enabled.
            load = (
                self._prob_in_top_k(
                    clean_logits, noisy_logits, noise_stddev, top_logits
                )
            ).sum(0)
        else:
            load = self._gates_to_load(gates)
        return gates, load
    def forward(self, x, train=True, loss_coef=1e-2):
        """Args:
        x: tensor shape [batch_size, input_size]
        train: a boolean scalar.
        loss_coef: a scalar - multiplier on load-balancing losses
        Returns:
        y: a tensor with shape [batch_size, output_size].
        extra_training_loss: a scalar.  This should be added into the overall
        training loss of the model.  The backpropagation of this loss
        encourages all experts to be approximately equally used across a batch.
        """
        gates, load = self.noisy_top_k_gating(x, train)
        # calculate importance loss
        importance = gates.sum(0)
        # load-balancing loss: penalize uneven importance and uneven load
        loss = self.cv_squared(importance) + self.cv_squared(load)
        loss *= loss_coef
        dispatcher = SparseDispatcher(self.num_experts, gates)
        expert_inputs = dispatcher.dispatch(x)
        gates = dispatcher.expert_to_gates()
        expert_outputs = []
        # TODO, vectorize this part after fixing the zero dimension bug
        # experts that received no samples are skipped entirely
        for i in range(self.num_experts):
            if expert_inputs[i].shape.numel() != 0:
                expert_outputs.append(self.experts[i](expert_inputs[i]))
        y = dispatcher.combine(expert_outputs)
        return y, loss
| [
"oneflow.Tensor",
"oneflow.nn.Softplus",
"oneflow.cat",
"oneflow.scatter",
"oneflow.arange",
"oneflow.matmul",
"math.sqrt",
"numpy.zeros",
"numpy.ones",
"oneflow.gather",
"oneflow.where",
"oneflow.zeros",
"numpy.finfo",
"oneflow.nn.Softmax",
"oneflow.nonzero",
"oneflow.gt",
"oneflow.... | [((5752, 5770), 'oneflow.tensor', 'flow.tensor', (['[0.0]'], {}), '([0.0])\n', (5763, 5770), True, 'import oneflow as flow\n'), ((5778, 5796), 'oneflow.tensor', 'flow.tensor', (['[1.0]'], {}), '([1.0])\n', (5789, 5796), True, 'import oneflow as flow\n'), ((2748, 2793), 'oneflow.gather', 'flow.gather', (['gates_exp', '(1)', 'self._expert_index'], {}), '(gates_exp, 1, self._expert_index)\n', (2759, 2793), True, 'import oneflow as flow\n'), ((3447, 3491), 'oneflow.split', 'flow.split', (['inp_exp', 'self._part_sizes'], {'dim': '(0)'}), '(inp_exp, self._part_sizes, dim=0)\n', (3457, 3491), True, 'import oneflow as flow\n'), ((4612, 4660), 'numpy.zeros', 'np.zeros', (['[stitched.shape[0], stitched.shape[1]]'], {}), '([stitched.shape[0], stitched.shape[1]])\n', (4620, 4660), True, 'import numpy as np\n'), ((4852, 4900), 'oneflow.Tensor', 'flow.Tensor', (['batch_index'], {'device': 'stitched.device'}), '(batch_index, device=stitched.device)\n', (4863, 4900), True, 'import oneflow as flow\n'), ((5639, 5695), 'oneflow.split', 'flow.split', (['self._nonzero_gates', 'self._part_sizes'], {'dim': '(0)'}), '(self._nonzero_gates, self._part_sizes, dim=0)\n', (5649, 5695), True, 'import oneflow as flow\n'), ((7074, 7087), 'oneflow.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (7085, 7087), True, 'import oneflow.nn as nn\n'), ((7111, 7124), 'oneflow.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (7121, 7124), True, 'import oneflow.nn as nn\n'), ((9427, 9465), 'oneflow.gt', 'flow.gt', (['noisy_values', 'threshold_if_in'], {}), '(noisy_values, threshold_if_in)\n', (9434, 9465), True, 'import oneflow as flow\n'), ((9877, 9919), 'oneflow.where', 'flow.where', (['is_in', 'prob_if_in', 'prob_if_out'], {}), '(is_in, prob_if_in, prob_if_out)\n', (9887, 9919), True, 'import oneflow as flow\n'), ((10445, 10475), 'oneflow.matmul', 'oneflow.matmul', (['x', 'self.w_gate'], {}), '(x, self.w_gate)\n', (10459, 10475), False, 'import oneflow\n'), ((11654, 11745), 
'oneflow.zeros', 'flow.zeros', (['logits.shape'], {'dtype': 'logits.dtype', 'requires_grad': '(True)', 'device': 'logits.device'}), '(logits.shape, dtype=logits.dtype, requires_grad=True, device=\n logits.device)\n', (11664, 11745), True, 'import oneflow as flow\n'), ((11779, 11832), 'oneflow.scatter', 'oneflow.scatter', (['zeros', '(1)', 'top_k_indices', 'top_k_gates'], {}), '(zeros, 1, top_k_indices, top_k_gates)\n', (11794, 11832), False, 'import oneflow\n'), ((5225, 5240), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5233, 5240), True, 'import numpy as np\n'), ((6868, 6903), 'oneflow.zeros', 'flow.zeros', (['input_size', 'num_experts'], {}), '(input_size, num_experts)\n', (6878, 6903), True, 'import oneflow as flow\n'), ((6983, 7018), 'oneflow.zeros', 'flow.zeros', (['input_size', 'num_experts'], {}), '(input_size, num_experts)\n', (6993, 7018), True, 'import oneflow as flow\n'), ((7615, 7631), 'oneflow.Tensor', 'flow.Tensor', (['[0]'], {}), '([0])\n', (7626, 7631), True, 'import oneflow as flow\n'), ((9339, 9397), 'oneflow.gather', 'flow.gather', (['top_values_flat', '(0)', 'threshold_positions_if_in'], {}), '(top_values_flat, 0, threshold_positions_if_in)\n', (9350, 9397), True, 'import oneflow as flow\n'), ((9589, 9648), 'oneflow.gather', 'flow.gather', (['top_values_flat', '(0)', 'threshold_positions_if_out'], {}), '(top_values_flat, 0, threshold_positions_if_out)\n', (9600, 9648), True, 'import oneflow as flow\n'), ((10538, 10569), 'oneflow.matmul', 'oneflow.matmul', (['x', 'self.w_noise'], {}), '(x, self.w_noise)\n', (10552, 10569), False, 'import oneflow\n'), ((2116, 2135), 'oneflow.nonzero', 'flow.nonzero', (['gates'], {}), '(gates)\n', (2128, 2135), True, 'import oneflow as flow\n'), ((4262, 4285), 'oneflow.cat', 'flow.cat', (['expert_out', '(0)'], {}), '(expert_out, 0)\n', (4270, 4285), True, 'import oneflow as flow\n'), ((4755, 4784), 'numpy.ones', 'np.ones', (['batch_index.shape[1]'], {}), '(batch_index.shape[1])\n', (4762, 4784), True, 
'import numpy as np\n'), ((9214, 9260), 'oneflow.arange', 'flow.arange', (['batch'], {'device': 'noisy_values.device'}), '(batch, device=noisy_values.device)\n', (9225, 9260), True, 'import oneflow as flow\n'), ((5936, 5948), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5945, 5948), False, 'import math\n')] |
# -*- coding: utf-8 -*-
""" Binary latent class model.
"""
# metadata variables
__author__ = "<NAME> <<EMAIL>>"
__date__ = "2013/12/15"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2013 Hiroshi Kajino all rights reserved."
__docformat__ = "restructuredtext en"
import numpy as np
import scipy as sp
import sys
from crowd_data import BinaryData
class LatentClassModel:
    """ Binary latent class model, fitted with the EM algorithm.

    :IVariables:
        data : crowdData.BinaryData
        log_mu : numpy.array
            2 * `num_instances` numpy.array. mu = Pr[true_label = 1 | other variables].
            Row 0 holds log(mu), row 1 holds log(1 - mu).
        log_p : numpy.array
            numpy.array of length 2. p = Pr[true_label = 1]. The 1st element contains log(p), and the 2nd log(1-p).
        log_a : numpy.array
            numpy.array of length `num_instances`. Log-likelihood of instance i's worker labels given true label +1.
        log_b : numpy.array
            numpy.array of length `num_instances`. Log-likelihood of instance i's worker labels given true label -1.
        log_alpha : numpy.array
            2 * `num_workers` numpy.array. alpha_j = Pr[label_by_worker_j = 1 | true_label = 1]. The 1st row contains log(alpha), and the 2nd log(1-alpha).
        log_beta : numpy.array
            2 * `num_workers` numpy.array. beta_j = Pr[label_by_worker_j = 0 | true_label = 0]. The 1st row contains log(beta), and the 2nd log(1-beta).
    """

    def __init__(self, crowd_data):
        self.data = crowd_data
        self.pos_ind = np.where(self.data.y == 1)
        self.neg_ind = np.where(self.data.y == -1)
        # initialize the posteriors from a (log-probability) majority vote
        self.log_mu = self.data.majority_vote("log_prob")
        self.log_p = np.zeros(2)
        self.log_a = np.zeros(self.data.num_instance)
        self.log_b = np.zeros(self.data.num_instance)
        self.log_alpha = np.zeros((2, self.data.num_workers))
        self.log_beta = np.zeros((2, self.data.num_workers))
        self._m_step()

    def _update_log_ab(self):
        """ Refresh log_a and log_b from the current alpha/beta estimates.

        log_a[i] = sum_j log Pr[y_ij | true label = +1]
        log_b[i] = sum_j log Pr[y_ij | true label = -1]
        Workers who did not label instance i (y_ij == 0) contribute nothing.
        Previously this computation was duplicated verbatim in _e_step and
        _q_function; it is factored out here. The explicit np.ones(...)
        factors of the original were broadcasting no-ops and are dropped.
        """
        y = self.data.y
        # (num_workers,) arrays broadcast against the (num_instances, num_workers) masks
        self.log_a = np.sum(self.log_alpha[0, :] * (y == 1), axis=1) \
                     + np.sum(self.log_alpha[1, :] * (y == -1), axis=1)
        self.log_b = np.sum(self.log_beta[0, :] * (y == -1), axis=1) \
                     + np.sum(self.log_beta[1, :] * (y == 1), axis=1)

    def _e_step(self):
        """ Perform the E-step. I.e., update log_a, log_b, and log_mu.
        """
        self._update_log_ab()
        self.log_mu[0, :] = self.log_p[0] + self.log_a
        self.log_mu[1, :] = self.log_p[1] + self.log_b
        # normalize each column so mu and (1 - mu) sum to one
        self.log_mu = self.log_mu - sp.special.logsumexp(self.log_mu, axis=0)

    def _m_step(self):
        """ Perform the M-step. I.e., update log_p, log_alpha, and log_beta.
        """
        y = self.data.y
        ones_iw = np.ones((self.data.num_instance, self.data.num_workers))
        # log_mu_ij[i, j] = log mu_i, replicated across workers
        log_mu_ij = self.log_mu[0, :][:, np.newaxis] * ones_iw
        log_one_minus_mu_ij = self.log_mu[1, :][:, np.newaxis] * ones_iw
        # weighted log-sum-exps over instances; b= masks select which labels count
        alpha_log_denomi = sp.special.logsumexp(log_mu_ij, axis=0, b=(y != 0))
        alpha_log_nume_pos = sp.special.logsumexp(log_mu_ij, axis=0, b=(y == 1))
        alpha_log_nume_neg = sp.special.logsumexp(log_mu_ij, axis=0, b=(y == -1))
        beta_log_denomi = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(y != 0))
        beta_log_nume_pos = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(y == 1))
        beta_log_nume_neg = sp.special.logsumexp(log_one_minus_mu_ij, axis=0, b=(y == -1))
        self.log_p = sp.special.logsumexp(self.log_mu, axis=1)
        self.log_p = self.log_p - sp.special.logsumexp(self.log_p)
        self.log_alpha = np.array([alpha_log_nume_pos - alpha_log_denomi, alpha_log_nume_neg - alpha_log_denomi])
        self.log_beta = np.array([beta_log_nume_neg - beta_log_denomi, beta_log_nume_pos - beta_log_denomi])

    def _q_function(self):
        """ Calculate the value of the Q-function on current estimates.
        """
        self._update_log_ab()
        log_pa_pb = np.array([self.log_p[0] + (self.log_a), self.log_p[1] + (self.log_b)])
        return (np.exp(self.log_mu) * log_pa_pb).sum()

    def run_em(self, eps, verbose=False):
        """ Run EM algorithm
        :Variables:
            eps : float
                tolerable relative errors on the Q-function.
            verbose : bool
                if verbose, print the value of the q-function, else don't print.
        """
        q_new = -np.inf
        q_old = 0
        convergent = False
        while not convergent:
            q_old = q_new
            self._e_step()
            self._m_step()
            q_new = self._q_function()
            # stop when the relative change of the Q-function drops below eps
            convergent = (np.abs(q_old - q_new) / np.abs(q_new) < eps)
            if verbose:
                if q_new - q_old < 0:
                    sys.stderr.write("WARNING: Q-function decreases. Something might be wrong.\n")
                    sys.stderr.flush()
                sys.stdout.write("\r " + "q_func = " + str(q_new) + "\n")
                sys.stdout.flush()
        sys.stdout.write("\n"+"Converged. Relative_err = " + str(np.abs(q_old - q_new) / np.abs(q_new)) + "\n")

    def estimated_labels(self, threshold=0.5):
        """ Estimate the true labels based on the current estimates on the posterior probabilities of the true labels.
        :Variables:
            threshold : float
                A threshold to round the probability. If mu > threshold, return 1. Otherwise, return -1.
        :RType: numpy.array
        :Returns: Estimated labels. The length of returned numpy.array = #(instances)
        """
        # compare in log space, then map {0, 1} -> {-1, +1}
        return (self.log_mu[0, :] > np.log(threshold)).astype(int) * 2 - 1
if __name__ == "__main__":
    # Smoke test: five workers, seven instances, mostly-agreeing labels.
    label_matrix = np.array([[1, 1, 1, 1, 1, 1, -1],
                             [-1, -1, -1, -1, -1, -1, 1],
                             [1, 1, 1, 1, 1, 1, 1],
                             [1, 1, 1, 1, 1, 1, -1],
                             [1, 1, 1, 1, 1, 1, -1]])
    crowd = BinaryData(label_matrix)
    lcm = LatentClassModel(crowd)
    lcm.run_em(10 ** (-10))
    # Recovered labels and the per-worker accuracy parameters.
    print(lcm.estimated_labels())
    print(np.exp(lcm.log_alpha[0, :]))
    print(np.exp(lcm.log_beta[0, :]))
| [
"numpy.abs",
"numpy.log",
"crowd_data.BinaryData",
"numpy.zeros",
"numpy.ones",
"numpy.where",
"numpy.array",
"numpy.exp",
"scipy.special.logsumexp",
"sys.stdout.flush",
"sys.stderr.write",
"sys.stderr.flush"
] | [((7332, 7470), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1, 1, -1], [-1, -1, -1, -1, -1, -1, 1], [1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, -1], [1, 1, 1, 1, 1, 1, -1]]'], {}), '([[1, 1, 1, 1, 1, 1, -1], [-1, -1, -1, -1, -1, -1, 1], [1, 1, 1, 1,\n 1, 1, 1], [1, 1, 1, 1, 1, 1, -1], [1, 1, 1, 1, 1, 1, -1]])\n', (7340, 7470), True, 'import numpy as np\n'), ((7560, 7575), 'crowd_data.BinaryData', 'BinaryData', (['mat'], {}), '(mat)\n', (7570, 7575), False, 'from crowd_data import BinaryData\n'), ((1457, 1483), 'numpy.where', 'np.where', (['(self.data.y == 1)'], {}), '(self.data.y == 1)\n', (1465, 1483), True, 'import numpy as np\n'), ((1507, 1534), 'numpy.where', 'np.where', (['(self.data.y == -1)'], {}), '(self.data.y == -1)\n', (1515, 1534), True, 'import numpy as np\n'), ((1614, 1625), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1622, 1625), True, 'import numpy as np\n'), ((1647, 1679), 'numpy.zeros', 'np.zeros', (['self.data.num_instance'], {}), '(self.data.num_instance)\n', (1655, 1679), True, 'import numpy as np\n'), ((1701, 1733), 'numpy.zeros', 'np.zeros', (['self.data.num_instance'], {}), '(self.data.num_instance)\n', (1709, 1733), True, 'import numpy as np\n'), ((1759, 1795), 'numpy.zeros', 'np.zeros', (['(2, self.data.num_workers)'], {}), '((2, self.data.num_workers))\n', (1767, 1795), True, 'import numpy as np\n'), ((1820, 1856), 'numpy.zeros', 'np.zeros', (['(2, self.data.num_workers)'], {}), '((2, self.data.num_workers))\n', (1828, 1856), True, 'import numpy as np\n'), ((3531, 3590), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['log_mu_ij'], {'axis': '(0)', 'b': '(self.data.y != 0)'}), '(log_mu_ij, axis=0, b=self.data.y != 0)\n', (3551, 3590), True, 'import scipy as sp\n'), ((3622, 3681), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['log_mu_ij'], {'axis': '(0)', 'b': '(self.data.y == 1)'}), '(log_mu_ij, axis=0, b=self.data.y == 1)\n', (3642, 3681), True, 'import scipy as sp\n'), ((3713, 3773), 'scipy.special.logsumexp', 
'sp.special.logsumexp', (['log_mu_ij'], {'axis': '(0)', 'b': '(self.data.y == -1)'}), '(log_mu_ij, axis=0, b=self.data.y == -1)\n', (3733, 3773), True, 'import scipy as sp\n'), ((3802, 3871), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['log_one_minus_mu_ij'], {'axis': '(0)', 'b': '(self.data.y != 0)'}), '(log_one_minus_mu_ij, axis=0, b=self.data.y != 0)\n', (3822, 3871), True, 'import scipy as sp\n'), ((3902, 3971), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['log_one_minus_mu_ij'], {'axis': '(0)', 'b': '(self.data.y == 1)'}), '(log_one_minus_mu_ij, axis=0, b=self.data.y == 1)\n', (3922, 3971), True, 'import scipy as sp\n'), ((4002, 4072), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['log_one_minus_mu_ij'], {'axis': '(0)', 'b': '(self.data.y == -1)'}), '(log_one_minus_mu_ij, axis=0, b=self.data.y == -1)\n', (4022, 4072), True, 'import scipy as sp\n'), ((4096, 4137), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['self.log_mu'], {'axis': '(1)'}), '(self.log_mu, axis=1)\n', (4116, 4137), True, 'import scipy as sp\n'), ((4230, 4322), 'numpy.array', 'np.array', (['[alpha_log_nume_pos - alpha_log_denomi, alpha_log_nume_neg - alpha_log_denomi]'], {}), '([alpha_log_nume_pos - alpha_log_denomi, alpha_log_nume_neg -\n alpha_log_denomi])\n', (4238, 4322), True, 'import numpy as np\n'), ((4343, 4431), 'numpy.array', 'np.array', (['[beta_log_nume_neg - beta_log_denomi, beta_log_nume_pos - beta_log_denomi]'], {}), '([beta_log_nume_neg - beta_log_denomi, beta_log_nume_pos -\n beta_log_denomi])\n', (4351, 4431), True, 'import numpy as np\n'), ((5546, 5612), 'numpy.array', 'np.array', (['[self.log_p[0] + self.log_a, self.log_p[1] + self.log_b]'], {}), '([self.log_p[0] + self.log_a, self.log_p[1] + self.log_b])\n', (5554, 5612), True, 'import numpy as np\n'), ((7689, 7718), 'numpy.exp', 'np.exp', (['model.log_alpha[0, :]'], {}), '(model.log_alpha[0, :])\n', (7695, 7718), True, 'import numpy as np\n'), ((7730, 7758), 'numpy.exp', 'np.exp', 
(['model.log_beta[0, :]'], {}), '(model.log_beta[0, :])\n', (7736, 7758), True, 'import numpy as np\n'), ((3115, 3156), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['self.log_mu'], {'axis': '(0)'}), '(self.log_mu, axis=0)\n', (3135, 3156), True, 'import scipy as sp\n'), ((4172, 4204), 'scipy.special.logsumexp', 'sp.special.logsumexp', (['self.log_p'], {}), '(self.log_p)\n', (4192, 4204), True, 'import scipy as sp\n'), ((6543, 6561), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6559, 6561), False, 'import sys\n'), ((3311, 3367), 'numpy.ones', 'np.ones', (['(self.data.num_workers, self.data.num_instance)'], {}), '((self.data.num_workers, self.data.num_instance))\n', (3318, 3367), True, 'import numpy as np\n'), ((3433, 3489), 'numpy.ones', 'np.ones', (['(self.data.num_workers, self.data.num_instance)'], {}), '((self.data.num_workers, self.data.num_instance))\n', (3440, 3489), True, 'import numpy as np\n'), ((5633, 5652), 'numpy.exp', 'np.exp', (['self.log_mu'], {}), '(self.log_mu)\n', (5639, 5652), True, 'import numpy as np\n'), ((6208, 6229), 'numpy.abs', 'np.abs', (['(q_old - q_new)'], {}), '(q_old - q_new)\n', (6214, 6229), True, 'import numpy as np\n'), ((6232, 6245), 'numpy.abs', 'np.abs', (['q_new'], {}), '(q_new)\n', (6238, 6245), True, 'import numpy as np\n'), ((6335, 6413), 'sys.stderr.write', 'sys.stderr.write', (['"""WARNING: Q-function decreases. Something might be wrong.\n"""'], {}), "('WARNING: Q-function decreases. 
Something might be wrong.\\n')\n", (6351, 6413), False, 'import sys\n'), ((6434, 6452), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (6450, 6452), False, 'import sys\n'), ((2070, 2126), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (2077, 2126), True, 'import numpy as np\n'), ((2316, 2372), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (2323, 2372), True, 'import numpy as np\n'), ((2561, 2617), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (2568, 2617), True, 'import numpy as np\n'), ((2807, 2863), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (2814, 2863), True, 'import numpy as np\n'), ((4627, 4683), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (4634, 4683), True, 'import numpy as np\n'), ((4873, 4929), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (4880, 4929), True, 'import numpy as np\n'), ((5118, 5174), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (5125, 5174), True, 'import numpy as np\n'), ((5364, 5420), 'numpy.ones', 'np.ones', (['(self.data.num_instance, self.data.num_workers)'], {}), '((self.data.num_instance, self.data.num_workers))\n', (5371, 5420), True, 'import numpy as np\n'), ((6628, 6649), 'numpy.abs', 'np.abs', (['(q_old - q_new)'], {}), '(q_old - q_new)\n', (6634, 6649), True, 'import numpy as np\n'), ((6652, 6665), 'numpy.abs', 'np.abs', (['q_new'], {}), '(q_new)\n', (6658, 6665), True, 
'import numpy as np\n'), ((7156, 7173), 'numpy.log', 'np.log', (['threshold'], {}), '(threshold)\n', (7162, 7173), True, 'import numpy as np\n')] |
#!/usr/bin/env python
################################################################################
# Created by <NAME> #
# <EMAIL> #
################################################################################
import shapely, logging, numpy, math
from shapely.wkt import loads
from shapely.geometry import box
from de9im.patterns import intersects, contains, pattern
# DE-9IM mask matching geometry pairs whose interiors do not intersect;
# used by QuadTree._relation to treat boundary-only contact as disjoint.
excluding_interiors = pattern('F********')
import morton
# maximum number of bits for X and Y
MAX_NUMBITS = 31
class QuadTree:
""" QuadTree class """
    def __init__(self, domain, numLevels):
        """Build a quadtree over *domain* = (minx, miny, maxx, maxy).

        :Variables:
            domain : tuple
                (minx, miny, maxx, maxy); all coordinates must be >= 0.
            numLevels : int or 'auto'
                Fixed subdivision depth, or 'auto' (also any value <= 0)
                to pick the depth per query in overlapCodes().
        """
        maximumValue = max(domain)
        minimumValue = min(domain)
        if minimumValue < 0:
            raise Exception('ERROR: Domain must contain only positive X and Y numbers!')
        # find the smallest number of bits whose range still covers the domain:
        # decrement while 2**numBits >= maximumValue, then step back up once
        self.numBits = MAX_NUMBITS
        fits = True
        while fits:
            if (1 << self.numBits) >= maximumValue:
                self.numBits -= 1
            else:
                fits = False
        self.numBits += 1
        if self.numBits > MAX_NUMBITS:
            raise Exception('ERROR: maximum number of bits of X and Y is ' + str(MAX_NUMBITS))
        if numLevels != 'auto' and numLevels > 0:
            if numLevels > self.numBits:
                raise Exception('ERROR: quadTree numLevels must be lower or equal to the number of bits of X and Y')
            else:
                self.numLevels = numLevels
        else:
            self.numLevels = 'auto'
        # root quad of the full 2**numBits x 2**numBits space
        mindomain = 0
        maxdomain = 1 << self.numBits
        parentQuad = (mindomain, mindomain, maxdomain, maxdomain)
        startLevel = 0
        self.domainRegion = box(*domain)
        # descend while the domain still fits inside a single quad, so that
        # queries can start below the root instead of at level 0
        fits = True
        while fits:
            numCodes = len(self._overlapCodes(startLevel, 0, 0, self.domainRegion, *parentQuad)[0])
            if numCodes == 1:
                startLevel += 1
            else:
                fits = False
        startLevel -= 1
        if startLevel > 0:
            (self.startQuadCode, self.startLevel, startFullIn, startMRange) = self._overlapCodes(startLevel, 0, 0, self.domainRegion, *parentQuad)[0][0]
            # getCoords is defined elsewhere in this class — presumably it maps
            # a Morton range back to (minx, miny, maxx, maxy); confirm there.
            self.startQuad = self.getCoords(startMRange)
        else:
            self.startLevel = 0
            self.startQuadCode = 0
            self.startQuad = parentQuad
#        print 'domain', domain
#        print 'domain numBits', self.numBits
#        print 'quadtree numLevels', self.numLevels
#        print 'quadtree startLevel', self.startLevel
#        print 'quadtree startQuadCode', self.startQuadCode
#        print 'quadtree startQuad', self.startQuad
def _relation(self, geom1, geom2):
""" Returns the relationship between two geometries.
0 if they are disjoint,
1 if geom2 is completely in geom1,
2 if geom2 is partly in geom1"""
relation = geom1.relate(geom2)
if not intersects.matches(relation):
return 0 # they are disjoint
elif contains.matches(relation):
return 1
else: # there is some overlaps
if excluding_interiors.matches(relation):
return 0 # overlap only in boundaries, we do not count it
else:
return 2 # some interior of geom2 is in geom1
def _overlapCodes(self, maxDepth, parentLevel, parentCode, region, minx, miny, maxx, maxy):
    """ Recursive method that return morton ranges overlapping with the region for the specified domain"""
    # Split the parent quad (minx,miny)-(maxx,maxy) into four children
    # at its integer midpoint (coordinates are powers-of-two sized).
    cx = minx + ((maxx - minx) >> 1)
    cy = miny + ((maxy - miny) >> 1)
    quads = [
        (minx, miny, cx, cy), #0
        (minx, cy, cx, maxy), #1
        (cx, miny, maxx, cy), #2
        (cx, cy, maxx, maxy) #3
    ]
    level = parentLevel + 1
    codes = []
    # c counts how many of the four children produced exactly one code;
    # when all 4 do, the caller can replace them with the parent cell.
    c = 0
    for quadIndex in range(4):
        quad = quads[quadIndex]
        relation = self._relation(region, box(*quad))
        if relation: #1 or 2 (0 = disjoint, nothing to emit)
            # Child code: parent's code with 2 more bits appended.
            quadCode = (parentCode << 2) + quadIndex
            if relation == 1 or parentLevel == maxDepth:
                # Fully inside, or maximum depth reached: emit this cell.
                codes.append((quadCode, level, relation == 1, self.quadCodeToMortonRange(quadCode, level))) # relation = 1 indicates that this morton range is fully within the query region
                c += 1
            else:
                # Partial overlap: recurse into the child quad.
                (tcodes, tc) = self._overlapCodes(maxDepth, level, quadCode, region, *quad)
                if tc == 4:
                    # All four grandchildren overlap: collapse into this
                    # child cell (marked as not fully inside).
                    codes.append((quadCode, level, False, self.quadCodeToMortonRange(quadCode, level)))
                    c += 1
                else:
                    codes.extend(tcodes)
    return (codes,c)
def quadCodeToMortonRange(self, quadCode, level):
    """Expand a quadtree cell code at the given level into the
    inclusive (min, max) Morton-code range it covers."""
    # Each level below `level` contributes two Morton bits.
    shift = (self.numBits - level) << 1
    low = quadCode << shift
    high = ((quadCode + 1) << shift) - 1
    return (low, high)
def overlapCodes(self, region, numLevels=None):
    """Return the quadtree codes whose cells overlap *region*.

    Args:
        region: a shapely geometry to intersect with the tree domain.
        numLevels: maximum refinement depth. None falls back to
            self.numLevels; 'auto' (or a negative value) derives the
            depth from the domain/region area ratio.

    Returns:
        A list of (quadCode, level, fullyIn, mortonRange) tuples, or []
        when the region does not intersect the start quad.
    """
    # PEP 8: identity comparison with None (was `== None`).
    if numLevels is None:
        numLevels = self.numLevels
    if numLevels == 'auto' or numLevels < 0:
        # Heuristic depth: half of log2(domain area / region area).
        numLevels = int(math.ceil(math.log(self.domainRegion.area / region.area, 2) / 2.))
    if box(*self.startQuad).intersects(region):
        return self._overlapCodes(numLevels, self.startLevel, self.startQuadCode, region, *self.startQuad)[0]
    return []
def mergeConsecutiveRanges(self, mranges):
    """Fuse sorted (min, max) ranges whose endpoints are consecutive
    integers, returning the minimal equivalent list of ranges."""
    if not mranges:
        return []
    merged = []
    curMin, curMax = mranges[0]
    for nextMin, nextMax in mranges[1:]:
        if nextMin - 1 == curMax:
            # Adjacent to the running range: extend it.
            curMax = nextMax
        else:
            merged.append((curMin, curMax))
            curMin, curMax = nextMin, nextMax
    merged.append((curMin, curMax))
    return merged
def mergeRanges(self, mranges, maxRanges):
    """Reduce `mranges` to at most `maxRanges` ranges by fusing the
    pairs of neighbouring ranges separated by the smallest gaps."""
    numRanges = len(mranges)
    if numRanges <= maxRanges or numRanges < 2:
        return mranges
    numRangesToMerge = numRanges - maxRanges
    # Flatten to [min0, max0, min1, max1, ...]; diffs[i] is the gap
    # between range i's end and range i+1's start.
    b = numpy.array(numpy.array(mranges).flat)
    diffs = b[::2][1:] - b[1::2][:-1]
    # Threshold gap: gaps strictly below it are always merged; gaps
    # equal to it are merged only until the merge quota is used up.
    tDiff = sorted(diffs)[numRangesToMerge-1]
    lowerDiffs = len(diffs[diffs < tDiff])
    equalToMerge = numRangesToMerge - lowerDiffs
    equalCounter = 0
    omranges = []
    # mrangemin is None while no output range is currently open.
    mrangemin = None
    for rangeIndex in range(numRanges):
        if mrangemin == None:
            mrangemin = mranges[rangeIndex][0]
        if rangeIndex < numRanges-1:
            if diffs[rangeIndex] > tDiff:
                # Gap too large to merge: close the open range here.
                omranges.append((mrangemin, mranges[rangeIndex][1]))
                mrangemin = None
            elif diffs[rangeIndex] == tDiff:
                equalCounter += 1
                if equalCounter > equalToMerge:
                    # Quota of threshold-gap merges exhausted: close.
                    omranges.append((mrangemin, mranges[rangeIndex][1]))
                    mrangemin = None
        else:
            # Last range always closes the open output range.
            omranges.append((mrangemin, mranges[rangeIndex][1]))
    return omranges
def getAllRanges(self, codes):
    """Extract the morton range (last element) from every code tuple."""
    return [code[-1] for code in codes]
def getDiffRanges(self, codes):
    """Split the morton ranges into two lists: ranges of cells fully
    inside the query region (code[2] truthy) and partially
    overlapping ones."""
    fullyIn = [code[-1] for code in codes if code[2]]
    partial = [code[-1] for code in codes if not code[2]]
    return (fullyIn, partial)
def getCoords(self, mortonRange):
    """Decode a (min, max) morton range back into the quad it spans,
    returned as (minx, miny, maxx, maxy) with exclusive upper bounds."""
    lo, hi = mortonRange
    return (morton.DecodeMorton2DX(lo),
            morton.DecodeMorton2DY(lo),
            morton.DecodeMorton2DX(hi) + 1,
            morton.DecodeMorton2DY(hi) + 1)
def getMortonRanges(self, wkt, distinctIn = False, numLevels = None, maxRanges = None):
    """Top-level query: convert a WKT geometry into merged morton ranges.

    Returns a pair of range lists; the first list is non-empty only
    when distinctIn is True (fully-inside ranges), the second carries
    the remaining (or all merged) ranges.
    """
    codes = self.overlapCodes(loads(wkt), numLevels)
    if distinctIn:
        # Keep "fully inside" ranges apart so callers can skip exact
        # geometry tests for points that fall in them.
        (imranges, xmranges) = self.getDiffRanges(codes)
        mimranges = self.mergeConsecutiveRanges(imranges)
        mxmranges = self.mergeConsecutiveRanges(xmranges)
        logging.debug(' '.join((' #mranges:' , str(len(codes)), ' #imranges:' , str(len(imranges)), ' #xmranges:' , str(len(xmranges)), ' #mimranges:' , str(len(mimranges)), ' #mxmranges:' , str(len(mxmranges)))))
        return (mimranges, mxmranges)
    else:
        mmranges = self.mergeConsecutiveRanges(self.getAllRanges(codes))
        if maxRanges != None:
            # Further compact the list to honour the range budget.
            maxmranges = self.mergeRanges(mmranges, maxRanges)
            logging.debug('#mranges:' + str(len(codes)) + ' #mmranges:' + str(len(mmranges)) + ' #maxmranges:' + str(len(maxmranges)))
            return ([], maxmranges)
        else:
            logging.debug('#mranges:' + str(len(codes)) + ' #mmranges:' + str(len(mmranges)))
            return ([], mmranges)
def mortonToQuadCell(self, morton, level):
    # Mask off everything but the top 2*level bits of the morton code,
    # i.e. the quadtree cell containing this code at the given level.
    # NOTE(review): the parameter `morton` shadows the imported
    # `morton` module inside this method.
    return (((1 << 2*level) - 1) << ((2 * MAX_NUMBITS) - 2*level)) & morton
| [
"shapely.wkt.loads",
"de9im.patterns.contains.matches",
"morton.DecodeMorton2DY",
"de9im.patterns.intersects.matches",
"numpy.array",
"morton.DecodeMorton2DX",
"math.log",
"de9im.patterns.pattern",
"shapely.geometry.box"
] | [((500, 520), 'de9im.patterns.pattern', 'pattern', (['"""F********"""'], {}), "('F********')\n", (507, 520), False, 'from de9im.patterns import intersects, contains, pattern\n'), ((1764, 1776), 'shapely.geometry.box', 'box', (['*domain'], {}), '(*domain)\n', (1767, 1776), False, 'from shapely.geometry import box\n'), ((7900, 7928), 'morton.DecodeMorton2DX', 'morton.DecodeMorton2DX', (['minr'], {}), '(minr)\n', (7922, 7928), False, 'import morton\n'), ((7944, 7972), 'morton.DecodeMorton2DY', 'morton.DecodeMorton2DY', (['minr'], {}), '(minr)\n', (7966, 7972), False, 'import morton\n'), ((7988, 8016), 'morton.DecodeMorton2DX', 'morton.DecodeMorton2DX', (['maxr'], {}), '(maxr)\n', (8010, 8016), False, 'import morton\n'), ((8032, 8060), 'morton.DecodeMorton2DY', 'morton.DecodeMorton2DY', (['maxr'], {}), '(maxr)\n', (8054, 8060), False, 'import morton\n'), ((3044, 3072), 'de9im.patterns.intersects.matches', 'intersects.matches', (['relation'], {}), '(relation)\n', (3062, 3072), False, 'from de9im.patterns import intersects, contains, pattern\n'), ((3128, 3154), 'de9im.patterns.contains.matches', 'contains.matches', (['relation'], {}), '(relation)\n', (3144, 3154), False, 'from de9im.patterns import intersects, contains, pattern\n'), ((8233, 8243), 'shapely.wkt.loads', 'loads', (['wkt'], {}), '(wkt)\n', (8238, 8243), False, 'from shapely.wkt import loads\n'), ((4105, 4115), 'shapely.geometry.box', 'box', (['*quad'], {}), '(*quad)\n', (4108, 4115), False, 'from shapely.geometry import box\n'), ((5441, 5461), 'shapely.geometry.box', 'box', (['*self.startQuad'], {}), '(*self.startQuad)\n', (5444, 5461), False, 'from shapely.geometry import box\n'), ((6388, 6408), 'numpy.array', 'numpy.array', (['mranges'], {}), '(mranges)\n', (6399, 6408), False, 'import shapely, logging, numpy, math\n'), ((5301, 5350), 'math.log', 'math.log', (['(self.domainRegion.area / region.area)', '(2)'], {}), '(self.domainRegion.area / region.area, 2)\n', (5309, 5350), False, 'import shapely, 
logging, numpy, math\n')] |
from rl.memory.simple import SimpleMemory
from rl.hparams.utils import HParams
import numpy as np
import random
import tensorflow as tf
class SimpleMemoryTest(tf.test.TestCase):
    """Unit tests for SimpleMemory add/sample/batch behaviour.

    The same dummy observation dict was previously duplicated in five
    tests; it is factored into _observation()/_fill_memory() helpers.
    """

    def setUp(self):
        self._memory = self.get_empty_memory()

    def get_empty_memory(self):
        """Build a fresh SimpleMemory with capacity for 100 transitions."""
        hparams = HParams()
        hparams.memory_size = 100
        return SimpleMemory(hparams, 0)

    def _observation(self, discount=0):
        """One dummy transition; `discount` lets tests tag transitions."""
        return {
            'last_state': np.zeros(2),
            'action': np.zeros(2),
            'reward': 0,
            'discount': discount,
            'done': False,
            'state': np.zeros(2)
        }

    def _fill_memory(self, memory, count):
        """Add `count` identical dummy transitions to `memory`."""
        for _ in range(count):
            memory.add_sample(**self._observation())

    def test_samples_from_empty_memory(self):
        memory = self.get_empty_memory()
        with self.assertRaises(AssertionError,
                               msg="Cannot sample from empty memory."):
            memory.sample(2)

    def test_get_sequence_from_added_transition(self):
        memory = self.get_empty_memory()
        self._fill_memory(memory, 2)
        rewards = memory.get_sequence('reward')
        self.assertAllEqual(rewards, np.zeros(2))

    def test_sample_from_added_transition(self):
        memory = self.get_empty_memory()
        self._fill_memory(memory, 2)
        sample = memory.sample(2)
        self.assertAllEqual(sample.reward, np.zeros(2))

    def test_shuffled_batches_from_added_transition(self):
        memory = self.get_empty_memory()
        self._fill_memory(memory, 4)
        for batch in memory.shuffled_batches(2):
            self.assertAllEqual(batch.reward, np.zeros(2))

    def test_set_sequence_on_added_transitions(self):
        memory = self.get_empty_memory()
        self._fill_memory(memory, 4)
        memory.set_sequence('reward', np.ones(4))
        rewards = memory.get_sequence('reward')
        self.assertAllEqual(rewards, np.ones(4))

    def test_unique_occurence_of_transitions_in_shuffled_batches(self):
        memory = self.get_empty_memory()
        for i in range(4):
            memory.add_sample(**self._observation(discount=0.99 * i))
        unique_transitions = []
        for batch in memory.shuffled_batches(1):
            self.assertTrue(batch.discount not in unique_transitions)
            unique_transitions.append(batch.discount)
# Run all TestCase methods when executed as a script.
if __name__ == '__main__':
    tf.test.main()
| [
"tensorflow.test.main",
"rl.memory.simple.SimpleMemory",
"numpy.zeros",
"numpy.ones",
"rl.hparams.utils.HParams"
] | [((3008, 3022), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3020, 3022), True, 'import tensorflow as tf\n'), ((289, 298), 'rl.hparams.utils.HParams', 'HParams', ([], {}), '()\n', (296, 298), False, 'from rl.hparams.utils import HParams\n'), ((340, 364), 'rl.memory.simple.SimpleMemory', 'SimpleMemory', (['hparams', '(0)'], {}), '(hparams, 0)\n', (352, 364), False, 'from rl.memory.simple import SimpleMemory\n'), ((1015, 1026), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1023, 1026), True, 'import numpy as np\n'), ((1448, 1459), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1456, 1459), True, 'import numpy as np\n'), ((2310, 2320), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2317, 2320), True, 'import numpy as np\n'), ((2399, 2409), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2406, 2409), True, 'import numpy as np\n'), ((741, 752), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (749, 752), True, 'import numpy as np\n'), ((774, 785), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (782, 785), True, 'import numpy as np\n'), ((879, 890), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (887, 890), True, 'import numpy as np\n'), ((1182, 1193), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1190, 1193), True, 'import numpy as np\n'), ((1215, 1226), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1223, 1226), True, 'import numpy as np\n'), ((1320, 1331), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1328, 1331), True, 'import numpy as np\n'), ((1625, 1636), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1633, 1636), True, 'import numpy as np\n'), ((1658, 1669), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1666, 1669), True, 'import numpy as np\n'), ((1763, 1774), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1771, 1774), True, 'import numpy as np\n'), ((1907, 1918), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1915, 1918), True, 'import numpy as np\n'), ((2079, 2090), 
'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2087, 2090), True, 'import numpy as np\n'), ((2112, 2123), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2120, 2123), True, 'import numpy as np\n'), ((2217, 2228), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2225, 2228), True, 'import numpy as np\n'), ((2588, 2599), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2596, 2599), True, 'import numpy as np\n'), ((2621, 2632), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2629, 2632), True, 'import numpy as np\n'), ((2733, 2744), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2741, 2744), True, 'import numpy as np\n')] |
"""Fatigue-life distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class fatigue_life(Dist):
    """Fatigue-life distribution (standardised Birnbaum-Saunders kernel)."""

    def __init__(self, c=0):
        # c is the shape parameter forwarded to the Dist base class.
        Dist.__init__(self, c=c)

    def _pdf(self, x, c):
        # Probability density function.
        output = (x+1)/(2*c*numpy.sqrt(2*numpy.pi*x**3))
        output *= numpy.exp(-(x-1)**2/(2.0*x*c**2))
        # At x == 0 the expression above is 0/0 (NaN); define it as 0.
        output[(x == 0) & numpy.isnan(output)] = 0
        return output

    def _cdf(self, x, c):
        # Cumulative distribution via the standard normal CDF (ndtr).
        return special.ndtr(1.0/c*(numpy.sqrt(x)-1.0/numpy.sqrt(x)))

    def _ppf(self, q, c):
        # Percent point (inverse CDF) via the normal quantile (ndtri).
        tmp = c*special.ndtri(q)
        return 0.25*(tmp + numpy.sqrt(tmp**2 + 4))**2

    def _lower(self, c):
        # Lower bound of the distribution's support.
        return 0.
class FatigueLife(Add):
    """
    Fatigue-Life or Birmbaum-Sanders distribution
    Args:
        shape (float, Dist):
            Shape parameter
        scale (float, Dist):
            Scaling parameter
        shift (float, Dist):
            Location parameter
    Examples:
        >>> distribution = chaospy.FatigueLife(2, 2, 1)
        >>> distribution
        FatigueLife(scale=2, shape=2, shift=1)
        >>> q = numpy.linspace(0,1,6)[1:-1]
        >>> distribution.inv(q).round(4)
        array([ 1.4332,  2.2113,  4.3021, 10.2334])
        >>> distribution.fwd(distribution.inv(q)).round(4)
        array([0.2, 0.4, 0.6, 0.8])
        >>> distribution.pdf(distribution.inv(q)).round(4)
        array([0.4223, 0.1645, 0.0603, 0.0198])
        >>> distribution.sample(4).round(4)
        array([ 5.3231,  1.2621, 26.5603,  2.8292])
        >>> distribution.mom(1).round(4)
        7.0
    """
    def __init__(self, shape=1, scale=1, shift=0):
        # Record constructor arguments for repr, then express the
        # distribution as the standard kernel scaled and shifted.
        self._repr = {"shape": shape, "scale": scale, "shift": shift}
        Add.__init__(self, left=fatigue_life(shape)*scale, right=shift)
| [
"scipy.special.ndtri",
"numpy.exp",
"numpy.isnan",
"numpy.sqrt"
] | [((369, 414), 'numpy.exp', 'numpy.exp', (['(-(x - 1) ** 2 / (2.0 * x * c ** 2))'], {}), '(-(x - 1) ** 2 / (2.0 * x * c ** 2))\n', (378, 414), False, 'import numpy\n'), ((615, 631), 'scipy.special.ndtri', 'special.ndtri', (['q'], {}), '(q)\n', (628, 631), False, 'from scipy import special\n'), ((322, 355), 'numpy.sqrt', 'numpy.sqrt', (['(2 * numpy.pi * x ** 3)'], {}), '(2 * numpy.pi * x ** 3)\n', (332, 355), False, 'import numpy\n'), ((429, 448), 'numpy.isnan', 'numpy.isnan', (['output'], {}), '(output)\n', (440, 448), False, 'import numpy\n'), ((538, 551), 'numpy.sqrt', 'numpy.sqrt', (['x'], {}), '(x)\n', (548, 551), False, 'import numpy\n'), ((659, 683), 'numpy.sqrt', 'numpy.sqrt', (['(tmp ** 2 + 4)'], {}), '(tmp ** 2 + 4)\n', (669, 683), False, 'import numpy\n'), ((556, 569), 'numpy.sqrt', 'numpy.sqrt', (['x'], {}), '(x)\n', (566, 569), False, 'import numpy\n')] |
#!/usr/bin/env python
import h5py
import numpy as np
import struct
import sys
import argparse
import json
import itertools
class CompressActor(object):
    """Base class for compress/decompress actions.

    Holds the three file paths involved; subclasses implement act().
    """

    # Big-endian 32-bit float; one record per stored weight.
    RECORD_FMT = '>f'

    def __init__(self, fname, ofile, weight_file):
        self.fname, self.ofile, self.weight_file = fname, ofile, weight_file

    def act(self):
        """Perform the (de)compression; must be overridden."""
        raise NotImplementedError()
class Compressor(CompressActor):
    """Split an HDF5 model file into a JSON structure description
    (self.ofile) plus a flat binary file of float32 weights
    (self.weight_file)."""

    def act(self):
        # Accumulates every weight value, in traversal order; the JSON
        # head only records shapes, so order must match Decompressor.
        self.out_floats = []
        out_struct = {
            'groups' : []
        }
        with h5py.File(self.fname, 'r') as dataset:
            # Expected layout: two levels — groups of datasets.
            for key in dataset.keys():
                subgroup = dataset[key]
                outgroup = {
                    'datasets' : []
                }
                assert type(subgroup) == h5py.Group
                for gkey in subgroup.keys():
                    datablock = subgroup[gkey]
                    assert type(datablock) == h5py.Dataset
                    outgroup['datasets'].append(
                        [gkey, self.output_datablock(datablock)])
                # Attribute values are coerced to int for JSON output.
                outgroup['attr'] = list(map(lambda t: (t[0], int(t[1])),
                                            subgroup.attrs.items()))
                out_struct['groups'].append([key, outgroup])
            out_struct['attr'] = list(map(lambda t: (t[0], int(t[1])),
                                          dataset.attrs.items()))
        self.output_head(out_struct)

    def output_datablock(self, datablock):
        # Flatten the dataset's values onto the weight stream and
        # return its shape for the JSON head.
        self.out_floats += datablock[:].flatten().tolist()
        return list(datablock.shape)

    def write_weight(self, weight, ofile):
        # One big-endian float32 record per weight.
        ofile.write(struct.pack(self.RECORD_FMT, weight))

    def output_head(self, out_struct):
        # Write the JSON structure, then the binary weight stream.
        with open(self.ofile, 'w') as ofile:
            json.dump(out_struct, ofile)
        with open(self.weight_file, 'wb') as ofile:
            for item in self.out_floats:
                self.write_weight(item, ofile)
class Decompressor(CompressActor):
    """Rebuild an HDF5 model file (self.ofile) from the JSON structure
    (self.fname) and binary weight stream (self.weight_file) produced
    by Compressor."""

    def act(self):
        with open(self.fname, 'r') as ifile:
            item = json.load(ifile)
        with open(self.weight_file, 'rb') as weightfile:
            # Read fixed-size float32 records until EOF.
            chunksize = struct.calcsize(self.RECORD_FMT)
            self.weights = []
            chunk = weightfile.read(chunksize)
            while chunk != b'':
                self.weights.append(self.read_weight(chunk))
                chunk = weightfile.read(chunksize)
        self.output(item)

    def read_weight(self, chunk):
        # unpack returns a 1-tuple; take the single float.
        return struct.unpack(self.RECORD_FMT, chunk)[0]

    def calc_num_elems(self, dimensions):
        # Product of the shape's dimensions = element count.
        num_elems = 1
        for dimension in dimensions:
            num_elems *= dimension
        return num_elems

    def output(self, item):
        # Recreate groups/datasets/attributes; `ctr` walks the flat
        # weight stream in the same order Compressor wrote it.
        with h5py.File(self.ofile, 'w') as ofile:
            ctr = 0
            for agroup in item['groups']:
                key, groups = agroup
                grp = ofile.create_group(key)
                for attr in groups['attr']:
                    grp.attrs[attr[0]] = attr[1]
                for adataset in groups['datasets']:
                    name, shape = adataset
                    num_elems = self.calc_num_elems(shape)
                    data = np.reshape(self.weights[ctr:num_elems + ctr], shape)
                    grp.create_dataset(name, data=data, dtype=np.float32)
                    ctr += num_elems
            for attr in item['attr']:
                ofile.attrs[attr[0]] = attr[1]
def main(args):
    """Dispatch to Compressor or Decompressor based on the CLI flags."""
    assert args.compress or args.decompress, (
        'Must provide compress or decompress argument')
    if args.compress:
        actor_cls = Compressor
    else:
        actor_cls = Decompressor
    actor_cls(args.ifile, args.ofile, args.weight_file).act()
# Command-line interface: positional input/output/weight paths plus
# mutually intended -c/-d mode flags (validated inside main()).
if __name__=='__main__':
    parser = argparse.ArgumentParser(
        description='Compress and decompress model. ')
    parser.add_argument('ifile', help='Input file. ')
    parser.add_argument('ofile', help='Output file. ')
    parser.add_argument('weight_file', help='File for weights. ')
    parser.add_argument('-c', '--compress', action='store_true')
    parser.add_argument('-d', '--decompress', action='store_true')
    main(parser.parse_args())
| [
"json.dump",
"h5py.File",
"json.load",
"argparse.ArgumentParser",
"struct.unpack",
"struct.pack",
"struct.calcsize",
"numpy.reshape"
] | [((3701, 3771), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compress and decompress model. """'}), "(description='Compress and decompress model. ')\n", (3724, 3771), False, 'import argparse\n'), ((532, 558), 'h5py.File', 'h5py.File', (['self.fname', '"""r"""'], {}), "(self.fname, 'r')\n", (541, 558), False, 'import h5py\n'), ((1632, 1668), 'struct.pack', 'struct.pack', (['self.RECORD_FMT', 'weight'], {}), '(self.RECORD_FMT, weight)\n', (1643, 1668), False, 'import struct\n'), ((1767, 1795), 'json.dump', 'json.dump', (['out_struct', 'ofile'], {}), '(out_struct, ofile)\n', (1776, 1795), False, 'import json\n'), ((2055, 2071), 'json.load', 'json.load', (['ifile'], {}), '(ifile)\n', (2064, 2071), False, 'import json\n'), ((2153, 2185), 'struct.calcsize', 'struct.calcsize', (['self.RECORD_FMT'], {}), '(self.RECORD_FMT)\n', (2168, 2185), False, 'import struct\n'), ((2483, 2520), 'struct.unpack', 'struct.unpack', (['self.RECORD_FMT', 'chunk'], {}), '(self.RECORD_FMT, chunk)\n', (2496, 2520), False, 'import struct\n'), ((2728, 2754), 'h5py.File', 'h5py.File', (['self.ofile', '"""w"""'], {}), "(self.ofile, 'w')\n", (2737, 2754), False, 'import h5py\n'), ((3184, 3236), 'numpy.reshape', 'np.reshape', (['self.weights[ctr:num_elems + ctr]', 'shape'], {}), '(self.weights[ctr:num_elems + ctr], shape)\n', (3194, 3236), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*
import numpy as np
import geo
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from aa_encoder import AminoacidEncoder
import pathlib
import os
def MapDis(coo):
    """Pairwise Euclidean distance matrix of the coordinates, as float32."""
    condensed = pdist(coo, metric='euclidean')
    return squareform(condensed).astype('float32')
class Atom(object):
    """A single atom: amino-acid code, sequence index and 3D position."""

    def __init__(self, aminoacid, index, x, y, z):
        self.aa = aminoacid
        self.index = index
        self.x, self.y, self.z = x, y, z
class Arraylize(object):
    """Rasterise a set of atoms into a 2D grid with self.dim channels.

    Channel layout (see draw_atom): [z, sequence index, amino-acid
    encoding...] — the encoder output must fill the remaining
    self.dim - 2 channels. The grid is built with `pad` extra units on
    every side, then cropped, height-limited and normalised in run().
    """

    def __init__(self, resolution, size, atoms, indexs, aa_encoder):
        self.atoms = atoms
        self.pad = 4
        self.ar = size + self.pad
        self.idx_ary = indexs
        # World units per pixel.
        self.scale = size * 2 / resolution
        # Grid resolution including the padding border.
        self.res = resolution + int(2*self.pad/self.scale)
        self.dim = 5
        self.array = np.zeros(
            [self.res, self.res, self.dim], dtype='float32', order='C')
        self.aa_encoder = aa_encoder
        # rec: (x, y) pixel -> Atom already drawn there.
        self.rec = {}
        # site: Atom -> [x, y] pixel (filled in draw_connection).
        self.site = {}
        self.run()

    def pixel_center_dis(self, dot):
        # Offset of the atom from its pixel's centre, in pixel units.
        dot.dis_x = dot.x / self.scale % 1 - 0.5
        dot.dis_y = dot.y / self.scale % 1 - 0.5
        dot.dis_sqrt = dot.dis_x ** 2 + dot.dis_y ** 2
    def closer_pixel(self, dot):
        # Move the atom to the nearest free neighbouring pixel,
        # trying neighbours in order of increasing displacement.
        x_sign = int(np.sign(dot.dis_x))
        y_sign = int(np.sign(dot.dis_y))
        if abs(dot.dis_x) < abs(dot.dis_y):
            neighbors = [(0, y_sign), (x_sign, 0), (x_sign, y_sign), (-x_sign, 0),
                         (-x_sign, y_sign), (0, -y_sign), (x_sign, -y_sign), (-x_sign, -y_sign)]
        else:
            neighbors = [(x_sign, 0), (0, y_sign), (x_sign, y_sign), (0, -y_sign),
                         (x_sign, -y_sign), (-x_sign, 0), (-x_sign, y_sign), (-x_sign, -y_sign)]
        for (i, j) in neighbors:
            if -1 < dot.x_ary + i < self.res and -1 < dot.y_ary + j < self.res:
                # Last channel == 0 means the pixel is still free.
                if self.array[dot.x_ary + i, dot.y_ary + j, -1] == 0:
                    dot.x_ary = dot.x_ary + i
                    dot.y_ary = dot.y_ary + j
                    self.draw_atom(dot)
                    break
    def closer_dot(self, dot1, dot2):
        # Two atoms map to the same pixel: the one closer to the pixel
        # centre keeps it, the other is relocated to a neighbour.
        self.pixel_center_dis(dot1)
        self.pixel_center_dis(dot2)
        if dot1.dis_sqrt > dot2.dis_sqrt:
            self.closer_pixel(dot1)
            self.draw_atom(dot2)
        else:
            self.closer_pixel(dot2)
    def draw_atom(self, dot):
        # Write [z, index, aa-encoding...] into the atom's pixel.
        self.array[dot.x_ary, dot.y_ary] = [
            dot.z, dot.index] + list(self.aa_encoder.encode(dot.aa)[0])
        self.rec.update({(dot.x_ary, dot.y_ary): dot})
    def draw_dot(self, x, y, dot, z_add, idx_add, property_add):
        # Draw an interpolated point of a backbone connection. Real
        # atoms (entries in self.rec) are never overwritten; among
        # interpolated points the highest z wins.
        if self.rec.get((x, y)) is None:
            property_inter = list(
                self.aa_encoder.encode(dot.aa)[0] + property_add)
            if self.array[x, y, 0]:
                if dot.z + z_add > self.array[x, y, 0]:
                    self.array[x, y] = [dot.z + z_add,
                                        dot.index + idx_add] + property_inter
            else:
                self.array[x, y] = [dot.z + z_add,
                                    dot.index + idx_add] + property_inter
    def dots_connection(self, dot1, dot2):
        # Rasterise the segment between two atoms (Bresenham-like walk
        # along the longer axis), linearly interpolating z, index and
        # the amino-acid properties between the endpoints.
        z_dis = dot2.z - dot1.z
        x_sign = int(np.sign(dot2.x_ary - dot1.x_ary))
        y_sign = int(np.sign(dot2.y_ary - dot1.y_ary))
        x_dis = abs(dot2.x_ary - dot1.x_ary)
        y_dis = abs(dot2.y_ary - dot1.y_ary)
        long_step = max(x_dis, y_dis)
        short_step = min(x_dis, y_dis)
        property_dis = self.aa_encoder.encode(
            dot2.aa)[0]-self.aa_encoder.encode(dot1.aa)[0]
        if short_step == 0:
            if x_dis > y_dis:
                x_step, y_step = 1, 0
            else:
                x_step, y_step = 0, 1
        else:
            slope = long_step / short_step
            if x_dis > y_dis:
                x_step, y_step = 1, 1 / slope
            else:
                x_step, y_step = 1 / slope, 1
        for step in range(1, long_step):
            self.draw_dot(round(dot1.x_ary + step * x_step * x_sign), round(dot1.y_ary + step * y_step * y_sign),
                          dot1, z_dis * step / (long_step + 1), step / (long_step + 1), property_dis * step / (long_step + 1))
    def draw_connection(self):
        # Invert rec into site, then connect atoms that are sequence
        # neighbours (index difference 1; -1 marks a special index).
        for (x, y) in self.rec.keys():
            self.site.update({self.rec[(x, y)]: [x, y]})
        for i in range(len(self.atoms) - 1):
            if self.atoms[i + 1].index - self.atoms[i].index == 1 or self.atoms[i].index == -1:
                self.dots_connection(self.atoms[i], self.atoms[i + 1])
    def crop_image(self):
        # Drop the padding border added in __init__.
        padding = int(self.pad / self.scale)
        self.array = self.array[padding:self.res -
                                padding, padding:self.res-padding]
    def height_limit(self):
        # Zero out pixels whose |z| exceeds the box half-size.
        self.array[abs(self.array[:, :, 0]) > self.ar - self.pad] = 0
    def height_norm(self):
        # Normalise the z channel to [-1, 1].
        self.array[:, :, 0] /= self.ar - self.pad
    def index_norm(self, norm_lenght=200):
        # Normalise the sequence-index channel.
        # NOTE(review): parameter name "norm_lenght" is a typo for
        # "norm_length"; kept for backward compatibility.
        self.array[:, :, 1] /= norm_lenght
    def run(self):
        # Place every atom on the grid (resolving pixel collisions),
        # then draw backbone connections and post-process the image.
        for atom in self.atoms:
            atom.x_ary = int(atom.x // self.scale + self.res // 2)
            atom.y_ary = int(atom.y // self.scale + self.res // 2)
            if self.rec.get((atom.x_ary, atom.y_ary)):
                self.closer_dot(self.rec[(atom.x_ary, atom.y_ary)], atom)
            else:
                self.draw_atom(atom)
        self.draw_connection()
        self.crop_image()
        self.height_limit()
        self.height_norm()
        self.index_norm()
class StrucRep(object):
    """Build structural representations of a protein from its C-alpha
    coordinates and sequence.

    struc_format selects the representation computed by
    self.struc_rep(ca, seq): 'knn', 'image', 'conmap' or 'dismap'.
    """

    def __init__(self, struc_format='knn', aa_format='property', index_norm=200):
        self.aa_encoder = AminoacidEncoder(aa_format)
        # Divisor used to normalise relative sequence indices.
        self.index_norm = index_norm
        # Bind the requested representation as self.struc_rep.
        if struc_format == 'knn':
            self.struc_rep = self.knn_struc_rep
        elif struc_format == 'image':
            self.struc_rep = self.image_struc_rep
        elif struc_format == 'conmap':
            self.struc_rep = self.contact_map
        elif struc_format == 'dismap':
            self.struc_rep = self.distance_map
    def knn_struc_rep(self, ca, seq, k=15):
        # Per-residue features of its k nearest neighbours:
        # concatenation of [orientation(3), distance(1),
        # relative index(1), aa features] along the last axis.
        dismap = MapDis(ca)
        nn_indexs = np.argsort(dismap, axis=1)[:, :k]
        # Sequence offset of each neighbour from the residue itself.
        relative_indexs = nn_indexs.reshape(-1, k, 1) - \
            nn_indexs[:, 0].reshape(-1, 1, 1).astype('float32')
        relative_indexs /= self.index_norm
        seq_embeded = self.aa_encoder.encode(seq)
        knn_feature = np.array(seq_embeded)[nn_indexs]
        knn_distance = [dismap[i][nn_indexs[i]] for i in range(len(nn_indexs))]
        knn_distance = np.array(knn_distance).reshape(-1, k, 1)
        knn_orient = []
        for i in range(len(nn_indexs)):
            # Unit vectors towards each neighbour; the residue itself
            # (first neighbour) gets a zero vector.
            orient = geo.norm(ca[nn_indexs[i]][1:] - ca[i])
            knn_orient.append(np.concatenate([np.zeros((1, 3)), orient]))
        knn_orient = np.array(knn_orient)
        knn_rep = np.concatenate(
            (knn_orient, knn_distance, relative_indexs, knn_feature), -1)
        return knn_rep.astype('float32')
    def contact_map(self, ca, seq='', cutoff=8):
        # Binary contact map: 1 where C-alpha distance < cutoff.
        dismap = MapDis(ca)
        conmap = np.zeros_like(dismap)
        conmap[dismap < cutoff] = 1.
        return conmap.astype('float32')
    def distance_map(self, ca, seq=''):
        # Raw pairwise C-alpha distance matrix.
        return MapDis(ca).astype('float32')
    def image_struc_rep(self, ca, seq, resolution=128, box_size=8, compress=True, pad=4):
        # One local 2D image per residue: atoms within a box around the
        # residue are rotated into a canonical frame (backbone direction
        # aligned with fixed axes) and rasterised with Arraylize.
        arrays = []
        tgt_x = np.array([0, 1, 0])
        rot_axis_y = tgt_x
        tgt_y = np.array([1, 0, 0])
        # Backbone direction vectors, padded so every residue has one.
        ori_x = geo.norm(ca[1:] - ca[:-1])
        ori_y = np.concatenate((ori_x[1:], -(ori_x[np.newaxis, -2])))
        centers = ca.copy()
        ori_x = np.concatenate((ori_x, ori_x[np.newaxis, -1]))
        ori_y = np.concatenate((ori_y, ori_y[np.newaxis, -1]))
        # First rotation: bring ori_x onto tgt_x.
        rot_axis_x = geo.norm(np.cross(ori_x, tgt_x))
        tor_x = geo.get_torsion(ori_x, tgt_x, rot_axis_x)
        ori_y_rot = geo.rotation(ori_y, rot_axis_x, tor_x.reshape(-1, 1))
        # Second rotation: align the projection of ori_y with tgt_y.
        ori_y_proj = ori_y_rot.copy()
        ori_y_proj[:, 1] = 0.
        ori_y_proj = geo.norm(ori_y_proj)
        l_ori_y_proj = len(ori_y_proj)
        tor_y = geo.get_torsion(ori_y_proj,
                                np.tile(tgt_y, (l_ori_y_proj, 1)),
                                np.tile(rot_axis_y, (l_ori_y_proj, 1)))
        for i, center in enumerate(centers):
            ca_ = ca - center
            # Candidate atoms within the circumscribing sphere of the
            # padded box; the exact per-axis test happens below.
            global_indexs = np.where(geo.get_len(
                ca_) < (box_size + pad)*np.sqrt(3))[0]
            local_indexs = global_indexs - i
            num_local_atoms = len(global_indexs)
            ca_xrot = geo.rotation(ca_[global_indexs],
                                  np.tile(rot_axis_x[i],
                                          (num_local_atoms, 1)),
                                  np.tile(tor_x[i], (num_local_atoms, 1)))
            ca_rot = geo.rotation(ca_xrot,
                                 np.tile(rot_axis_y, (num_local_atoms, 1)),
                                 np.tile(tor_y[i], (num_local_atoms, 1)))
            local_atoms = []
            for j, idx in enumerate(global_indexs):
                if np.max(np.abs(ca_rot[j])) < box_size + pad:
                    local_atoms.append(
                        Atom(seq[idx], local_indexs[j], ca_rot[j][0], ca_rot[j][1], ca_rot[j][2]))
            arrays.append(Arraylize(resolution=resolution,
                                     size=box_size,
                                     atoms=local_atoms,
                                     indexs=local_indexs,
                                     aa_encoder=self.aa_encoder).array)
        arrays = np.array(arrays, dtype='float32')
        if compress:
            # Sparse form: keep shape, non-zero pixel coordinates and
            # their channel values (pixels keyed on the last channel).
            shape = arrays.shape
            keys = arrays[:, :, :, -1].nonzero()
            values = arrays[keys]
            com_ary = [shape, keys, values.astype('float32')]
            return com_ary
        else:
            return arrays
# Batch conversion: for every coordinate file in ./data/<dataset>/coo,
# load coordinates and the matching sequence, compute the selected
# representation and save it under ./data/<dataset>/<struc_format>.
if __name__ == "__main__":
    dataset = 'test'
    struc_format = 'dismap'
    coo_path = './data/%s/coo' % dataset
    seq_path = './data/%s/seq' % dataset
    strucrep = StrucRep(struc_format=struc_format)
    struc_path = './data/%s/%s' % (dataset, struc_format)
    pathlib.Path(struc_path).mkdir(parents=True, exist_ok=True)
    for filename in os.listdir(coo_path):
        coo = np.load(os.path.join(coo_path, filename))
        with open(os.path.join(seq_path, "%s.txt" % filename[:-4])) as f:
            seq = f.read()
        # coo[1::4]: every 4th atom starting at offset 1 — presumably
        # the C-alpha of each residue; TODO confirm atom ordering.
        struc = strucrep.struc_rep(coo[1::4], seq)
        np.save(os.path.join(struc_path, filename), struc)
| [
"numpy.zeros_like",
"numpy.abs",
"numpy.zeros",
"aa_encoder.AminoacidEncoder",
"numpy.cross",
"numpy.argsort",
"geo.get_torsion",
"pathlib.Path",
"geo.norm",
"numpy.array",
"numpy.tile",
"scipy.spatial.distance.pdist",
"numpy.sign",
"geo.get_len",
"os.path.join",
"os.listdir",
"numpy... | [((10520, 10540), 'os.listdir', 'os.listdir', (['coo_path'], {}), '(coo_path)\n', (10530, 10540), False, 'import os\n'), ((865, 933), 'numpy.zeros', 'np.zeros', (['[self.res, self.res, self.dim]'], {'dtype': '"""float32"""', 'order': '"""C"""'}), "([self.res, self.res, self.dim], dtype='float32', order='C')\n", (873, 933), True, 'import numpy as np\n'), ((5815, 5842), 'aa_encoder.AminoacidEncoder', 'AminoacidEncoder', (['aa_format'], {}), '(aa_format)\n', (5831, 5842), False, 'from aa_encoder import AminoacidEncoder\n'), ((7008, 7028), 'numpy.array', 'np.array', (['knn_orient'], {}), '(knn_orient)\n', (7016, 7028), True, 'import numpy as np\n'), ((7050, 7126), 'numpy.concatenate', 'np.concatenate', (['(knn_orient, knn_distance, relative_indexs, knn_feature)', '(-1)'], {}), '((knn_orient, knn_distance, relative_indexs, knn_feature), -1)\n', (7064, 7126), True, 'import numpy as np\n'), ((7282, 7303), 'numpy.zeros_like', 'np.zeros_like', (['dismap'], {}), '(dismap)\n', (7295, 7303), True, 'import numpy as np\n'), ((7602, 7621), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (7610, 7621), True, 'import numpy as np\n'), ((7667, 7686), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (7675, 7686), True, 'import numpy as np\n'), ((7704, 7730), 'geo.norm', 'geo.norm', (['(ca[1:] - ca[:-1])'], {}), '(ca[1:] - ca[:-1])\n', (7712, 7730), False, 'import geo\n'), ((7748, 7799), 'numpy.concatenate', 'np.concatenate', (['(ori_x[1:], -ori_x[np.newaxis, -2])'], {}), '((ori_x[1:], -ori_x[np.newaxis, -2]))\n', (7762, 7799), True, 'import numpy as np\n'), ((7850, 7896), 'numpy.concatenate', 'np.concatenate', (['(ori_x, ori_x[np.newaxis, -1])'], {}), '((ori_x, ori_x[np.newaxis, -1]))\n', (7864, 7896), True, 'import numpy as np\n'), ((7914, 7960), 'numpy.concatenate', 'np.concatenate', (['(ori_y, ori_y[np.newaxis, -1])'], {}), '((ori_y, ori_y[np.newaxis, -1]))\n', (7928, 7960), True, 'import numpy as np\n'), ((8035, 8076), 'geo.get_torsion', 
'geo.get_torsion', (['ori_x', 'tgt_x', 'rot_axis_x'], {}), '(ori_x, tgt_x, rot_axis_x)\n', (8050, 8076), False, 'import geo\n'), ((8244, 8264), 'geo.norm', 'geo.norm', (['ori_y_proj'], {}), '(ori_y_proj)\n', (8252, 8264), False, 'import geo\n'), ((9842, 9875), 'numpy.array', 'np.array', (['arrays'], {'dtype': '"""float32"""'}), "(arrays, dtype='float32')\n", (9850, 9875), True, 'import numpy as np\n'), ((1307, 1325), 'numpy.sign', 'np.sign', (['dot.dis_x'], {}), '(dot.dis_x)\n', (1314, 1325), True, 'import numpy as np\n'), ((1349, 1367), 'numpy.sign', 'np.sign', (['dot.dis_y'], {}), '(dot.dis_y)\n', (1356, 1367), True, 'import numpy as np\n'), ((3319, 3351), 'numpy.sign', 'np.sign', (['(dot2.x_ary - dot1.x_ary)'], {}), '(dot2.x_ary - dot1.x_ary)\n', (3326, 3351), True, 'import numpy as np\n'), ((3375, 3407), 'numpy.sign', 'np.sign', (['(dot2.y_ary - dot1.y_ary)'], {}), '(dot2.y_ary - dot1.y_ary)\n', (3382, 3407), True, 'import numpy as np\n'), ((6327, 6353), 'numpy.argsort', 'np.argsort', (['dismap'], {'axis': '(1)'}), '(dismap, axis=1)\n', (6337, 6353), True, 'import numpy as np\n'), ((6603, 6624), 'numpy.array', 'np.array', (['seq_embeded'], {}), '(seq_embeded)\n', (6611, 6624), True, 'import numpy as np\n'), ((6872, 6910), 'geo.norm', 'geo.norm', (['(ca[nn_indexs[i]][1:] - ca[i])'], {}), '(ca[nn_indexs[i]][1:] - ca[i])\n', (6880, 6910), False, 'import geo\n'), ((7992, 8014), 'numpy.cross', 'np.cross', (['ori_x', 'tgt_x'], {}), '(ori_x, tgt_x)\n', (8000, 8014), True, 'import numpy as np\n'), ((8383, 8416), 'numpy.tile', 'np.tile', (['tgt_y', '(l_ori_y_proj, 1)'], {}), '(tgt_y, (l_ori_y_proj, 1))\n', (8390, 8416), True, 'import numpy as np\n'), ((8451, 8489), 'numpy.tile', 'np.tile', (['rot_axis_y', '(l_ori_y_proj, 1)'], {}), '(rot_axis_y, (l_ori_y_proj, 1))\n', (8458, 8489), True, 'import numpy as np\n'), ((10437, 10461), 'pathlib.Path', 'pathlib.Path', (['struc_path'], {}), '(struc_path)\n', (10449, 10461), False, 'import pathlib\n'), ((10565, 10597), 
'os.path.join', 'os.path.join', (['coo_path', 'filename'], {}), '(coo_path, filename)\n', (10577, 10597), False, 'import os\n'), ((10771, 10805), 'os.path.join', 'os.path.join', (['struc_path', 'filename'], {}), '(struc_path, filename)\n', (10783, 10805), False, 'import os\n'), ((257, 287), 'scipy.spatial.distance.pdist', 'pdist', (['coo'], {'metric': '"""euclidean"""'}), "(coo, metric='euclidean')\n", (262, 287), False, 'from scipy.spatial.distance import pdist\n'), ((6741, 6763), 'numpy.array', 'np.array', (['knn_distance'], {}), '(knn_distance)\n', (6749, 6763), True, 'import numpy as np\n'), ((8867, 8911), 'numpy.tile', 'np.tile', (['rot_axis_x[i]', '(num_local_atoms, 1)'], {}), '(rot_axis_x[i], (num_local_atoms, 1))\n', (8874, 8911), True, 'import numpy as np\n'), ((8993, 9032), 'numpy.tile', 'np.tile', (['tor_x[i]', '(num_local_atoms, 1)'], {}), '(tor_x[i], (num_local_atoms, 1))\n', (9000, 9032), True, 'import numpy as np\n'), ((9113, 9154), 'numpy.tile', 'np.tile', (['rot_axis_y', '(num_local_atoms, 1)'], {}), '(rot_axis_y, (num_local_atoms, 1))\n', (9120, 9154), True, 'import numpy as np\n'), ((9191, 9230), 'numpy.tile', 'np.tile', (['tor_y[i]', '(num_local_atoms, 1)'], {}), '(tor_y[i], (num_local_atoms, 1))\n', (9198, 9230), True, 'import numpy as np\n'), ((10618, 10666), 'os.path.join', 'os.path.join', (['seq_path', "('%s.txt' % filename[:-4])"], {}), "(seq_path, '%s.txt' % filename[:-4])\n", (10630, 10666), False, 'import os\n'), ((6958, 6974), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (6966, 6974), True, 'import numpy as np\n'), ((8608, 8624), 'geo.get_len', 'geo.get_len', (['ca_'], {}), '(ca_)\n', (8619, 8624), False, 'import geo\n'), ((9344, 9361), 'numpy.abs', 'np.abs', (['ca_rot[j]'], {}), '(ca_rot[j])\n', (9350, 9361), True, 'import numpy as np\n'), ((8662, 8672), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (8669, 8672), True, 'import numpy as np\n')] |
import h5py
import numpy as np
import pandas as pd
def generate_database_info():
    """Build 'CVD2014_info.csv' (columns: video_name, MOS) from 'CVD2014info.mat'.

    The .mat file is read via h5py (MATLAB v7.3 / HDF5 layout). `video_names`
    holds references that dereference to arrays of character code points;
    `scores` holds the per-video MOS values.

    Fix: the original never closed the HDF5 file handle; open it with a
    context manager so it is released even if a read fails.
    """
    info_path = 'CVD2014info.mat'
    video_names_list = []
    scores_list = []
    with h5py.File(info_path, 'r') as data:
        video_names = data['video_names']
        scores = data['scores']
        for idx in range(video_names.shape[1]):
            video_name = video_names[0][idx]
            score = scores[0][idx]
            obj = data[video_name]
            # Decode the stored code points into a path string, e.g.
            # 'Test1/City/Test01_City_D01.avi'
            name = ''.join(chr(i) for i in obj[:])
            # 'Test1/City/Test01_City_D01.avi' --> 'Test1/City/Test01_City_D01'
            name = name.split('.')[0]
            # 'Test1/City/Test01_City_D01' --> 'Test01_City_D01'
            name = name.split('/')[-1]
            video_names_list.append(name)
            scores_list.append(score)
    # NOTE: stacking names and scores in one object array coerces the scores
    # to strings in the CSV; this matches the original output format.
    database_info = np.array([video_names_list, scores_list]).T
    df_database_info = pd.DataFrame(database_info, columns=['video_name', 'MOS'])
    df_database_info.to_csv('CVD2014_info.csv')
def main():
    """Entry point: regenerate the CVD2014 info CSV from the .mat metadata."""
    generate_database_info()
if __name__ == '__main__':
    main()
| [
"pandas.DataFrame",
"h5py.File",
"numpy.array"
] | [((128, 153), 'h5py.File', 'h5py.File', (['info_path', '"""r"""'], {}), "(info_path, 'r')\n", (137, 153), False, 'import h5py\n'), ((1023, 1081), 'pandas.DataFrame', 'pd.DataFrame', (['database_info'], {'columns': "['video_name', 'MOS']"}), "(database_info, columns=['video_name', 'MOS'])\n", (1035, 1081), True, 'import pandas as pd\n'), ((956, 997), 'numpy.array', 'np.array', (['[video_names_list, scores_list]'], {}), '([video_names_list, scores_list])\n', (964, 997), True, 'import numpy as np\n')] |
import os
import sys
import time
import argparse
import numpy as np
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from carle.env import CARLE
from carle.mcl import AE2D, RND2D, CornerBonus, PufferDetector, SpeedDetector
from game_of_carle.agents.toggle import Toggle
from game_of_carle.agents.grnn import ConvGRNN
from game_of_carle.agents.carla import CARLA
from game_of_carle.agents.harli import HARLI
from game_of_carle.algos.cma import CMAPopulation
# Registry mapping lowercase CLI names to reward-wrapper classes.
WRAPPER_DICT = {
    "cornerbonus": CornerBonus,
    "ae2d": AE2D,
    "rnd2d": RND2D,
    "pufferdetector": PufferDetector,
    "speeddetector": SpeedDetector,
}
# Registry mapping lowercase CLI names to agent classes.
AGENT_DICT = {
    "harli": HARLI,
    "carla": CARLA,
    "convgrnn": ConvGRNN,
    "toggle": Toggle,
}
def train(args):
    """Train agents on the CARLE environment with a CMA-ES style population.

    For each (seed, agent class) pair: builds a wrapped CARLE environment,
    runs `max_generations` generations of rollouts, logs fitness statistics
    to TensorBoard, runs a validation pass every 16 generations, and saves
    per-run results to a .npy file.

    Args:
        args: argparse.Namespace produced by the CLI parser in __main__.
    """
    # parse arguments
    episodes = args.episodes
    max_generations = args.max_generations
    max_steps = args.max_steps
    population_size = args.population_size
    my_instances = args.vectorization
    seeds = args.seeds
    device = args.device
    env_dimension = args.env_dimension
    # Resolve CLI names (case-insensitive) to agent / wrapper classes.
    agents = [AGENT_DICT[key.lower()] for key in args.agents]
    wrappers = [WRAPPER_DICT[key.lower()] for key in args.wrappers]
    training_rules = args.training_rules
    validation_rules = args.validation_rules
    # NOTE(review): testing_rules is read but never used in this function.
    testing_rules = args.testing_rules
    # define environment and exploration bonus wrappers
    env = CARLE(instances = my_instances, device=device, \
            height=env_dimension, width=env_dimension)
    my_device = env.my_device
    for wrapper in wrappers:
        env = wrapper(env)
    for my_seed in seeds:
        np.random.seed(my_seed)
        torch.manual_seed(my_seed)
        for agent_fn in agents:
            # NOTE(review): time_stamp is assigned but never used.
            time_stamp = int(time.time())
            agent = CMAPopulation(agent_fn, device=device, \
                    episodes=episodes, population_size=population_size)
            tag = args.tag + str(int(time.time()))
            experiment_name = agent.population[0].__class__.__name__ + \
                    f"_{my_seed}_{tag}"
            # Resolve save/log paths relative to this file's location.
            my_file_path = os.path.abspath(os.path.dirname(__file__))
            my_directory_path = os.path.join(my_file_path, "../policies/")
            my_save_path = os.path.join(my_directory_path, experiment_name)
            my_meta_path = os.path.join(\
                    os.path.sep.join(my_file_path.split(os.path.sep)[:-1]), \
                    "experiments", f"args_{experiment_name}")
            # Record the exact command line used for this experiment.
            with open(my_meta_path, "w") as f:
                my_module = sys.argv[0].split(os.path.sep)[-1]
                f.write(my_module)
                for my_arg in sys.argv[1:]:
                    f.write(f" {my_arg} ")
            agent.save_path = my_save_path
            agent.population_size = population_size
            # with a vectorization of 4, don't need to repeat "episodes"
            agent.max_episodes = 4
            writer_path = os.path.sep.join(my_file_path.split(os.path.sep)[:-1])
            writer_path = os.path.join(writer_path, f"experiments/logs/{experiment_name}")
            writer = SummaryWriter(writer_path)
            print(f"tensorboard logging to {writer_path}")
            results = {"generation": [],\
                    "fitness": [],\
                    "fitness_max": [],\
                    "fitness_min": [],\
                    "fitness_mean": [],\
                    "fitness std. dev.": []}
            # Sample a rule string uniformly from the training set.
            my_rules = np.random.choice(training_rules, \
                    p=[1/len(training_rules)]*len(training_rules))
            env.rules_from_string(my_rules)
            for generation in range(max_generations):
                t0 = time.time()
                obs = env.reset()
                rewards = torch.Tensor([]).to(my_device)
                reward_sum = []
                number_steps = 0
                temp_generation = 0.0 + agent.generation
                # Roll out episodes until the CMA population advances a generation.
                while agent.generation <= temp_generation:
                    #len(agent.fitness) <= (agent.population_size * agent.max_episodes):
                    if number_steps >= max_steps:
                        # Re-sample the CA rules once per full population sweep.
                        if (agent.meta_index % agent.population_size) == 0:
                            my_rules = np.random.choice(training_rules, \
                                    p=[1/len(training_rules)]*len(training_rules))
                            env.rules_from_string(my_rules)
                        number_steps = 0
                        reward_sum.append(np.mean(rewards.detach().cpu().numpy()))
                        agent.step(reward_sum[-1])
                        obs = env.reset()
                        rewards = torch.Tensor([]).to(my_device)
                    action = agent(obs)
                    obs, reward, done, info = env.step(action)
                    rewards = torch.cat([rewards, reward])
                    number_steps += 1
                t1 = time.time()
                results["generation"].append(generation)
                results["fitness_max"].append(np.max(reward_sum))
                results["fitness_min"].append(np.min(reward_sum))
                results["fitness_mean"].append(np.mean(reward_sum))
                results["fitness std. dev."].append(np.std(reward_sum))
                # training summary writer adds
                max_fit = np.max(reward_sum)
                mean_fit = np.mean(reward_sum)
                min_fit = np.min(reward_sum)
                std_dev_fit = np.std(reward_sum)
                writer.add_scalar("max_fit/train", max_fit, generation)
                writer.add_scalar("mean_fit/train", mean_fit, generation)
                writer.add_scalar("min_fit/train", min_fit, generation)
                writer.add_scalar("std_dev_fit/train", std_dev_fit, generation)
                print(f"generation {generation}, mean, max, min, std. dev. fitness: "\
                        f"{mean_fit}, {max_fit}, "\
                        f"{min_fit}, {std_dev_fit}")
                steps_per_second = (env.inner_env.instances*max_steps*agent.population_size)/(t1-t0)
                print(f"steps per second = {steps_per_second} s per generation: {t1-t0}")
                # Validation pass every 16 generations.
                if generation % 16 == 0:
                    rewards = torch.Tensor([]).to(my_device)
                    reward_sum = []
                    number_steps = 0
                    agent_count = 0
                    # NOTE(review): agent_count is not reset inside this loop, so
                    # only the first rule string in validation_rules is actually
                    # evaluated; subsequent rules skip the while loop. Confirm
                    # whether this is intentional.
                    for my_rules in validation_rules:
                        agent.fitness = []
                        env.rules_from_string(my_rules)
                        while agent_count < agent.population_size:
                            if number_steps >= max_steps:
                                agent_count += 1
                                agent.meta_index = agent_count
                                number_steps = 0
                                reward_sum.append(np.sum(rewards.detach().cpu().numpy()))
                                obs = env.reset()
                                rewards = torch.Tensor([]).to(my_device)
                            action = agent(obs)
                            obs, reward, done, info = env.step(action)
                            rewards = torch.cat([rewards, reward])
                            number_steps += 1
                    # validation summary writer adds
                    max_fit = np.max(reward_sum)
                    mean_fit = np.mean(reward_sum)
                    min_fit = np.min(reward_sum)
                    std_dev_fit = np.std(reward_sum)
                    writer.add_scalar("max_fit/val", max_fit, generation)
                    writer.add_scalar("mean_fit/val", mean_fit, generation)
                    writer.add_scalar("min_fit/val", min_fit, generation)
                    writer.add_scalar("std_dev_fit/val", std_dev_fit, generation)
                    print(f"{generation} validation, mean, max, min, std. dev. fitness: "\
                            f"{mean_fit}, {max_fit}, {min_fit}, {std_dev_fit}")
            np.save(f"{writer_path}_results{tag}.npy", results, allow_pickle=True)
if __name__ == "__main__":
    # Command-line interface for the training run.
    parser = argparse.ArgumentParser()
    parser.add_argument("-mg", "--max_generations", type=int, default=10)
    parser.add_argument("-ms", "--max_steps", type=int, default=512)
    parser.add_argument("-p", "--population_size", type=int, default=16)
    parser.add_argument("-v", "--vectorization", type=int, default=1)
    parser.add_argument("-s", "--seeds", type=int, nargs="+", default=[13])
    # Bug fix: the default was `[1]` (a list), but `type=int` without nargs
    # yields a plain int whenever the flag is given on the command line, so
    # args.episodes had an inconsistent type. Use a consistent int default.
    parser.add_argument("-e", "--episodes", type=int, default=1)
    parser.add_argument("-d", "--device", type=str, default="cuda:1")
    parser.add_argument("-dim", "--env_dimension", type=int, default=256)
    parser.add_argument("-a", "--agents", type=str, nargs="+", \
            default=["Toggle", "HARLI"], \
            help="agent(s) to train in experiment, can be several")
    parser.add_argument("-w", "--wrappers", type=str, nargs="+", \
            default=["CornerBonus"], help="reward wrappers to train with")
    parser.add_argument("-tr", "--training_rules", type=str, nargs="+", \
            default=["B3/S23"], \
            help="B/S string(s) defining CA rules to use during training")
    parser.add_argument("-vr", "--validation_rules", type=str, nargs="+", \
            default=["B3/S23"], \
            help="B/S string(s) defining CA rules to use during validation")
    parser.add_argument("-xr", "--testing_rules", type=str, nargs="+", \
            default=["B3/S23"], \
            help="B/S string(s) defining CA rules to use during testing")
    parser.add_argument("-tag", "--tag", type=str, default="default_tag", \
            help="a tag to identify the experiment")
    args = parser.parse_args()
    train(args)
| [
"numpy.save",
"numpy.random.seed",
"argparse.ArgumentParser",
"game_of_carle.algos.cma.CMAPopulation",
"numpy.std",
"torch.manual_seed",
"os.path.dirname",
"torch.cat",
"time.time",
"numpy.max",
"numpy.mean",
"numpy.min",
"torch.Tensor",
"torch.utils.tensorboard.SummaryWriter",
"carle.en... | [((1482, 1574), 'carle.env.CARLE', 'CARLE', ([], {'instances': 'my_instances', 'device': 'device', 'height': 'env_dimension', 'width': 'env_dimension'}), '(instances=my_instances, device=device, height=env_dimension, width=\n env_dimension)\n', (1487, 1574), False, 'from carle.env import CARLE\n'), ((8336, 8361), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8359, 8361), False, 'import argparse\n'), ((1710, 1733), 'numpy.random.seed', 'np.random.seed', (['my_seed'], {}), '(my_seed)\n', (1724, 1733), True, 'import numpy as np\n'), ((1742, 1768), 'torch.manual_seed', 'torch.manual_seed', (['my_seed'], {}), '(my_seed)\n', (1759, 1768), False, 'import torch\n'), ((1866, 1961), 'game_of_carle.algos.cma.CMAPopulation', 'CMAPopulation', (['agent_fn'], {'device': 'device', 'episodes': 'episodes', 'population_size': 'population_size'}), '(agent_fn, device=device, episodes=episodes, population_size=\n population_size)\n', (1879, 1961), False, 'from game_of_carle.algos.cma import CMAPopulation\n'), ((2246, 2288), 'os.path.join', 'os.path.join', (['my_file_path', '"""../policies/"""'], {}), "(my_file_path, '../policies/')\n", (2258, 2288), False, 'import os\n'), ((2316, 2364), 'os.path.join', 'os.path.join', (['my_directory_path', 'experiment_name'], {}), '(my_directory_path, experiment_name)\n', (2328, 2364), False, 'import os\n'), ((3094, 3158), 'os.path.join', 'os.path.join', (['writer_path', 'f"""experiments/logs/{experiment_name}"""'], {}), "(writer_path, f'experiments/logs/{experiment_name}')\n", (3106, 3158), False, 'import os\n'), ((3194, 3220), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['writer_path'], {}), '(writer_path)\n', (3207, 3220), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1832, 1843), 'time.time', 'time.time', ([], {}), '()\n', (1841, 1843), False, 'import time\n'), ((2187, 2212), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2202, 2212), False, 
'import os\n'), ((3773, 3784), 'time.time', 'time.time', ([], {}), '()\n', (3782, 3784), False, 'import time\n'), ((5064, 5075), 'time.time', 'time.time', ([], {}), '()\n', (5073, 5075), False, 'import time\n'), ((5481, 5499), 'numpy.max', 'np.max', (['reward_sum'], {}), '(reward_sum)\n', (5487, 5499), True, 'import numpy as np\n'), ((5527, 5546), 'numpy.mean', 'np.mean', (['reward_sum'], {}), '(reward_sum)\n', (5534, 5546), True, 'import numpy as np\n'), ((5573, 5591), 'numpy.min', 'np.min', (['reward_sum'], {}), '(reward_sum)\n', (5579, 5591), True, 'import numpy as np\n'), ((5622, 5640), 'numpy.std', 'np.std', (['reward_sum'], {}), '(reward_sum)\n', (5628, 5640), True, 'import numpy as np\n'), ((4975, 5003), 'torch.cat', 'torch.cat', (['[rewards, reward]'], {}), '([rewards, reward])\n', (4984, 5003), False, 'import torch\n'), ((5180, 5198), 'numpy.max', 'np.max', (['reward_sum'], {}), '(reward_sum)\n', (5186, 5198), True, 'import numpy as np\n'), ((5246, 5264), 'numpy.min', 'np.min', (['reward_sum'], {}), '(reward_sum)\n', (5252, 5264), True, 'import numpy as np\n'), ((5313, 5332), 'numpy.mean', 'np.mean', (['reward_sum'], {}), '(reward_sum)\n', (5320, 5332), True, 'import numpy as np\n'), ((5386, 5404), 'numpy.std', 'np.std', (['reward_sum'], {}), '(reward_sum)\n', (5392, 5404), True, 'import numpy as np\n'), ((7551, 7569), 'numpy.max', 'np.max', (['reward_sum'], {}), '(reward_sum)\n', (7557, 7569), True, 'import numpy as np\n'), ((7601, 7620), 'numpy.mean', 'np.mean', (['reward_sum'], {}), '(reward_sum)\n', (7608, 7620), True, 'import numpy as np\n'), ((7651, 7669), 'numpy.min', 'np.min', (['reward_sum'], {}), '(reward_sum)\n', (7657, 7669), True, 'import numpy as np\n'), ((7704, 7722), 'numpy.std', 'np.std', (['reward_sum'], {}), '(reward_sum)\n', (7710, 7722), True, 'import numpy as np\n'), ((8222, 8292), 'numpy.save', 'np.save', (['f"""{writer_path}_results{tag}.npy"""', 'results'], {'allow_pickle': '(True)'}), "(f'{writer_path}_results{tag}.npy', results, 
allow_pickle=True)\n", (8229, 8292), True, 'import numpy as np\n'), ((2017, 2028), 'time.time', 'time.time', ([], {}), '()\n', (2026, 2028), False, 'import time\n'), ((3847, 3863), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (3859, 3863), False, 'import torch\n'), ((6400, 6416), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (6412, 6416), False, 'import torch\n'), ((7391, 7419), 'torch.cat', 'torch.cat', (['[rewards, reward]'], {}), '([rewards, reward])\n', (7400, 7419), False, 'import torch\n'), ((4788, 4804), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (4800, 4804), False, 'import torch\n'), ((7200, 7216), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (7212, 7216), False, 'import torch\n')] |
import tensorflow as tf
import numpy as np
from numpy import genfromtxt
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import csv
# Input CSVs (feature rows and one-hot class rows) and checkpoint location.
drive_data_path = '../output_tests.csv'
class_data_path = '../output_classes.csv'
model_save_path = '../multilayer_perceptron.ckpt'
## ******************************
## DATA PREPROCESSING AHEAD HERE
## ******************************
data_drive = genfromtxt(drive_data_path, delimiter=',')
data_classes = genfromtxt(class_data_path, delimiter=',')
# print data_drive
# print data_classes
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(data_drive, data_classes,
                                    test_size=0.20, random_state = 420)
num_train = X_train.shape[0]
num_test = X_test.shape[0]
# Save the testing data into files for later use...
# NOTE(review): opening csv files in "wb" is the Python 2 convention;
# Python 3's csv module expects text mode with newline='' — confirm the
# target interpreter (the script also uses xrange below, suggesting Py2).
with open("test_driver.csv", "wb") as f:
    writer = csv.writer(f)
    writer.writerows(X_test)
with open("test_classes.csv", "wb") as f:
    writer = csv.writer(f)
    writer.writerows(y_test)
## ******************************
## NETWORK PARAMETERS HERE
## ******************************
learning_rate = 0.05
training_epochs = 50
batch_size = 1
batch_total = int(num_train/batch_size)
display_step = 10
test_step = 10
## ******************************
## MACHINE LEARNING SECTION AHEAD
## ******************************
# SIMPLE NEURAL NETWORK
# This neural network will just be composed of two hidden, fully connected
# layers. Basically the implementation of a multilayer perceptron machine.
# It can learn non-linear relationships (hopefully)
# Layer sizes: 36 input features -> 150 -> 250 -> 4 output classes.
dim_input = 36
dim_layer1 = 150
dim_layer2 = 250
dim_output = 4
# NOTE(review): tf.InteractiveSession / tf.placeholder /
# tf.initialize_all_variables are TF 1.x-era APIs; this script will not run
# on TF 2.x without tf.compat.v1 — confirm the pinned TensorFlow version.
sess = tf.InteractiveSession()
# [None] is used because we might change up this size, by using batch_size
# a batch of inputs of dim_input size
inputs = tf.placeholder(tf.float32, shape=[None, dim_input])
outputs = tf.placeholder(tf.float32, shape=[None, dim_output])
outputs_actual = tf.placeholder(tf.float32, shape=[None, dim_output])
# connect inputs to hidden units
# also, initialize weights with random numbers
weights_1 = tf.Variable(tf.truncated_normal([dim_input, dim_layer1]))
biases_1 = tf.Variable(tf.zeros([dim_layer1]))
layer_1_outputs = tf.nn.sigmoid(tf.matmul(inputs, weights_1) + biases_1)
weights_2 = tf.Variable(tf.truncated_normal([dim_layer1, dim_layer2]))
biases_2 = tf.Variable(tf.zeros([dim_layer2]))
layer_2_outputs = tf.nn.sigmoid(tf.matmul(layer_1_outputs, weights_2) + biases_2)
weights_3 = tf.Variable(tf.truncated_normal([dim_layer2, dim_output]))
biases_3 = tf.Variable(tf.zeros([dim_output]))
output = tf.nn.sigmoid(tf.matmul(layer_2_outputs, weights_3) + biases_3)
# [!] The error function chosen is good for multiclass classifications
# takes the difference of all of the classes in the output
# error_function = 0.5 * tf.reduce_sum(tf.sub(output, outputs_actual) \
#                  * tf.sub(output, outputs_actual))
# Binary cross-entropy summed over all outputs; the 1e-9 terms guard
# against log(0).
error_function = -tf.reduce_sum( ( (outputs_actual*tf.log(output + 1e-9))
            + ((1-outputs_actual) * tf.log(1 - output + 1e-9)) ) , name='xentropy' )
train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(error_function)
sess.run(tf.initialize_all_variables())
## **********************************
## ACTUAL NETWORK DEPLOYMENT HERE
## **********************************
# Convert numpy array [x1, x2, x3...] into
# [0, 1, 0, 1, 1, ...]
def threshold_array(array, middle=0.5):
    """Binarize a numpy array in place around `middle`.

    Elements < middle become 0; elements >= middle become 2 * middle
    (i.e. 1.0 for the default middle=0.5). The array is mutated and also
    returned, matching the original contract.

    Fixes:
    - The original used Python-2-only `xrange` (NameError on Python 3).
    - Vectorized with boolean masks instead of a nested element loop;
      the mask is computed once up front so the two assignments cannot
      interfere, and the function now works for arrays of any rank
      (the original required exactly 2 dimensions).
    """
    below = array < middle  # snapshot the comparison before mutating
    array[below] = 0
    array[~below] = 2 * middle
    return array
# Main training loop: one SGD step per single-row "batch".
# NOTE(review): I = [i] always selects exactly one row regardless of
# batch_size; the batch_size knob above only affects batch_total — confirm
# whether mini-batching was intended.
for epoch in range(training_epochs):
    avg_cost = 0.
    # Loop over all batches
    for i in range(batch_total):
        # we want to take slice of length batch_size
        I = [i]
        X = X_train[I, :]
        y = y_train[I, :]
        # Run backprop and cost operation to get loss value
        _, cost = sess.run([train_step, error_function],
                            feed_dict= {inputs: X,
                                        outputs_actual: y})
        # Compute average loss
        avg_cost += cost / batch_total
    # Display some output every epoch
    if epoch % display_step == 0:
        print("Epoch:", '%06d' % (epoch), "cost=", \
            "{:.9f}".format(avg_cost))
    # Periodic exact-match evaluation on the held-out test set.
    if epoch % test_step == 0:
        test_total = X_test.shape[0]
        result = output
        predictions = result.eval(feed_dict={inputs: X_test,})
        predictions = threshold_array(predictions)
        correct = 0
        # NOTE(review): xrange is Python 2 only (see csv "wb" note above).
        for i in xrange(test_total):
            # Exact match across all 4 one-hot outputs counts as correct.
            if np.array_equal(y_test[i], predictions[i]):
                correct = correct + 1
        perfect_accuracy = float(correct) / test_total
        print("Testing accuracy: {} / {}".format(correct, test_total) \
            + " {}".format(perfect_accuracy))
print("Optimization Finished!")
# Save the model for testing later
saver = tf.train.Saver()
save_path = saver.save(sess, model_save_path)
print("Model saved for future use in: %s" % save_path)
| [
"numpy.array_equal",
"csv.writer",
"tensorflow.train.Saver",
"sklearn.model_selection.train_test_split",
"numpy.genfromtxt",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.initialize_all_variables",
"tensorflow.log",
"tensorflow.InteractiveSession",
"tensorflow.... | [((419, 461), 'numpy.genfromtxt', 'genfromtxt', (['drive_data_path'], {'delimiter': '""","""'}), "(drive_data_path, delimiter=',')\n", (429, 461), False, 'from numpy import genfromtxt\n'), ((477, 519), 'numpy.genfromtxt', 'genfromtxt', (['class_data_path'], {'delimiter': '""","""'}), "(class_data_path, delimiter=',')\n", (487, 519), False, 'from numpy import genfromtxt\n'), ((598, 673), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data_drive', 'data_classes'], {'test_size': '(0.2)', 'random_state': '(420)'}), '(data_drive, data_classes, test_size=0.2, random_state=420)\n', (614, 673), False, 'from sklearn.model_selection import train_test_split\n'), ((1667, 1690), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1688, 1690), True, 'import tensorflow as tf\n'), ((1815, 1866), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, dim_input]'}), '(tf.float32, shape=[None, dim_input])\n', (1829, 1866), True, 'import tensorflow as tf\n'), ((1877, 1929), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, dim_output]'}), '(tf.float32, shape=[None, dim_output])\n', (1891, 1929), True, 'import tensorflow as tf\n'), ((1947, 1999), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, dim_output]'}), '(tf.float32, shape=[None, dim_output])\n', (1961, 1999), True, 'import tensorflow as tf\n'), ((5022, 5038), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5036, 5038), True, 'import tensorflow as tf\n'), ((885, 898), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (895, 898), False, 'import csv\n'), ((985, 998), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (995, 998), False, 'import csv\n'), ((2108, 2152), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[dim_input, dim_layer1]'], {}), '([dim_input, dim_layer1])\n', (2127, 2152), True, 'import tensorflow as tf\n'), ((2177, 2199), 'tensorflow.zeros', 
'tf.zeros', (['[dim_layer1]'], {}), '([dim_layer1])\n', (2185, 2199), True, 'import tensorflow as tf\n'), ((2299, 2344), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[dim_layer1, dim_layer2]'], {}), '([dim_layer1, dim_layer2])\n', (2318, 2344), True, 'import tensorflow as tf\n'), ((2369, 2391), 'tensorflow.zeros', 'tf.zeros', (['[dim_layer2]'], {}), '([dim_layer2])\n', (2377, 2391), True, 'import tensorflow as tf\n'), ((2502, 2547), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[dim_layer2, dim_output]'], {}), '([dim_layer2, dim_output])\n', (2521, 2547), True, 'import tensorflow as tf\n'), ((2572, 2594), 'tensorflow.zeros', 'tf.zeros', (['[dim_output]'], {}), '([dim_output])\n', (2580, 2594), True, 'import tensorflow as tf\n'), ((3180, 3209), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (3207, 3209), True, 'import tensorflow as tf\n'), ((2233, 2261), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'weights_1'], {}), '(inputs, weights_1)\n', (2242, 2261), True, 'import tensorflow as tf\n'), ((2425, 2462), 'tensorflow.matmul', 'tf.matmul', (['layer_1_outputs', 'weights_2'], {}), '(layer_1_outputs, weights_2)\n', (2434, 2462), True, 'import tensorflow as tf\n'), ((2619, 2656), 'tensorflow.matmul', 'tf.matmul', (['layer_2_outputs', 'weights_3'], {}), '(layer_2_outputs, weights_3)\n', (2628, 2656), True, 'import tensorflow as tf\n'), ((3083, 3145), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3116, 3145), True, 'import tensorflow as tf\n'), ((4666, 4707), 'numpy.array_equal', 'np.array_equal', (['y_test[i]', 'predictions[i]'], {}), '(y_test[i], predictions[i])\n', (4680, 4707), True, 'import numpy as np\n'), ((2963, 2985), 'tensorflow.log', 'tf.log', (['(output + 1e-09)'], {}), '(output + 1e-09)\n', (2969, 2985), True, 'import tensorflow as tf\n'), ((3015, 3041), 'tensorflow.log', 'tf.log', 
(['(1 - output + 1e-09)'], {}), '(1 - output + 1e-09)\n', (3021, 3041), True, 'import tensorflow as tf\n')] |
import numpy as np
from typing import List, Tuple, Dict
import random
from PIL import Image
from ..pad import Padding
from ..interpolate import Interpolate
class LFFDRandomSample():
    """Random face crop/rescale augmentation as defined in the LFFD paper.

    Picks one face, rescales the image so the face matches a randomly drawn
    scale from one of the configured scale branches, crops around it, and
    pastes the crop centered on a target_size canvas, adjusting all boxes.
    """
    def __init__(self, scales: List[Tuple[int, int]], target_size: Tuple[int, int] = (640, 640),
            p: float = 0.5):
        assert p >= 0 and p <= 1.0, "given `p` is not valid, must be between 0 and 1 but found: {}".format(p)
        self.scales = scales
        self.target_size = target_size # W,H
        self.padding = Padding(target_size=target_size, pad_value=0)
        self.interpolate = Interpolate(target_size=target_size[0])
        self.p = p
    def __call__(self, img: np.ndarray, targets: Dict = None) -> Tuple[np.ndarray, Dict]:
        """Randomly samples faces using given scales. All scales represents branches and
        for each branch selection probability is same.

        Args:
            img (np.ndarray): H,W,C
            targets (Dict, Optional): contains targets

        Returns:
            Tuple[np.ndarray, Dict]: transformed image and transformed targets
        """
        # Bug fix: the signature previously used the mutable default `{}`.
        # This method assigns targets["target_boxes"] below, which would have
        # mutated the shared default dict across every call made without an
        # explicit `targets` argument. Use None and create a fresh dict.
        if targets is None:
            targets = {}
        target_boxes = targets.get("target_boxes")
        # Skip the crop (plain resize + pad) when there are no boxes or the
        # probability gate fails.
        if (target_boxes is None) or (target_boxes.shape[0] == 0) or (random.random() > self.p):
            img, targets = self.interpolate(img, targets=targets)
            img, targets = self.padding(img, targets=targets)
            return (img, targets)
        num_faces = target_boxes.shape[0]
        # select one face
        selected_face_idx = random.randint(0, num_faces-1)
        selected_face_scale_idx = random.choice(list(range(len(self.scales))))
        min_scale,max_scale = self.scales[selected_face_scale_idx]
        scale_size = random.uniform(min_scale, max_scale)
        x1, y1, x2, y2 = target_boxes[selected_face_idx].astype(np.int32)
        face_scale = max(y2-y1, x2-x1)
        h, w = img.shape[:2]
        # Scale factor that brings the selected face to the drawn scale.
        sf = scale_size / face_scale
        aboxes = target_boxes * sf
        sx1,sy1,sx2,sy2 = aboxes[selected_face_idx].astype(np.int32)
        # Margins (in original-image pixels) needed to center the scaled
        # face inside the target canvas, clipped to the image borders below.
        offset_w_1 = (self.target_size[0] - (sx2-sx1)) // 2
        offset_w_2 = offset_w_1 + (self.target_size[0] - (sx2-sx1)) % 2
        offset_w_1 //= sf
        offset_w_2 //= sf
        offset_h_1 = (self.target_size[1] - (sy2-sy1)) // 2
        offset_h_2 = offset_h_1 + (self.target_size[1] - (sy2-sy1)) % 2
        offset_h_1 //= sf
        offset_h_2 //= sf
        offset_w_1 = int(min(x1,offset_w_1))
        offset_w_2 = int(min(w-x2,offset_w_2))
        offset_h_1 = int(min(y1,offset_h_1))
        offset_h_2 = int(min(h-y2,offset_h_2))
        # select faces that center's lie between cropped region
        low_h,high_h = y1-offset_h_1,y2+offset_h_2
        low_w,high_w = x1-offset_w_1,x2+offset_w_2
        cboxes_x = (target_boxes[:, 0] + target_boxes[:, 2]) // 2
        cboxes_y = (target_boxes[:, 1] + target_boxes[:, 3]) // 2
        # TODO handle here
        center_mask = np.bitwise_and(
            np.bitwise_and(cboxes_x > low_w, cboxes_x < high_w),
            np.bitwise_and(cboxes_y > low_h, cboxes_y < high_h))
        aimg = img[y1-offset_h_1:y2+offset_h_2, x1-offset_w_1:x2+offset_w_2]
        # TODO control this line
        aimg = np.array(Image.fromarray(aimg).resize((int(aimg.shape[1]*sf), int(aimg.shape[0]*sf))))
        aimg = aimg[:self.target_size[1], : self.target_size[0]]
        # Shift all boxes into crop coordinates, then apply the scale.
        target_boxes[:, [0,2]] = target_boxes[:, [0,2]] - (x1 - offset_w_1)
        target_boxes[:, [1,3]] = target_boxes[:, [1,3]] - (y1 - offset_h_1)
        target_boxes *= sf
        x1, y1, x2, y2 = target_boxes[selected_face_idx].astype(np.int32)
        cx = (x1+x2) // 2
        cy = (y1+y2) // 2
        # Fresh black canvas; the crop is pasted so the selected face ends
        # up at the canvas center.
        img = np.zeros((self.target_size[1],self.target_size[0],3), dtype=np.uint8)
        tcx = img.shape[1] // 2
        tcy = img.shape[0] // 2
        offset_x = int(tcx - cx)
        offset_y = int(tcy - cy)
        if offset_x >= 0:
            # pad left
            left_index_x = offset_x
            right_index_x = offset_x+aimg.shape[1]
        else:
            # pad_right
            left_index_x = 0
            right_index_x = aimg.shape[1]
        if offset_y >= 0:
            # pad up
            up_index_y = offset_y
            down_index_y = offset_y+aimg.shape[0]
        else:
            # pad down
            up_index_y = 0
            down_index_y = aimg.shape[0]
        # Clamp the paste window so it matches the crop's actual size.
        target_h,target_w = img[up_index_y:down_index_y, left_index_x:right_index_x].shape[:2]
        source_h,source_w = aimg.shape[:2]
        up_index_y = up_index_y + target_h - source_h
        down_index_y = down_index_y + target_h - source_h
        left_index_x = left_index_x + target_w - source_w
        right_index_x = right_index_x + target_w - source_w
        img[up_index_y:down_index_y, left_index_x:right_index_x] = aimg
        # Translate boxes into canvas coordinates and clip to the canvas.
        target_boxes[:, [0,2]] += left_index_x
        target_boxes[:, [1,3]] += up_index_y
        target_boxes[:, 0] = target_boxes[:, 0].clip(0, self.target_size[0])
        target_boxes[:, 1] = target_boxes[:, 1].clip(0, self.target_size[1])
        target_boxes[:, 2] = target_boxes[:, 2].clip(0, self.target_size[0])
        target_boxes[:, 3] = target_boxes[:, 3].clip(0, self.target_size[1])
        # Keep only boxes whose centers fell inside the cropped region.
        targets["target_boxes"] = target_boxes[center_mask, :]
        return (img, targets)
| [
"random.randint",
"random.uniform",
"numpy.zeros",
"random.random",
"numpy.bitwise_and",
"PIL.Image.fromarray"
] | [((1563, 1595), 'random.randint', 'random.randint', (['(0)', '(num_faces - 1)'], {}), '(0, num_faces - 1)\n', (1577, 1595), False, 'import random\n'), ((1763, 1799), 'random.uniform', 'random.uniform', (['min_scale', 'max_scale'], {}), '(min_scale, max_scale)\n', (1777, 1799), False, 'import random\n'), ((3743, 3814), 'numpy.zeros', 'np.zeros', (['(self.target_size[1], self.target_size[0], 3)'], {'dtype': 'np.uint8'}), '((self.target_size[1], self.target_size[0], 3), dtype=np.uint8)\n', (3751, 3814), True, 'import numpy as np\n'), ((3022, 3073), 'numpy.bitwise_and', 'np.bitwise_and', (['(cboxes_x > low_w)', '(cboxes_x < high_w)'], {}), '(cboxes_x > low_w, cboxes_x < high_w)\n', (3036, 3073), True, 'import numpy as np\n'), ((3087, 3138), 'numpy.bitwise_and', 'np.bitwise_and', (['(cboxes_y > low_h)', '(cboxes_y < high_h)'], {}), '(cboxes_y > low_h, cboxes_y < high_h)\n', (3101, 3138), True, 'import numpy as np\n'), ((1276, 1291), 'random.random', 'random.random', ([], {}), '()\n', (1289, 1291), False, 'import random\n'), ((3276, 3297), 'PIL.Image.fromarray', 'Image.fromarray', (['aimg'], {}), '(aimg)\n', (3291, 3297), False, 'from PIL import Image\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import numpy as np
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Subset
from pytouch.datasets import DigitFolder
from pytouch.tasks import TouchDetect
_log = logging.getLogger(__name__)
class TouchDetectDataModule(LightningDataModule):
    """Lightning data module for the touch-detection task.

    Wraps two DigitFolder views of the same data (train-time vs eval-time
    transforms) and splits one shuffled index range into train/val subsets
    according to cfg.training.train_val_ratio.
    """

    def __init__(
        self,
        cfg,
        *args,
        **kwargs,
    ):
        super().__init__()
        self.cfg = cfg
        self.transform = TouchDetect.transform

    def setup(self, stage=None):
        data_cfg = self.cfg.data
        full_train = DigitFolder(
            root=data_cfg.path,
            exclude=data_cfg.exclude,
            baseline=None,
            transform=self.transform(data_cfg.transform, train=True),
        )
        full_val = DigitFolder(
            root=data_cfg.path,
            exclude=data_cfg.exclude,
            baseline=None,
            transform=self.transform(data_cfg.transform, train=False),
        )

        self.dataset_len = len(full_train)
        indices = list(range(self.dataset_len))
        np.random.shuffle(indices)

        # First train_val_ratio of the shuffled indices go to training.
        cut = int(np.floor(self.cfg.training.train_val_ratio * self.dataset_len))
        self.train_idx = indices[:cut]
        self.val_idx = indices[cut:]

        _log.info(
            f"Total dataset size: {self.dataset_len}, train {len(self.train_idx)}, val {len(self.val_idx)}"
            + f" using sensors {set(full_train.serials)}"
        )

        self.train_dataset = Subset(full_train, self.train_idx)
        self.val_dataset = Subset(full_val, self.val_idx)

    def _make_loader(self, dataset, shuffle):
        # Shared DataLoader construction for the train/val loaders.
        train_cfg = self.cfg.training
        return DataLoader(
            dataset,
            batch_size=train_cfg.batch_size,
            num_workers=train_cfg.n_threads,
            pin_memory=train_cfg.pin_memory,
            shuffle=shuffle,
        )

    def train_dataloader(self):
        return self._make_loader(self.train_dataset, True)

    def val_dataloader(self):
        return self._make_loader(self.val_dataset, False)
| [
"torch.utils.data.Subset",
"torch.utils.data.DataLoader",
"numpy.floor",
"logging.getLogger",
"numpy.random.shuffle"
] | [((294, 321), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (311, 321), False, 'import logging\n'), ((1155, 1185), 'numpy.random.shuffle', 'np.random.shuffle', (['dataset_idx'], {}), '(dataset_idx)\n', (1172, 1185), True, 'import numpy as np\n'), ((1670, 1707), 'torch.utils.data.Subset', 'Subset', (['train_dataset', 'self.train_idx'], {}), '(train_dataset, self.train_idx)\n', (1676, 1707), False, 'from torch.utils.data import DataLoader, Subset\n'), ((1735, 1768), 'torch.utils.data.Subset', 'Subset', (['val_dataset', 'self.val_idx'], {}), '(val_dataset, self.val_idx)\n', (1741, 1768), False, 'from torch.utils.data import DataLoader, Subset\n'), ((1817, 1993), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_dataset'], {'batch_size': 'self.cfg.training.batch_size', 'num_workers': 'self.cfg.training.n_threads', 'pin_memory': 'self.cfg.training.pin_memory', 'shuffle': '(True)'}), '(self.train_dataset, batch_size=self.cfg.training.batch_size,\n num_workers=self.cfg.training.n_threads, pin_memory=self.cfg.training.\n pin_memory, shuffle=True)\n', (1827, 1993), False, 'from torch.utils.data import DataLoader, Subset\n'), ((2102, 2277), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val_dataset'], {'batch_size': 'self.cfg.training.batch_size', 'num_workers': 'self.cfg.training.n_threads', 'pin_memory': 'self.cfg.training.pin_memory', 'shuffle': '(False)'}), '(self.val_dataset, batch_size=self.cfg.training.batch_size,\n num_workers=self.cfg.training.n_threads, pin_memory=self.cfg.training.\n pin_memory, shuffle=False)\n', (2112, 2277), False, 'from torch.utils.data import DataLoader, Subset\n'), ((1230, 1292), 'numpy.floor', 'np.floor', (['(self.cfg.training.train_val_ratio * self.dataset_len)'], {}), '(self.cfg.training.train_val_ratio * self.dataset_len)\n', (1238, 1292), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import face_recognition
from time import sleep
import pickle
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import cosine_distances
def face_detect(rgb_frame):
    """Locate faces in a frame and compute their encodings.

    Returns [locations, encodings]; both lists are empty when no face is
    found.  NOTE(review): callers in this file pass cv2 frames (BGR order)
    although the parameter is named rgb_frame — confirm the intended order.
    """
    locations = face_recognition.face_locations(rgb_frame)
    if not locations:
        # Nothing detected: skip the expensive encoding step.
        return [[], []]
    encodings = face_recognition.face_encodings(
        rgb_frame, known_face_locations=locations, model="large"
    )
    return [locations, encodings]
def draw_face_rec(frame, face_locations, color=(0, 255, 0)):
    """Draw one rectangle on *frame* per (top, right, bottom, left) box.

    Mutates *frame* in place and also returns it for convenience.
    """
    for top, right, bottom, left in face_locations:
        cv2.rectangle(frame, (left, top), (right, bottom), color)
    return frame
def process_video(path, id):
    """Detect faces in every frame of the video at *path*.

    Writes an annotated copy to '<id>.mp4' and pickles the collected face
    encodings to './<id>.pk'.

    Arguments:
    path -- input video file path
    id   -- identifier used to name the two output files
    """
    cap = cv2.VideoCapture(path)
    frame_width = int(cap.get(3))   # CAP_PROP_FRAME_WIDTH
    frame_height = int(cap.get(4))  # CAP_PROP_FRAME_HEIGHT
    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (frame_width, frame_height)
    print(size, fps)
    result = cv2.VideoWriter('%s.mp4' % id,
                         cv2.VideoWriter_fourcc(*'mp4v'),
                         fps, size)
    representations = []
    frame_count = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            [face_locations, face_encodings] = face_detect(frame)
            if len(face_locations) > 0:
                draw_face_rec(frame, face_locations)
                representations.extend(face_encodings)
            result.write(frame)
            frame_count += 1
            print(frame_count)
        with open('./%s.pk' % id, "wb") as f:
            pickle.dump(representations, f)
    finally:
        # Release capture/writer even if detection raises mid-stream,
        # so the partially-written output file is closed properly.
        cap.release()
        result.release()
        cv2.destroyAllWindows()
def load_face_data(id):
    """Load the pickled encodings for *id* and print pairwise distance stats."""
    print("id", id)
    with open('./%s.pk' % id, "rb") as f:
        encodings = pickle.load(f)
    encodings = np.array(encodings)
    for label, metric in (('CosineDistance', cosine_distances),
                          ('EuclideanDistance', euclidean_distances)):
        dist = metric(encodings, encodings)
        print(label, np.min(dist), np.max(dist))
def load_model(id):
    """Return the face encodings pickled for *id* as a numpy array."""
    with open('./%s.pk' % id, "rb") as f:
        data = pickle.load(f)
    return np.array(data)
def compare(enc, id, debug=True, representations=None):
    """Return True when *enc* matches the stored encodings for *id*.

    The 0.07 threshold on the maximum cosine distance follows deepface:
    https://github.com/serengil/deepface/blob/af13e4558fcc873fc60002a1512b975e97a30813/deepface/commons/distance.py#L24
    """
    reps = representations if representations is not None else load_model(id)
    cos_d = cosine_distances(reps, enc)
    euc_d = euclidean_distances(reps, enc)
    if debug:
        print("id", id)
        print('\tCosineDistance', np.min(cos_d), np.max(cos_d))
        print('\tEuclideanDistance', np.min(
            euc_d), np.max(euc_d))
    return np.max(cos_d) < 0.07
def test_img(img_path, id):
    """Run `compare` against every face found in the image at *img_path*."""
    print(img_path)
    frame = cv2.imread(img_path)
    [_, encodings] = face_detect(frame)
    for index, face in enumerate(encodings):
        print("-------%d-------" % index)
        res = compare([face], id, debug=True)
        print("\t", res)
# process_video("./my_face.mp4", "1")
# process_video("./test.mp4", "2")
# process_video("./test_trim.mp4", "3")
# load_face_data("1")
# load_face_data("2")
# load_face_data("3")
# --------
# Self test
# --------
# id 1
# CosineDistance 0.0 0.024210203362418747
# EuclideanDistance 0.0 0.3057140925539441
# id 2
# CosineDistance 0.0 0.07679001286737797
# EuclideanDistance 0.0 0.5337827995058241
# id 3
# CosineDistance 0.0 0.019538628657974066
# EuclideanDistance 0.0 0.2660321618995691
# test_img('/Users/admin/Downloads/IMG_3346.JPG', "3")
# test_img('/Users/admin/Downloads/brother.jpg', "3")
# test_img('/Users/admin/Documents/tmp/face-api-playground/tin_an.jpg', "3")
# test_img('/Users/admin/Documents/tmp/face-api-playground/tin_ton.jpg', "3")
# test_img('/Users/admin/Documents/tmp/magick_play/in-doc.jpg', "3")
# test_img('/Users/admin/Desktop/passport/mrz_passport_3.jpeg', "3")
# test_img('/Users/admin/Documents/tmp/python_play/face/dataset/vgg_face_dataset/faces/Recep_Tayyip_Erdogan/Recep_Tayyip_Erdogan_0030.jpg', "3")
| [
"sklearn.metrics.pairwise.cosine_distances",
"pickle.dump",
"cv2.VideoWriter_fourcc",
"face_recognition.face_encodings",
"sklearn.metrics.pairwise.euclidean_distances",
"cv2.VideoCapture",
"cv2.imread",
"numpy.min",
"pickle.load",
"numpy.array",
"numpy.max",
"cv2.rectangle",
"face_recognitio... | [((253, 295), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb_frame'], {}), '(rgb_frame)\n', (284, 295), False, 'import face_recognition\n'), ((390, 489), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb_frame'], {'known_face_locations': 'face_locations', 'model': '"""large"""'}), "(rgb_frame, known_face_locations=\n face_locations, model='large')\n", (421, 489), False, 'import face_recognition\n'), ((804, 826), 'cv2.VideoCapture', 'cv2.VideoCapture', (['path'], {}), '(path)\n', (820, 826), False, 'import cv2\n'), ((1723, 1746), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1744, 1746), False, 'import cv2\n'), ((1923, 1948), 'numpy.array', 'np.array', (['representations'], {}), '(representations)\n', (1931, 1948), True, 'import numpy as np\n'), ((1965, 2015), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['representations', 'representations'], {}), '(representations, representations)\n', (1981, 2015), False, 'from sklearn.metrics.pairwise import cosine_distances\n'), ((2096, 2149), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['representations', 'representations'], {}), '(representations, representations)\n', (2115, 2149), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((2556, 2594), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['representations', 'enc'], {}), '(representations, enc)\n', (2572, 2594), False, 'from sklearn.metrics.pairwise import cosine_distances\n'), ((2614, 2655), 'sklearn.metrics.pairwise.euclidean_distances', 'euclidean_distances', (['representations', 'enc'], {}), '(representations, enc)\n', (2633, 2655), False, 'from sklearn.metrics.pairwise import euclidean_distances\n'), ((3090, 3110), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (3100, 3110), False, 'import cv2\n'), ((687, 744), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, 
top)', '(right, bottom)', 'color'], {}), '(frame, (left, top), (right, bottom), color)\n', (700, 744), False, 'import cv2\n'), ((1065, 1096), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (1087, 1096), False, 'import cv2\n'), ((1647, 1678), 'pickle.dump', 'pickle.dump', (['representations', 'f'], {}), '(representations, f)\n', (1658, 1678), False, 'import pickle\n'), ((1886, 1900), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1897, 1900), False, 'import pickle\n'), ((2044, 2060), 'numpy.min', 'np.min', (['distance'], {}), '(distance)\n', (2050, 2060), True, 'import numpy as np\n'), ((2062, 2078), 'numpy.max', 'np.max', (['distance'], {}), '(distance)\n', (2068, 2078), True, 'import numpy as np\n'), ((2181, 2197), 'numpy.min', 'np.min', (['distance'], {}), '(distance)\n', (2187, 2197), True, 'import numpy as np\n'), ((2199, 2215), 'numpy.max', 'np.max', (['distance'], {}), '(distance)\n', (2205, 2215), True, 'import numpy as np\n'), ((2307, 2321), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2318, 2321), False, 'import pickle\n'), ((2348, 2373), 'numpy.array', 'np.array', (['representations'], {}), '(representations)\n', (2356, 2373), True, 'import numpy as np\n'), ((3000, 3020), 'numpy.max', 'np.max', (['cos_distance'], {}), '(cos_distance)\n', (3006, 3020), True, 'import numpy as np\n'), ((2728, 2748), 'numpy.min', 'np.min', (['cos_distance'], {}), '(cos_distance)\n', (2734, 2748), True, 'import numpy as np\n'), ((2750, 2770), 'numpy.max', 'np.max', (['cos_distance'], {}), '(cos_distance)\n', (2756, 2770), True, 'import numpy as np\n'), ((2809, 2829), 'numpy.min', 'np.min', (['euc_distance'], {}), '(euc_distance)\n', (2815, 2829), True, 'import numpy as np\n'), ((2844, 2864), 'numpy.max', 'np.max', (['euc_distance'], {}), '(euc_distance)\n', (2850, 2864), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
import imageio
import cv2
import pdb
def recursive_glob(rootdir=".", suffix=""):
    """Return paths of all files under *rootdir* whose names end with *suffix*."""
    matches = []
    for dirpath, _, filenames in os.walk(rootdir):
        for fname in filenames:
            if fname.endswith(suffix):
                matches.append(os.path.join(dirpath, fname))
    return matches
class cityscapesLoader():
    """Loads Cityscapes-style frames and decodes label maps to colour images."""

    # Cityscapes palette: one RGB triple per train-id class (19 classes).
    colors = [ # [  0,   0,   0],
        [128, 64, 128],
        [244, 35, 232],
        [70, 70, 70],
        [102, 102, 156],
        [190, 153, 153],
        [153, 153, 153],
        [250, 170, 30],
        [220, 220, 0],
        [107, 142, 35],
        [152, 251, 152],
        [0, 130, 180],
        [220, 20, 60],
        [255, 0, 0],
        [0, 0, 142],
        [0, 0, 70],
        [0, 60, 100],
        [0, 80, 100],
        [0, 0, 230],
        [119, 11, 32],
    ]

    label_colours = dict(zip(range(19), colors))

    def __init__(self, img_path, in_size):
        """Collect every .png under *img_path*; *in_size* is (height, width)."""
        self.img_path = img_path
        self.n_classes = 19
        self.files = sorted(recursive_glob(rootdir=self.img_path, suffix=".png"))
        self.files_num = len(self.files)
        self.data = []
        # cv2.resize expects (width, height), so flip the incoming (h, w).
        self.size = (in_size[1], in_size[0])
        # ImageNet per-channel normalisation constants.
        self.mean = np.array([.485, .456, .406])
        self.std = np.array([.229, .224, .225])

    def load_frames(self):
        """Read, resize and normalise every collected frame into self.data."""
        for path in (p.rstrip() for p in self.files):
            parts = path.split('/')
            img_name, folder = parts[-1], parts[-2]
            img = imageio.imread(path)
            img = cv2.resize(img, self.size) / 255.0
            img = (img - self.mean) / self.std
            # HWC -> CHW, then prepend a batch dimension.
            img = img.transpose(2, 0, 1)[np.newaxis, :]
            tensor = torch.from_numpy(img).float()
            self.data.append([tensor, img_name, folder, self.size])

    def decode_segmap(self, temp):
        """Map an (H, W) label array to an (H, W, 3) RGB image via the palette."""
        channels = [temp.copy() for _ in range(3)]
        for label in range(0, self.n_classes):
            for c in range(3):
                channels[c][temp == label] = self.label_colours[label][c]
        rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
        for c in range(3):
            rgb[:, :, c] = channels[c]
        return rgb
| [
"imageio.imread",
"os.walk",
"numpy.zeros",
"numpy.array",
"os.path.join",
"cv2.resize",
"torch.from_numpy"
] | [((146, 178), 'os.path.join', 'os.path.join', (['looproot', 'filename'], {}), '(looproot, filename)\n', (158, 178), False, 'import os\n'), ((1195, 1226), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1203, 1226), True, 'import numpy as np\n'), ((1243, 1274), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1251, 1274), True, 'import numpy as np\n'), ((2229, 2272), 'numpy.zeros', 'np.zeros', (['(temp.shape[0], temp.shape[1], 3)'], {}), '((temp.shape[0], temp.shape[1], 3))\n', (2237, 2272), True, 'import numpy as np\n'), ((217, 233), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (224, 233), False, 'import os\n'), ((1561, 1585), 'imageio.imread', 'imageio.imread', (['img_path'], {}), '(img_path)\n', (1575, 1585), False, 'import imageio\n'), ((1643, 1669), 'cv2.resize', 'cv2.resize', (['img', 'self.size'], {}), '(img, self.size)\n', (1653, 1669), False, 'import cv2\n'), ((1814, 1835), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (1830, 1835), False, 'import torch\n')] |
import pytest
import numpy as np
from devito import norm
from examples.seismic import Model, setup_geometry, AcquisitionGeometry
def not_bcs(bc):
    """Return the opposite boundary-condition spec: "damp" <-> "mask"."""
    if bc == "damp":
        return ("mask", 1)
    return ("damp", 0)
@pytest.mark.parametrize('nbl, bcs', [
    (20, ("mask", 1)), (0, ("mask", 1)),
    (20, ("damp", 0)), (0, ("damp", 0))
])
def test_damp(nbl, bcs):
    """Check the damping-field centre value for both bc kinds, before and
    after switching the boundary condition with model._initialize_bcs."""
    shape = (21, 21)
    model = Model((0, 0), (10, 10), shape, 4, np.ones(shape), nbl=nbl, bcs=bcs[0])

    def center_value():
        # model.damp may not carry a .data attribute (NOTE(review): presumably
        # when it degenerates to a scalar — confirm); fall back to it directly.
        try:
            return model.damp.data[tuple(s // 2 for s in model.damp.shape)]
        except AttributeError:
            return model.damp

    assert all(s == s0 + 2 * nbl for s, s0 in zip(model.vp.shape, shape))
    assert center_value() == bcs[1]

    switch_bcs = not_bcs(bcs[0])
    model._initialize_bcs(bcs=switch_bcs[0])
    assert center_value() == switch_bcs[1]
@pytest.mark.parametrize('shape', [(41,), (21, 21), (11, 11, 11)])
def test_default_geom(shape):
    """setup_geometry defaults: a single Ricker source and a line of receivers."""
    ndim = len(shape)
    model = Model(tuple([0] * ndim), tuple([10] * ndim), shape, 4,
                  np.ones(shape), nbl=20, dt=1)
    assert model.critical_dt == 1

    geometry = setup_geometry(model, 250)
    nrec = shape[0] * (shape[1] if ndim > 2 else 1)
    assert geometry.grid == model.grid
    assert geometry.nrec == nrec
    assert geometry.nsrc == 1
    assert geometry.src_type == "Ricker"

    # 250 time units at dt=1 gives 251 samples.
    assert geometry.rec.shape == (251, nrec)
    assert norm(geometry.rec) == 0
    assert geometry.src.shape == (251, 1)
    assert norm(geometry.new_src(src_type=None)) == 0

    resampled = geometry.rec.resample(num=501)
    assert resampled.shape == (501, nrec)
    assert resampled.grid == model.grid

    assert geometry.new_rec(name="bonjour").name == "bonjour"
    assert geometry.new_src(name="bonjour").name == "bonjour"
@pytest.mark.parametrize('shape', [(41,), (21, 21), (11, 11, 11)])
def test_geom(shape):
    """AcquisitionGeometry built from explicit source/receiver coordinates."""
    ndim = len(shape)
    model = Model(tuple([0] * ndim), tuple([10] * ndim), shape, 4,
                  np.ones(shape), nbl=20, dt=1)
    assert model.critical_dt == 1

    nrec, nsrc = 31, 4
    geometry = AcquisitionGeometry(model,
                                  np.ones((nrec, ndim)),
                                  np.ones((nsrc, ndim)),
                                  t0=0.0, tn=250)
    assert geometry.grid == model.grid
    assert geometry.nrec == nrec
    assert geometry.nsrc == nsrc
    # No wavelet requested, so the source type stays unset.
    assert geometry.src_type is None

    assert geometry.rec.shape == (251, nrec)
    assert norm(geometry.rec) == 0
    assert geometry.src.shape == (251, nsrc)
    assert norm(geometry.new_src(src_type=None)) == 0
    assert norm(geometry.src) == 0

    resampled = geometry.rec.resample(num=501)
    assert resampled.shape == (501, nrec)
    assert resampled.grid == model.grid

    assert geometry.new_rec(name="bonjour").name == "bonjour"
    assert geometry.new_src(name="bonjour").name == "bonjour"
| [
"examples.seismic.AcquisitionGeometry",
"examples.seismic.setup_geometry",
"numpy.ones",
"devito.norm",
"examples.seismic.Model",
"pytest.mark.parametrize"
] | [((208, 324), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nbl, bcs"""', "[(20, ('mask', 1)), (0, ('mask', 1)), (20, ('damp', 0)), (0, ('damp', 0))]"], {}), "('nbl, bcs', [(20, ('mask', 1)), (0, ('mask', 1)), (\n 20, ('damp', 0)), (0, ('damp', 0))])\n", (231, 324), False, 'import pytest\n'), ((971, 1036), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(41,), (21, 21), (11, 11, 11)]'], {}), "('shape', [(41,), (21, 21), (11, 11, 11)])\n", (994, 1036), False, 'import pytest\n'), ((1902, 1967), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape"""', '[(41,), (21, 21), (11, 11, 11)]'], {}), "('shape', [(41,), (21, 21), (11, 11, 11)])\n", (1925, 1967), False, 'import pytest\n'), ((386, 400), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (393, 400), True, 'import numpy as np\n'), ((413, 471), 'examples.seismic.Model', 'Model', (['(0, 0)', '(10, 10)', 'shape', '(4)', 'vp'], {'nbl': 'nbl', 'bcs': 'bcs[0]'}), '((0, 0), (10, 10), shape, 4, vp, nbl=nbl, bcs=bcs[0])\n', (418, 471), False, 'from examples.seismic import Model, setup_geometry, AcquisitionGeometry\n'), ((1076, 1090), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1083, 1090), True, 'import numpy as np\n'), ((1164, 1203), 'examples.seismic.Model', 'Model', (['o', 'd', 'shape', '(4)', 'vp'], {'nbl': '(20)', 'dt': '(1)'}), '(o, d, shape, 4, vp, nbl=20, dt=1)\n', (1169, 1203), False, 'from examples.seismic import Model, setup_geometry, AcquisitionGeometry\n'), ((1254, 1280), 'examples.seismic.setup_geometry', 'setup_geometry', (['model', '(250)'], {}), '(model, 250)\n', (1268, 1280), False, 'from examples.seismic import Model, setup_geometry, AcquisitionGeometry\n'), ((1999, 2013), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (2006, 2013), True, 'import numpy as np\n'), ((2087, 2126), 'examples.seismic.Model', 'Model', (['o', 'd', 'shape', '(4)', 'vp'], {'nbl': '(20)', 'dt': '(1)'}), '(o, d, shape, 4, vp, nbl=20, dt=1)\n', (2092, 
2126), False, 'from examples.seismic import Model, setup_geometry, AcquisitionGeometry\n'), ((2304, 2380), 'examples.seismic.AcquisitionGeometry', 'AcquisitionGeometry', (['model', 'rec_coordinates', 'src_coordinates'], {'t0': '(0.0)', 'tn': '(250)'}), '(model, rec_coordinates, src_coordinates, t0=0.0, tn=250)\n', (2323, 2380), False, 'from examples.seismic import Model, setup_geometry, AcquisitionGeometry\n'), ((1539, 1557), 'devito.norm', 'norm', (['geometry.rec'], {}), '(geometry.rec)\n', (1543, 1557), False, 'from devito import norm\n'), ((2615, 2633), 'devito.norm', 'norm', (['geometry.rec'], {}), '(geometry.rec)\n', (2619, 2633), False, 'from devito import norm\n'), ((2749, 2767), 'devito.norm', 'norm', (['geometry.src'], {}), '(geometry.src)\n', (2753, 2767), False, 'from devito import norm\n')] |
import numpy as np
import tensorflow as tf
import cv2
# Per-channel ImageNet pixel means, reshaped to (1, 1, 1, 3) so they
# broadcast over NHWC image batches.  NOTE(review): these are the RGB-order
# means, but images here are read with cv2 (BGR) — confirm channel order.
IMAGENET_MEANS = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
def compute_content_cost(content_img, generated_img):
    """Compute the content cost as specified in the research paper.

    Arguments:
    content_img -- a Tensor of dimension (1, height, width, channels), representing the hidden layer activations of the input content image
    generated_img -- a Tensor of dimension (1, height, width, channels), representing the hidden layer activations of the output image
    """
    _, h, w, c = content_img.shape
    # 1/(4*h*w*c) is the normalisation used by the original cost definition.
    return 1/(4*h*w*c)*tf.reduce_sum(tf.square(content_img-generated_img))
def style_layer_cost(style_img, generated_img):
    """Compute the style cost for a single style layer.

    Arguments:
    style_img -- a Tensor of dimension (1, height, width, channels), representing the hidden layer activations of the input style image
    generated_img -- a Tensor of dimension (1, height, width, channels), representing the hidden layer activations of the output image
    """
    _, h, w, c = style_img.shape
    # Unroll each activation map to shape (channels, height*width).
    style_img = tf.transpose(tf.reshape(style_img, (h*w, c)))
    generated_img = tf.transpose(tf.reshape(generated_img, (h*w, c)))
    gram_style = gram_matrix(style_img)
    gram_generated = gram_matrix(generated_img)
    return 1/(4*c**2*(h*w)**2)*tf.reduce_sum(tf.square(gram_style - gram_generated))
def compute_style_cost(sess, model, style_layers):
    """Compute the style cost through all the layers.

    Arguments:
    sess -- a Tensorflow session
    model -- the vgg19 model, from model.py
    style_layers -- a list of the layers to use for computing the style cost, with names from the format in model.py
    """
    style_cost = 0
    weight = 1.0/len(style_layers)  # equal weighting per layer
    for layer in style_layers:
        out = model[layer]
        # Evaluate the style image's activations now; the generated image's
        # activations stay symbolic so the optimizer can act on them.
        style_activations = sess.run(out)
        style_cost += weight*style_layer_cost(style_activations, out)
    return style_cost
def gram_matrix(mat):
    """Compute the Gram matrix (mat @ mat.T) of a specified matrix.

    Arguments:
    mat -- a Tensor of dimension (channels, height*width)

    Returns a Tensor of dimension (channels, channels).
    """
    return tf.matmul(mat, tf.transpose(mat))
def compute_total_cost(content_cost, style_cost, alpha, beta):
    """Compute the total cost of neural style transfer.

    Arguments:
    content_cost -- a float representing the content cost
    style_cost -- a float representing the style cost
    alpha -- a float hyperparameter weighting the content cost
    beta -- a float hyperparameter weighting the style cost
    """
    return alpha*content_cost + beta*style_cost
def preprocess(image, image_size):
    """Preprocess an input image for VGG19.

    Arguments:
    image -- a numpy array representing an image
    image_size -- tuple of format (h, w) representing the target image size
    """
    # cv2.resize takes (w, h), so flip the (h, w) tuple.
    image_size = image_size[::-1]
    image = cv2.resize(image, image_size)
    # VGG19 input image size is (1, h, w, c): prepend a batch dimension.
    image = np.reshape(image, ((1,) + image.shape))
    # Subtract the ImageNet means so the data is centered around 0.
    return image - IMAGENET_MEANS
def unpreprocess(image):
    """Undo `preprocess` so the image can be displayed.

    Arguments:
    image -- a numpy array of shape (1, h, w, c) as produced by `preprocess`
    """
    image = image + IMAGENET_MEANS
    image = image[0]  # drop the batch dimension
    return np.clip(image, 0, 255).astype("uint8")
def read_image(path):
    """Read in an image given a path, via OpenCV (BGR channel order)."""
    return cv2.imread(path)
def generate_initial_output(content_img):
    """Generate a noise image based on the original content as the initial output.

    Arguments:
    content_img -- a numpy array representing the (preprocessed) content image
    """
    bound = 20  # uniform noise amplitude
    noise = np.random.uniform(-bound, bound, content_img.shape).astype("float32")
    ratio = 0.6  # fraction of noise in the blend
    return noise*ratio + content_img*(1-ratio)
| [
"numpy.random.uniform",
"tensorflow.reshape",
"numpy.clip",
"tensorflow.transpose",
"cv2.imread",
"numpy.array",
"numpy.reshape",
"tensorflow.square",
"cv2.resize"
] | [((3050, 3079), 'cv2.resize', 'cv2.resize', (['image', 'image_size'], {}), '(image, image_size)\n', (3060, 3079), False, 'import cv2\n'), ((3092, 3129), 'numpy.reshape', 'np.reshape', (['image', '((1,) + image.shape)'], {}), '(image, (1,) + image.shape)\n', (3102, 3129), True, 'import numpy as np\n'), ((3572, 3588), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3582, 3588), False, 'import cv2\n'), ((72, 108), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (80, 108), True, 'import numpy as np\n'), ((1101, 1134), 'tensorflow.reshape', 'tf.reshape', (['style_img', '(h * w, c)'], {}), '(style_img, (h * w, c))\n', (1111, 1134), True, 'import tensorflow as tf\n'), ((1167, 1204), 'tensorflow.reshape', 'tf.reshape', (['generated_img', '(h * w, c)'], {}), '(generated_img, (h * w, c))\n', (1177, 1204), True, 'import tensorflow as tf\n'), ((2245, 2262), 'tensorflow.transpose', 'tf.transpose', (['mat'], {}), '(mat)\n', (2257, 2262), True, 'import tensorflow as tf\n'), ((603, 641), 'tensorflow.square', 'tf.square', (['(content_img - generated_img)'], {}), '(content_img - generated_img)\n', (612, 641), True, 'import tensorflow as tf\n'), ((1340, 1378), 'tensorflow.square', 'tf.square', (['(gram_style - gram_generated)'], {}), '(gram_style - gram_generated)\n', (1349, 1378), True, 'import tensorflow as tf\n'), ((3442, 3464), 'numpy.clip', 'np.clip', (['image', '(0)', '(255)'], {}), '(image, 0, 255)\n', (3449, 3464), True, 'import numpy as np\n'), ((3826, 3877), 'numpy.random.uniform', 'np.random.uniform', (['(-bound)', 'bound', 'content_img.shape'], {}), '(-bound, bound, content_img.shape)\n', (3843, 3877), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.