code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import segyio
import pyvds
VDS_FILE = 'test_data/small.vds'
SGY_FILE = 'test_data/small.sgy'
def compare_inline_ordinal(vds_filename, sgy_filename, lines_to_test, tolerance):
    """Check VDS inlines match SEG-Y inlines when addressed by ordinal position."""
    with pyvds.open(vds_filename) as vdsfile, segyio.open(sgy_filename) as segyfile:
        for ordinal in lines_to_test:
            expected = segyfile.iline[segyfile.ilines[ordinal]]
            actual = vdsfile.iline[vdsfile.ilines[ordinal]]
            assert np.allclose(actual, expected, rtol=tolerance)
def compare_inline_number(vds_filename, sgy_filename, lines_to_test, tolerance):
    """Check VDS inlines match SEG-Y inlines when addressed by line number."""
    with pyvds.open(vds_filename) as vdsfile, segyio.open(sgy_filename) as segyfile:
        for number in lines_to_test:
            expected = segyfile.iline[number]
            actual = vdsfile.iline[number]
            assert np.allclose(actual, expected, rtol=tolerance)
def compare_inline_slicing(vds_filename):
    """Verify slice-based iline access equals stacking the per-line reads."""
    test_slices = [slice(1, 5, 2), slice(1, 2, None), slice(1, 3, None),
                   slice(None, 3, None), slice(3, None, None)]
    with pyvds.open(vds_filename) as vdsfile:
        for sl in test_slices:
            sliced = np.asarray(vdsfile.iline[sl])
            # Default bounds correspond to the test file's inline range [1, 6).
            lo = 1 if sl.start is None else sl.start
            hi = 6 if sl.stop is None else sl.stop
            inc = 1 if sl.step is None else sl.step
            stacked = np.asarray([vdsfile.iline[n] for n in range(lo, hi, inc)])
            assert np.array_equal(sliced, stacked)
def test_inline_accessor():
    """Inline accessor: ordinal, line-number, and slice addressing all agree."""
    ordinals = [0, 1, 2, 3, 4]
    line_numbers = [1, 2, 3, 4, 5]
    compare_inline_ordinal(VDS_FILE, SGY_FILE, ordinals, tolerance=1e-5)
    compare_inline_number(VDS_FILE, SGY_FILE, line_numbers, tolerance=1e-5)
    compare_inline_slicing(VDS_FILE)
def compare_crossline_ordinal(vds_filename, sgy_filename, lines_to_test, tolerance):
    """Check VDS crosslines match SEG-Y crosslines when addressed by ordinal."""
    with pyvds.open(vds_filename) as vdsfile, segyio.open(sgy_filename) as segyfile:
        for ordinal in lines_to_test:
            expected = segyfile.xline[segyfile.xlines[ordinal]]
            actual = vdsfile.xline[vdsfile.xlines[ordinal]]
            assert np.allclose(actual, expected, rtol=tolerance)
def compare_crossline_number(vds_filename, sgy_filename, lines_to_test, tolerance):
    """Check VDS crosslines match SEG-Y crosslines when addressed by number."""
    with pyvds.open(vds_filename) as vdsfile, segyio.open(sgy_filename) as segyfile:
        for number in lines_to_test:
            expected = segyfile.xline[number]
            actual = vdsfile.xline[number]
            assert np.allclose(actual, expected, rtol=tolerance)
def compare_crossline_slicing(vds_filename):
    """Verify slice-based xline access equals stacking the per-line reads."""
    test_slices = [slice(20, 21, 2), slice(21, 23, 1),
                   slice(None, 22, None), slice(22, None, None)]
    with pyvds.open(vds_filename) as vdsfile:
        for sl in test_slices:
            sliced = np.asarray(vdsfile.xline[sl])
            # Default bounds correspond to the test file's crossline range [20, 25).
            lo = 20 if sl.start is None else sl.start
            hi = 25 if sl.stop is None else sl.stop
            inc = 1 if sl.step is None else sl.step
            stacked = np.asarray([vdsfile.xline[n] for n in range(lo, hi, inc)])
            assert np.array_equal(sliced, stacked)
def test_crossline_accessor():
    """Crossline accessor: ordinal, line-number, and slice addressing all agree."""
    ordinals = [0, 1, 2, 3, 4]
    line_numbers = [20, 21, 22, 23, 24]
    compare_crossline_ordinal(VDS_FILE, SGY_FILE, ordinals, tolerance=1e-5)
    compare_crossline_number(VDS_FILE, SGY_FILE, line_numbers, tolerance=1e-5)
    compare_crossline_slicing(VDS_FILE)
def compare_zslice(vds_filename, tolerance, sgy_filename=SGY_FILE):
    """Check the first 50 depth slices of a VDS file against a SEG-Y file.

    Parameters
    ----------
    vds_filename : str
        Path of the VDS file to read.
    tolerance : float
        Relative tolerance passed to ``np.allclose``.
    sgy_filename : str, optional
        Path of the reference SEG-Y file.  Defaults to the module-level
        SGY_FILE; previously this was hard-coded inside the body, unlike
        the other ``compare_*`` helpers which take it as an argument.
    """
    with pyvds.open(vds_filename) as vdsfile:
        with segyio.open(sgy_filename) as segyfile:
            for line_number in range(50):
                slice_vds = vdsfile.depth_slice[line_number]
                slice_segy = segyfile.depth_slice[line_number]
                assert np.allclose(slice_vds, slice_segy, rtol=tolerance)
def test_zslice_accessor():
    """Depth-slice accessor must reproduce segyio's depth slices."""
    compare_zslice(VDS_FILE, tolerance=1e-5)
def test_trace_accessor():
    """Trace accessor agrees with segyio, including negative indices."""
    with pyvds.open(VDS_FILE) as vdsfile, segyio.open(SGY_FILE) as segyfile:
        for idx in range(-5, 25):
            got = vdsfile.trace[idx]
            want = segyfile.trace[idx]
            assert np.allclose(got, want, rtol=1e-5)
def test_read_bin_header():
    """Binary file header read via pyvds equals segyio's."""
    with pyvds.open(VDS_FILE) as vdsfile, segyio.open(SGY_FILE) as segyfile:
        assert vdsfile.bin == segyfile.bin
def test_read_trace_header():
    """Per-trace headers agree with segyio, including negative indices."""
    with pyvds.open(VDS_FILE) as vdsfile, segyio.open(SGY_FILE) as sgyfile:
        for idx in range(-5, 25):
            got = vdsfile.header[idx]
            want = sgyfile.header[idx]
            assert got == want
def test_read_trace_header_slicing():
    """Slice access to trace headers matches segyio element-wise."""
    test_slices = [slice(0, 5, None), slice(0, None, 2), slice(5, None, -1),
                   slice(None, None, 10), slice(None, None, None)]
    with pyvds.open(VDS_FILE) as vdsfile, segyio.open(SGY_FILE) as sgyfile:
        for sl in test_slices:
            want_headers = sgyfile.header[sl]
            got_headers = vdsfile.header[sl]
            for got, want in zip(got_headers, want_headers):
                assert got == want
def test_header_is_iterable():
    """Header accessors of both readers iterate trace headers in lockstep."""
    with pyvds.open(VDS_FILE) as vdsfile, segyio.open(SGY_FILE) as sgy_file:
        for got, want in zip(vdsfile.header, sgy_file.header):
            assert got == want
def compare_cube(vds_filename, sgy_filename, tolerance):
    """Full-volume reads via the two ``tools.cube`` helpers must agree."""
    from_sgy = segyio.tools.cube(sgy_filename)
    from_vds = pyvds.tools.cube(vds_filename)
    assert np.allclose(from_vds, from_sgy, rtol=tolerance)
def compare_dt(vds_filename, sgy_filename):
    """Sample interval reported by pyvds must equal the one from segyio."""
    with segyio.open(sgy_filename) as sgy_file:
        dt_from_sgy = segyio.tools.dt(sgy_file)
    with pyvds.open(vds_filename) as vds_file:
        dt_from_vds = pyvds.tools.dt(vds_file)
    assert dt_from_vds == dt_from_sgy
def test_tools_functions():
    """The tools module: cube volume read and sample-interval query."""
    compare_cube(VDS_FILE, SGY_FILE, tolerance=1e-5)
    compare_dt(VDS_FILE, SGY_FILE)
| [
"numpy.allclose",
"pyvds.tools.dt",
"segyio.tools.cube",
"segyio.tools.dt",
"pyvds.tools.cube",
"numpy.asarray",
"pyvds.open",
"numpy.array_equal",
"segyio.open"
] | [((5742, 5773), 'segyio.tools.cube', 'segyio.tools.cube', (['sgy_filename'], {}), '(sgy_filename)\n', (5759, 5773), False, 'import segyio\n'), ((5788, 5818), 'pyvds.tools.cube', 'pyvds.tools.cube', (['vds_filename'], {}), '(vds_filename)\n', (5804, 5818), False, 'import pyvds\n'), ((5830, 5875), 'numpy.allclose', 'np.allclose', (['vol_vds', 'vol_sgy'], {'rtol': 'tolerance'}), '(vol_vds, vol_sgy, rtol=tolerance)\n', (5841, 5875), True, 'import numpy as np\n'), ((206, 230), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (216, 230), False, 'import pyvds\n'), ((654, 678), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (664, 678), False, 'import pyvds\n'), ((1139, 1163), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (1149, 1163), False, 'import pyvds\n'), ((1942, 1966), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (1952, 1966), False, 'import pyvds\n'), ((2393, 2417), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (2403, 2417), False, 'import pyvds\n'), ((2865, 2889), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (2875, 2889), False, 'import pyvds\n'), ((3647, 3671), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (3657, 3671), False, 'import pyvds\n'), ((4085, 4105), 'pyvds.open', 'pyvds.open', (['VDS_FILE'], {}), '(VDS_FILE)\n', (4095, 4105), False, 'import pyvds\n'), ((4437, 4457), 'pyvds.open', 'pyvds.open', (['VDS_FILE'], {}), '(VDS_FILE)\n', (4447, 4457), False, 'import pyvds\n'), ((4606, 4626), 'pyvds.open', 'pyvds.open', (['VDS_FILE'], {}), '(VDS_FILE)\n', (4616, 4626), False, 'import pyvds\n'), ((5069, 5089), 'pyvds.open', 'pyvds.open', (['VDS_FILE'], {}), '(VDS_FILE)\n', (5079, 5089), False, 'import pyvds\n'), ((5460, 5480), 'pyvds.open', 'pyvds.open', (['VDS_FILE'], {}), '(VDS_FILE)\n', (5470, 5480), False, 'import pyvds\n'), ((5930, 5955), 'segyio.open', 'segyio.open', 
(['sgy_filename'], {}), '(sgy_filename)\n', (5941, 5955), False, 'import segyio\n'), ((5986, 6011), 'segyio.tools.dt', 'segyio.tools.dt', (['sgy_file'], {}), '(sgy_file)\n', (6001, 6011), False, 'import segyio\n'), ((6021, 6045), 'pyvds.open', 'pyvds.open', (['vds_filename'], {}), '(vds_filename)\n', (6031, 6045), False, 'import pyvds\n'), ((6076, 6100), 'pyvds.tools.dt', 'pyvds.tools.dt', (['vds_file'], {}), '(vds_file)\n', (6090, 6100), False, 'import pyvds\n'), ((256, 281), 'segyio.open', 'segyio.open', (['sgy_filename'], {}), '(sgy_filename)\n', (267, 281), False, 'import segyio\n'), ((704, 729), 'segyio.open', 'segyio.open', (['sgy_filename'], {}), '(sgy_filename)\n', (715, 729), False, 'import segyio\n'), ((1233, 1266), 'numpy.asarray', 'np.asarray', (['vdsfile.iline[slice_]'], {}), '(vdsfile.iline[slice_])\n', (1243, 1266), True, 'import numpy as np\n'), ((1577, 1620), 'numpy.array_equal', 'np.array_equal', (['slices_slice', 'slices_concat'], {}), '(slices_slice, slices_concat)\n', (1591, 1620), True, 'import numpy as np\n'), ((1992, 2017), 'segyio.open', 'segyio.open', (['sgy_filename'], {}), '(sgy_filename)\n', (2003, 2017), False, 'import segyio\n'), ((2443, 2468), 'segyio.open', 'segyio.open', (['sgy_filename'], {}), '(sgy_filename)\n', (2454, 2468), False, 'import segyio\n'), ((2959, 2992), 'numpy.asarray', 'np.asarray', (['vdsfile.xline[slice_]'], {}), '(vdsfile.xline[slice_])\n', (2969, 2992), True, 'import numpy as np\n'), ((3305, 3348), 'numpy.array_equal', 'np.array_equal', (['slices_slice', 'slices_concat'], {}), '(slices_slice, slices_concat)\n', (3319, 3348), True, 'import numpy as np\n'), ((3697, 3718), 'segyio.open', 'segyio.open', (['SGY_FILE'], {}), '(SGY_FILE)\n', (3708, 3718), False, 'import segyio\n'), ((4131, 4152), 'segyio.open', 'segyio.open', (['SGY_FILE'], {}), '(SGY_FILE)\n', (4142, 4152), False, 'import segyio\n'), ((4483, 4504), 'segyio.open', 'segyio.open', (['SGY_FILE'], {}), '(SGY_FILE)\n', (4494, 4504), False, 'import 
segyio\n'), ((4652, 4673), 'segyio.open', 'segyio.open', (['SGY_FILE'], {}), '(SGY_FILE)\n', (4663, 4673), False, 'import segyio\n'), ((5115, 5136), 'segyio.open', 'segyio.open', (['SGY_FILE'], {}), '(SGY_FILE)\n', (5126, 5136), False, 'import segyio\n'), ((5506, 5527), 'segyio.open', 'segyio.open', (['SGY_FILE'], {}), '(SGY_FILE)\n', (5517, 5527), False, 'import segyio\n'), ((512, 562), 'numpy.allclose', 'np.allclose', (['slice_vds', 'slice_segy'], {'rtol': 'tolerance'}), '(slice_vds, slice_segy, rtol=tolerance)\n', (523, 562), True, 'import numpy as np\n'), ((924, 974), 'numpy.allclose', 'np.allclose', (['slice_vds', 'slice_segy'], {'rtol': 'tolerance'}), '(slice_vds, slice_segy, rtol=tolerance)\n', (935, 974), True, 'import numpy as np\n'), ((2248, 2298), 'numpy.allclose', 'np.allclose', (['slice_vds', 'slice_segy'], {'rtol': 'tolerance'}), '(slice_vds, slice_segy, rtol=tolerance)\n', (2259, 2298), True, 'import numpy as np\n'), ((2663, 2713), 'numpy.allclose', 'np.allclose', (['slice_vds', 'slice_segy'], {'rtol': 'tolerance'}), '(slice_vds, slice_segy, rtol=tolerance)\n', (2674, 2713), True, 'import numpy as np\n'), ((3921, 3971), 'numpy.allclose', 'np.allclose', (['slice_vds', 'slice_segy'], {'rtol': 'tolerance'}), '(slice_vds, slice_segy, rtol=tolerance)\n', (3932, 3971), True, 'import numpy as np\n'), ((4353, 4399), 'numpy.allclose', 'np.allclose', (['vds_trace', 'segy_trace'], {'rtol': '(1e-05)'}), '(vds_trace, segy_trace, rtol=1e-05)\n', (4364, 4399), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Module of Lauetools project
<NAME> Feb 2012
module to fit orientation and strain
http://sourceforge.net/projects/lauetools/
"""
__author__ = "<NAME>, CRG-IF BM32 @ ESRF"
from scipy.optimize import leastsq, least_squares
import numpy as np
np.set_printoptions(precision=15)
from scipy.linalg import qr
try:
from lauetools import CrystalParameters as CP
from lauetools import generaltools as GT
from lauetools import LaueGeometry as F2TC
from lauetools import dict_LaueTools as DictLT
from lauetools.dict_LaueTools import DEG
except:
import lauetoolsnn.lauetools.CrystalParameters as CP
import lauetoolsnn.lauetools.generaltools as GT
import lauetoolsnn.lauetools.LaueGeometry as F2TC
import lauetoolsnn.lauetools.dict_LaueTools as DictLT
from lauetoolsnn.lauetools.dict_LaueTools import DEG
RAD = 1.0 / DEG
IDENTITYMATRIX = np.eye(3)
def remove_harmonic(hkl, uflab, yz):
    """Remove harmonic spots (duplicate scattering directions) from a peak list.

    Two spots are considered harmonics when their ``uflab`` unit vectors differ
    by less than ``toluf``; only the first occurrence is kept.

    Parameters
    ----------
    hkl : array (n, 3) of Miller indices
    uflab : array (n, 3) of scattered-beam direction vectors
    yz : array of per-spot positions, indexed in step with ``hkl``

    Returns
    -------
    tuple (hkl2, uflab2, yz2, nspots2, isbadpeak) : the filtered arrays, the
    number of remaining spots, and the 0/1 flag array marking removed spots.
    """
    nn = len(uflab[:, 0])
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int gives the same default integer dtype.
    isbadpeak = np.zeros(nn, dtype=int)
    toluf = 0.05  # tolerance on the difference of direction vectors

    # O(n^2) pairwise comparison; later duplicates are flagged, never the first.
    for i in range(nn):
        if isbadpeak[i] == 0:
            for j in range(i + 1, nn):
                if isbadpeak[j] == 0:
                    if GT.norme_vec(uflab[j, :] - uflab[i, :]) < toluf:
                        isbadpeak[j] = 1

    index_goodpeak = np.where(isbadpeak == 0)
    hkl2 = hkl[index_goodpeak]
    uflab2 = uflab[index_goodpeak]
    yz2 = yz[index_goodpeak]
    nspots2 = len(hkl2[:, 0])
    return (hkl2, uflab2, yz2, nspots2, isbadpeak)
def xy_from_Quat(varying_parameter_values, DATA_Q, nspots, varying_parameter_indices,
                allparameters,
                initrot=None,
                vecteurref=IDENTITYMATRIX,
                pureRotation=0,
                labXMAS=0,
                verbose=0,
                pixelsize=165.0 / 2048,
                dim=(2048, 2048),
                kf_direction="Z>0"):
    """
    compute x and y pixel positions of Laue spots given hkl list

    DATA_Q: array of all 3 elements miller indices
    nspots: indices of selected spots of DATA_Q
    initrot: initial orientation matrix (rotation and distorsion)

    varying_parameter_values: array of value that will be taken into account
    varying_parameter_indices: list of indices (element position) of
    varying parameters in allparameters array
    allparameters: array of 8 elements: 5 first of calibration parameters
    and 3 of angles defining quaternion

    WARNING: All miller indices must be entered in DATA_Q, selection is done in xy_from_Quat
    WARNING2: len(varying_parameter_values)=len(varying_parameter_indices)

    NOTE: mutates ``allparameters`` in place (np.put of the varying values).

    returns:
    X, Y pixel positions, theta angles, and the (rotation/UB) matrix used
    """
    allparameters.put(varying_parameter_indices, varying_parameter_values)
    calibration_parameters = allparameters[:5]

    # selecting nspots of DATA_Q
    DATAQ = np.take(DATA_Q, nspots, axis=0)
    trQ = np.transpose(DATAQ)  # np.array(Hs, Ks, Ls) for further computations

    # BUG FIX: R was previously unbound when initrot is None, raising a
    # NameError at the final return; default to the identity instead.
    R = IDENTITYMATRIX
    if initrot is not None:
        if pureRotation:  # extract pure rotation matrix R from UB matrix (QR decomposition)
            R, Q = qr(initrot)
            R = R / np.sign(np.diag(Q))
        else:  # keep UB matrix rotation + distorsion
            R = initrot
        # initial lattice rotation and distorsion (/ cubic structure) q = U*B * Q
        trQ = np.dot(np.dot(R, vecteurref), trQ)
        # results are qx, qy, qz
    else:
        print("I DONT LIKE INITROT == None")
        print("this must mean that INITROT = Identity ?...")

    # quaternion-based extra sample rotation is disabled: identity
    matfromQuat = np.eye(3)

    Qrot = np.dot(matfromQuat, trQ)  # lattice rotation due to quaternion
    Qrotn = np.sqrt(np.sum(Qrot ** 2, axis=0))  # norms of Q vectors

    twthe, chi = F2TC.from_qunit_to_twchi(1. * Qrot / Qrotn)

    X, Y, theta = F2TC.calc_xycam_from2thetachi(twthe,
                                                chi,
                                                calibration_parameters,
                                                verbose=0,
                                                pixelsize=pixelsize,
                                                kf_direction=kf_direction)
    return X, Y, theta, R
def calc_XY_pixelpositions(calibration_parameters, DATA_Q, nspots, UBmatrix=None,
                            B0matrix=IDENTITYMATRIX,
                            offset=0,
                            pureRotation=0,
                            labXMAS=0,
                            verbose=0,
                            pixelsize=0.079,
                            dim=(2048, 2048),
                            kf_direction="Z>0"):
    """
    Compute X, Y pixel positions of Laue spots from Miller indices.

    DATA_Q: array of all 3 elements miller indices
    nspots: indices of selected spots of DATA_Q
    UBmatrix: orientation matrix; q = UB * B0 * Q when provided

    WARNING: All miller indices must be entered in DATA_Q, selection is done here

    returns:
    X, Y pixel positions, theta angles, and the UB matrix used
    """
    # selecting nspots of DATA_Q
    DATAQ = np.take(DATA_Q, nspots, axis=0)
    trQ = np.transpose(DATAQ)  # np.array(Hs, Ks, Ls) for further computations

    # BUG FIX: R was previously unbound when UBmatrix is None, raising a
    # NameError at the final return; default to the identity instead.
    R = IDENTITYMATRIX
    if UBmatrix is not None:
        R = UBmatrix
        # q = UB * B0 * Q
        trQ = np.dot(np.dot(R, B0matrix), trQ)
        # results are qx, qy, qz
    else:
        print("I DON'T LIKE INITROT == None")
        print("this must mean that INITROT = Identity ?...")

    Qrot = trQ  # lattice rotation due to quaternion
    Qrotn = np.sqrt(np.sum(Qrot ** 2, axis=0))  # norms of Q vectors

    twthe, chi = F2TC.from_qunit_to_twchi(Qrot / Qrotn, labXMAS=labXMAS)

    if verbose:
        print("tDATA_Q", np.transpose(DATA_Q))
        print("Qrot", Qrot)
        print("Qrotn", Qrotn)
        print("Qrot/Qrotn", Qrot / Qrotn)
        print("twthe,chi", twthe, chi)

    X, Y, theta = F2TC.calc_xycam_from2thetachi(
        twthe,
        chi,
        calibration_parameters,
        offset=offset,
        verbose=0,
        pixelsize=pixelsize,
        kf_direction=kf_direction)
    return X, Y, theta, R
def error_function_on_demand_calibration(param_calib,
                                        DATA_Q,
                                        allparameters,
                                        arr_indexvaryingparameters,
                                        nspots,
                                        pixX,
                                        pixY,
                                        initrot=IDENTITYMATRIX,
                                        vecteurref=IDENTITYMATRIX,
                                        pureRotation=1,
                                        verbose=0,
                                        pixelsize=165.0 / 2048,
                                        dim=(2048, 2048),
                                        weights=None,
                                        allspots_info=0,
                                        kf_direction="Z>0"):
    """
    Cost function for detector-geometry calibration refinement.

    All miller indices must be entered in DATA_Q; selection is done in
    xy_from_Quat with nspots (array of indices).  Parameter indices 5, 6, 7
    in ``arr_indexvaryingparameters`` select three elementary rotation angles
    (degrees) applied on top of ``initrot``; indices < 5 select detector
    calibration parameters.

    Returns the per-spot pixel distance residuals; when ``verbose`` or
    ``allspots_info == 1``, additional matrices / spot data are returned.
    """
    mat1, mat2, mat3 = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    invsq2 = 1 / np.sqrt(2)
    # fixed rotation axes used for the three refined elementary rotations
    AXIS1, AXIS2, AXIS3 = np.array([[invsq2, -.5, .5],
                                    [invsq2, .5, -.5],
                                    [0, invsq2, invsq2]])
    # NOTE: the old code also built each matk with an explicit np.array of
    # cos/sin terms and then immediately overwrote it with GT.matRot; the
    # dead computations have been removed.  a / DEG recovers the angle in
    # degrees, as expected by GT.matRot.
    if 5 in arr_indexvaryingparameters:
        ind1 = np.where(arr_indexvaryingparameters == 5)[0][0]
        if len(arr_indexvaryingparameters) > 1:
            a1 = param_calib[ind1] * DEG
        else:
            a1 = param_calib[0] * DEG
        mat1 = GT.matRot(AXIS1, a1 / DEG)

    if 6 in arr_indexvaryingparameters:
        ind2 = np.where(arr_indexvaryingparameters == 6)[0][0]
        if len(arr_indexvaryingparameters) > 1:
            a2 = param_calib[ind2] * DEG
        else:
            a2 = param_calib[0] * DEG
        mat2 = GT.matRot(AXIS2, a2 / DEG)

    if 7 in arr_indexvaryingparameters:
        ind3 = np.where(arr_indexvaryingparameters == 7)[0][0]
        if len(arr_indexvaryingparameters) > 1:
            a3 = param_calib[ind3] * DEG
        else:
            a3 = param_calib[0] * DEG
        mat3 = GT.matRot(AXIS3, a3 / DEG)

    deltamat = np.dot(mat3, np.dot(mat2, mat1))
    newmatrix = np.dot(deltamat, initrot)

    # only detector parameters (indices < 5) are varied inside xy_from_Quat
    onlydetectorindices = arr_indexvaryingparameters[arr_indexvaryingparameters < 5]

    X, Y, theta, _ = xy_from_Quat(param_calib,
                                DATA_Q,
                                nspots,
                                onlydetectorindices,
                                allparameters,
                                initrot=newmatrix,
                                vecteurref=vecteurref,
                                pureRotation=pureRotation,
                                labXMAS=0,
                                verbose=verbose,
                                pixelsize=pixelsize,
                                dim=dim,
                                kf_direction=kf_direction)

    distanceterm = np.sqrt((X - pixX) ** 2 + (Y - pixY) ** 2)

    if weights is not None:
        # take into account the exp. spots intensity as weight in cost distance function
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights

    if allspots_info == 0:
        if verbose:
            return distanceterm, deltamat, newmatrix
        else:
            return distanceterm
    elif allspots_info == 1:
        Xtheo = X
        Ytheo = Y
        Xexp = pixX
        Yexp = pixY
        Xdev = Xtheo - Xexp
        Ydev = Ytheo - Yexp
        theta_theo = theta
        spotsData = [Xtheo, Ytheo, Xexp, Yexp, Xdev, Ydev, theta_theo]
        return distanceterm, deltamat, newmatrix, spotsData
def fit_on_demand_calibration(starting_param, miller, allparameters,
                            _error_function_on_demand_calibration,
                            arr_indexvaryingparameters,
                            nspots,
                            pixX,
                            pixY,
                            initrot=IDENTITYMATRIX,
                            vecteurref=IDENTITYMATRIX,
                            pureRotation=1,
                            verbose=0,
                            pixelsize=165.0 / 2048,
                            dim=(2048, 2048),
                            weights=None,
                            kf_direction="Z>0",
                            **kwd):
    """
    Refine detector calibration parameters (and small rotations) by
    least-squares minimization of the given error function.

    All miller indices must be entered in ``miller``; selection is done in
    xy_from_Quat with nspots (array of indices).  NEEDS AT LEAST 5 spots.

    Returns the refined parameter values (``least_squares`` solution vector).

    NOTE: the dead ``leastsq`` code path that followed the unconditional
    ``return`` has been removed (it was unreachable).
    """
    parameters = ["distance (mm)",
                "Xcen (pixel)",
                "Ycen (pixel)",
                "Angle1 (deg)",
                "Angle2 (deg)",
                "theta1",
                "theta2",
                "theta3"]
    # kept for debugging/printing purposes
    parameters_being_fitted = [parameters[k] for k in arr_indexvaryingparameters]

    param_calib_0 = starting_param

    if verbose:
        # report initial residuals before the fit
        _error_function_on_demand_calibration(param_calib_0,
                                            miller,
                                            allparameters,
                                            arr_indexvaryingparameters,
                                            nspots,
                                            pixX,
                                            pixY,
                                            initrot=initrot,
                                            vecteurref=vecteurref,
                                            pureRotation=pureRotation,
                                            verbose=1,
                                            pixelsize=pixelsize,
                                            dim=dim,
                                            weights=weights,
                                            kf_direction=kf_direction)

    # HACK: the optimizer only forwards positional args, so the keyword
    # defaults of the error function are overwritten for the fit duration
    _error_function_on_demand_calibration.__defaults__ = (initrot,
                                                        vecteurref,
                                                        pureRotation,
                                                        0,
                                                        pixelsize,
                                                        dim,
                                                        weights,
                                                        0,
                                                        kf_direction)

    # For transmission geometry, changing the xgam scale is useful:
    # scale 1 for every parameter except xgam (index 4) which gets 0.1
    xscale = np.ones(len(arr_indexvaryingparameters))
    try:
        posgam = arr_indexvaryingparameters.tolist().index(4)
        xscale[posgam] = .1
    except ValueError:
        pass

    calib_sol2 = least_squares(_error_function_on_demand_calibration,
                            param_calib_0,
                            args=(miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY),
                            tr_solver='exact',
                            x_scale=xscale, max_nfev=None)

    return calib_sol2['x']
def error_function_on_demand_strain(param_strain,
                                    DATA_Q,
                                    allparameters,
                                    arr_indexvaryingparameters,
                                    nspots,
                                    pixX,
                                    pixY,
                                    initrot=IDENTITYMATRIX,
                                    Bmat=IDENTITYMATRIX,
                                    pureRotation=0,
                                    verbose=0,
                                    pixelsize=165.0 / 2048.,
                                    dim=(2048, 2048),
                                    weights=None,
                                    kf_direction="Z>0"):
    """
    Cost function for strain (and orientation) refinement: per-spot pixel
    distances between computed and experimental Laue spot positions.

    #All miller indices must be entered in DATA_Q, selection is done in xy_from_Quat with nspots (array of indices)
    # allparameters must contain 5 detector calibration parameters + 5 parameters of strain + 3 angles of elementary rotation
    # param_strain must contain values of one or many parameters of allparameters
    #
    # strain = param_strain[:5]
    # deltaangles = param_strain[5:8]
    # arr_indexvaryingparameters = array of position of parameters whose values are in param_strain
    # e.g.: arr_indexvaryingparameters = array([5,6,7,8,9]) for only fit strain without orientation refinement
    # e.g.: arr_indexvaryingparameters = array([5,6,7,8,9, 10,11,12]) for strain AND orientation refinement
    # in this function calibration is not refined (but values are needed!), arr_indexvaryingparameters must only contain index >= 5
    Bmat= B0 matrix

    Returns distanceterm (array of per-spot pixel deviations); when verbose,
    returns (distanceterm, deltamat, newmatrix) instead.
    """
    #print('param_strain in error_function_on_demand_strain', param_strain)
    mat1, mat2, mat3 = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    # arr_indexvaryingparameters = [5,6,7,8,9,10,11,12] first 5 params for strain and 3 last for rotation
    index_of_rot_in_arr_indexvaryingparameters = [10, 11, 12]
    # elementary rotation about y (angle read from refined parameter 10)
    if index_of_rot_in_arr_indexvaryingparameters[0] in arr_indexvaryingparameters:
        ind1 = np.where(
            arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[0]
        )[0][0]
        if len(arr_indexvaryingparameters) > 1:
            a1 = param_strain[ind1] * DEG
        else:
            a1 = param_strain[0] * DEG
        # print "a1 (rad)= ",a1
        mat1 = np.array([[np.cos(a1), 0, np.sin(a1)], [0, 1, 0], [-np.sin(a1), 0, np.cos(a1)]])
    # elementary rotation about x (angle read from refined parameter 11)
    if index_of_rot_in_arr_indexvaryingparameters[1] in arr_indexvaryingparameters:
        ind2 = np.where(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[1])[0][0]
        if len(arr_indexvaryingparameters) > 1:
            a2 = param_strain[ind2] * DEG
        else:
            a2 = param_strain[0] * DEG
        # print "a2 (rad)= ",a2
        mat2 = np.array([[1, 0, 0], [0, np.cos(a2), np.sin(a2)], [0, np.sin(-a2), np.cos(a2)]])
    # elementary rotation about z (angle read from refined parameter 12)
    if index_of_rot_in_arr_indexvaryingparameters[2] in arr_indexvaryingparameters:
        ind3 = np.where(
            arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[2])[0][0]
        if len(arr_indexvaryingparameters) > 1:
            a3 = param_strain[ind3] * DEG
        else:
            a3 = param_strain[0] * DEG
        mat3 = np.array([[np.cos(a3), -np.sin(a3), 0],
                        [np.sin(a3), np.cos(a3), 0],
                        [0, 0, 1]])
    # small misorientation applied on top of the initial orientation matrix
    deltamat = np.dot(mat3, np.dot(mat2, mat1))
    # building B mat: upper-triangular strain-distortion matrix from the
    # 5 refined strain components (a11 fixed to 1 as scale reference)
    varyingstrain = np.array([[1.0, param_strain[2], param_strain[3]],
                            [0, param_strain[0], param_strain[4]],
                            [0, 0, param_strain[1]]])
    newmatrix = np.dot(np.dot(deltamat, initrot), varyingstrain)
    # # three last parameters are orientation angles in quaternion expression and are here not used
    # varying_parameter_value = array(allparameters[:5])
    # arr_indexvaryingparameters = arr_indexvaryingparameters [arr_indexvaryingparameters < 5]
    # varying_parameter_value: array of value that will be taken into account
    # xy_from_Quat only uses 5 detector calibration parameter
    # fitting_param: index of position of varying parameters in allparameters array
    # allparameters: array of 8 elements: 5 first of calibration parameters and 3 of angles defining quaternion
    patchallparam = allparameters.tolist()
    # 5 detector parameters + 3 angles + 5 strain components
    ally = np.array(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])
    # special case: index 2 (ycen) also refined; its value is the last element
    if 2 in arr_indexvaryingparameters:
        ally[2]=param_strain[-1]
    # because elem 5 to 7 are used in quaternion calculation
    # TODO : correct also strain calib in the same manner
    X, Y, _, _ = xy_from_Quat(allparameters[:5],
                            DATA_Q,
                            nspots,
                            np.arange(5),
                            ally,
                            initrot=newmatrix,
                            vecteurref=Bmat,
                            pureRotation=0,
                            labXMAS=0,
                            verbose=0,
                            pixelsize=pixelsize,
                            dim=dim,
                            kf_direction=kf_direction)
    # per-spot pixel deviation between model and experiment
    distanceterm = np.sqrt((X - pixX) ** 2 + (Y - pixY) ** 2)
    if weights is not None:
        # intensity-weighted residuals
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights
    if verbose:
        # if weights is not None:
        #     print("***********mean weighted pixel deviation ", np.mean(distanceterm), " ********")
        # else:
        #     print("***********mean pixel deviation ", np.mean(distanceterm), " ********")
        # print "newmatrix", newmatrix
        return distanceterm, deltamat, newmatrix
    else:
        return distanceterm
def _small_rotation_matrix(param_strain, arr_indexvaryingparameters, rot_indices):
    """Build the product mat3.mat2.mat1 of up to three elementary rotations.

    For each parameter index in ``rot_indices`` (y-, x-, z-axis rotation, in
    that order) present in ``arr_indexvaryingparameters``, the angle (degrees)
    is read from ``param_strain`` (from position 0 when a single parameter is
    refined) and the corresponding rotation matrix is built; absent angles
    default to the identity.
    """
    mats = [IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX]
    for k, rotindex in enumerate(rot_indices):
        if rotindex in arr_indexvaryingparameters:
            ind = np.where(arr_indexvaryingparameters == rotindex)[0][0]
            if len(arr_indexvaryingparameters) > 1:
                angle = param_strain[ind] * DEG
            else:
                angle = param_strain[0] * DEG
            c, s = np.cos(angle), np.sin(angle)
            if k == 0:  # rotation about y
                mats[k] = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
            elif k == 1:  # rotation about x
                mats[k] = np.array([[1, 0, 0], [0, c, s], [0, -s, c]])
            else:  # rotation about z
                mats[k] = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    return np.dot(mats[2], np.dot(mats[1], mats[0]))


def error_function_strain_with_two_orientations(param_strain, DATA_Q, allparameters,
                                                arr_indexvaryingparameters, nspots, pixX, pixY,
                                                initrot=IDENTITYMATRIX,
                                                Bmat=IDENTITYMATRIX,
                                                pureRotation=0,
                                                verbose=0,
                                                pixelsize=165.0 / 2048,
                                                dim=(2048, 2048),
                                                weights=None):
    """
    #All miller indices must be entered in DATA_Q, selection is done in xy_from_Quat with nspots (array of indices)
    # allparameters must contain 5 detector calibration parameters + 5 parameters of strain + 3 angles of elementary rotation
    # param_strain must contain values of one or many parameters of allparameters
    #
    # strain = param_strain[:5]
    # arr_indexvaryingparameters = array of position of parameters whose values are in param_strain
    # e.g.: arr_indexvaryingparameters = array([5,6,7,8,9, 10,11,12, 13,14,15]) for strain AND orientation refinement
    # indices 10-12 and 13-15 are the small-rotation angles of grain 1 and grain 2
    # in this function calibration is not refined (but values are needed!), arr_indexvaryingparameters must only contain index >= 5

    TODO: not implemented for transmission geometry (kf_direction='X>0') and backreflection ('X<0')

    .. warning::
        not completed !
    """
    # small misorientations of the two grains
    deltamat_1 = _small_rotation_matrix(param_strain, arr_indexvaryingparameters, [10, 11, 12])
    deltamat_2 = _small_rotation_matrix(param_strain, arr_indexvaryingparameters, [13, 14, 15])

    # building B mat: upper-triangular strain-distortion matrix
    varyingstrain = np.array(
        [[1.0, param_strain[2], param_strain[3]],
        [0, param_strain[0], param_strain[4]],
        [0, 0, param_strain[1]]])
    newmatrix_1 = np.dot(np.dot(deltamat_1, initrot), varyingstrain)
    newmatrix_2 = np.dot(np.dot(deltamat_2, initrot), varyingstrain)

    patchallparam = allparameters.tolist()
    # 5 det parameters + 3 small rotations + 5 strain parameters
    # (elements 5 to 7 are used in quaternion calculation)
    ally = np.array(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])

    # spot positions of grain 1 (kept for faithfulness; residuals below use grain 2)
    X1, Y1, _, _ = xy_from_Quat(allparameters[:5],
                                DATA_Q,
                                nspots,
                                np.arange(5),
                                ally,
                                initrot=newmatrix_1,
                                vecteurref=Bmat,
                                pureRotation=0,
                                labXMAS=0,
                                verbose=0,
                                pixelsize=pixelsize,
                                dim=dim)

    # spot positions of grain 2
    X2, Y2, _, _ = xy_from_Quat(allparameters[:5],
                                DATA_Q,
                                nspots,
                                np.arange(5),
                                ally,
                                initrot=newmatrix_2,
                                vecteurref=Bmat,
                                pureRotation=0,
                                labXMAS=0,
                                verbose=0,
                                pixelsize=pixelsize,
                                dim=dim)
    distanceterm2 = np.sqrt((X2 - pixX) ** 2 + (Y2 - pixY) ** 2)

    # BUG FIX: 'distanceterm' was only bound inside the weights branch, so
    # the non-verbose return raised NameError when weights was None.
    distanceterm = distanceterm2
    if weights is not None:
        allweights = np.sum(weights)
        distanceterm = distanceterm2 * weights / allweights

    if verbose:
        return distanceterm2, (deltamat_1, deltamat_2), (newmatrix_1, newmatrix_2)
    else:
        return distanceterm
def fit_on_demand_strain(starting_param,
                        miller,
                        allparameters,
                        _error_function_on_demand_strain,
                        arr_indexvaryingparameters,
                        nspots,
                        pixX,
                        pixY,
                        initrot=IDENTITYMATRIX,
                        Bmat=IDENTITYMATRIX,
                        pureRotation=0,
                        verbose=0,
                        pixelsize=165.0 / 2048,
                        dim=(2048, 2048),
                        weights=None,
                        kf_direction="Z>0",
                        fitycen=False,
                        **kwd):
    """
    Least-squares refinement of strain and orientation parameters of one grain
    by minimizing `_error_function_on_demand_strain` (pixel distances between
    experimental and modelled spot positions).

    To use it:
    allparameters = 5calibdetectorparams + fivestrainparameter + 3deltaangles of orientations
    starting_param = [fivestrainparameter + 3deltaangles of orientations] = [1,1,0,0,0,0,0,0] typically
    arr_indexvaryingparameters = range(5,13)

    :param starting_param: initial values of the varying parameters
    :param _error_function_on_demand_strain: residual function to minimize
                (its keyword defaults are overwritten below before the fit)
    :param arr_indexvaryingparameters: positions, in `allparameters`, of the
                parameters being refined
    :param fitycen: unused here -- kept for interface compatibility with callers
    :return: array of refined parameter values if leastsq converged, else None
    """
    # All miller indices must be entered in miller, selection is done in xy_from_Quat with nspots (array of indices)
    parameters = ["dd", "xcen", "ycen", "angle1", "angle2", "b/a", "c/a",
                "a12", "a13", "a23", "theta1", "theta2", "theta3", ]
    parameters_being_fitted = [parameters[k] for k in arr_indexvaryingparameters]
    param_strain_0 = starting_param
    if verbose:
        # report residues with the starting values (debug/logging only;
        # result intentionally discarded)
        _error_function_on_demand_strain(param_strain_0,
                                        miller,
                                        allparameters,
                                        arr_indexvaryingparameters,
                                        nspots,
                                        pixX,
                                        pixY,
                                        initrot=initrot,
                                        Bmat=Bmat,
                                        pureRotation=pureRotation,
                                        verbose=0,
                                        pixelsize=pixelsize,
                                        dim=dim,
                                        weights=weights,
                                        kf_direction=kf_direction)
    # setting keywords of _error_function_on_demand_strain during the fitting because leastsq handle only *args but not **kwds
    _error_function_on_demand_strain.__defaults__ = (initrot,
                                                    Bmat,
                                                    pureRotation,
                                                    0,
                                                    pixelsize,
                                                    dim,
                                                    weights,
                                                    kf_direction)
    # LEASTSQUARE
    res = leastsq(_error_function_on_demand_strain,
                param_strain_0,
                args=(miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY),
                maxfev=5000,
                full_output=1,
                xtol=1.0e-11,
                epsfcn=0.0,
                **kwd)
    #--------------------- other least square ------------------
    # For ycen fitting together strain component, changing ycen scale is useful
    # x_scale = [1,1,1,1,.1,1,1,1] 1 except for xgam .1
    xscale = np.ones(len(arr_indexvaryingparameters))
    try:
        # NOTE(review): assigning to xscale[-1] on an empty array would raise
        # IndexError, not ValueError -- this except clause looks unreachable;
        # kept as-is to preserve behavior
        xscale[-1] = 100
    except ValueError:
        pass
    if 0:
        # dead debug branch: alternative fit with scipy least_squares
        # from scipy.optimize import leastsq, least_squares
        calib_sol2 = least_squares(_error_function_on_demand_strain,
                                param_strain_0,
                                args=(miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY),
                                tr_solver = 'exact',
                                x_scale=xscale, max_nfev=None)
        #return calib_sol2['x']
    #--------------------- other least square ------------------
    strain_sol = res[0]
    if res[-1] not in (1, 2, 3, 4, 5):
        # leastsq integer status flag signals failure outside these values
        return None
    else:
        if verbose:
            # recompute/display final residues with the refined parameters
            _error_function_on_demand_strain(strain_sol,
                                            miller,
                                            allparameters,
                                            arr_indexvaryingparameters,
                                            nspots,
                                            pixX,
                                            pixY,
                                            initrot=initrot,
                                            Bmat=Bmat,
                                            pureRotation=pureRotation,
                                            verbose=verbose,
                                            pixelsize=pixelsize,
                                            dim=dim,
                                            weights=weights,
                                            kf_direction=kf_direction)
        return strain_sol
def plot_refinement_oneparameter(starting_param,
                                miller,
                                allparameters,
                                _error_function_on_demand_calibration,
                                arr_indexvaryingparameters,
                                nspots,
                                pixX,
                                pixY,
                                param_range,
                                initrot=IDENTITYMATRIX,
                                vecteurref=IDENTITYMATRIX,
                                pureRotation=1,
                                verbose=0,
                                pixelsize=165.0 / 2048,
                                dim=(2048, 2048),
                                weights=None,
                                kf_direction="Z>0",
                                **kwd):
    """
    Scan a single parameter around `starting_param` and evaluate the mean
    residual of `_error_function_on_demand_calibration` at each scanned value.

    :param param_range: (min_offset, max_offset, nb_steps) range scanned
                        relatively to `starting_param`
    :return: list of [scanned_value, mean_residual] pairs
    """
    # parameter labels, kept for reference (not used below)
    axis_labels = ["distance (mm)", "Xcen (pixel)", "Ycen (pixel)",
                "Angle1 (deg)", "Angle2 (deg)", "theta1", "theta2", "theta3"]
    low_offset, high_offset, nb_points = param_range
    # leastsq-style residual functions take keywords through their defaults,
    # so freeze the fixed keyword values by overwriting them here
    _error_function_on_demand_calibration.__defaults__ = (initrot,
                                                        vecteurref,
                                                        pureRotation,
                                                        0,
                                                        pixelsize,
                                                        dim,
                                                        weights,
                                                        kf_direction)
    # values scanned around the starting parameter (designed for a rotation angle)
    scanned_values = np.linspace(low_offset, high_offset, nb_points) + starting_param
    scan_results = []
    for scanned_value in scanned_values:
        deviations = _error_function_on_demand_calibration(np.array([scanned_value]),
                                                        miller,
                                                        allparameters,
                                                        arr_indexvaryingparameters,
                                                        nspots,
                                                        pixX,
                                                        pixY,
                                                        initrot=initrot,
                                                        vecteurref=vecteurref,
                                                        pureRotation=pureRotation,
                                                        verbose=0,
                                                        pixelsize=pixelsize,
                                                        weights=weights,
                                                        kf_direction=kf_direction)
        scan_results.append([scanned_value, np.mean(deviations)])
    return scan_results
def error_function_XCEN(param_calib,
                        DATA_Q,
                        allparameters,
                        nspots,
                        pixX,
                        pixY,
                        initrot=IDENTITYMATRIX,
                        pureRotation=1,
                        verbose=0,
                        pixelsize=165.0 / 2048):
    """
    Residual function: pixel distances between modelled and experimental spots
    when refining a single calibration parameter (seems to be useless ?).

    All miller indices must be entered in DATA_Q; selection is done in
    xy_from_Quat with nspots (array of indices).

    :return: array of distances, plus the rotation matrix when verbose is true
    """
    # model pixel positions for the current parameter guess; np.arange(8)[1]
    # passes the scalar index 1 as the varying-parameter position
    # (presumably targeting xcen -- TODO confirm against xy_from_Quat)
    Xmodel, Ymodel, _, rotmatrix = xy_from_Quat(param_calib,
                                                DATA_Q,
                                                nspots,
                                                np.arange(8)[1],
                                                allparameters,
                                                initrot=initrot,
                                                pureRotation=pureRotation,
                                                labXMAS=0,
                                                verbose=verbose,
                                                pixelsize=pixelsize)
    delta_x = Xmodel - pixX
    delta_y = Ymodel - pixY
    residues = np.sqrt(delta_x ** 2 + delta_y ** 2)
    if not verbose:
        return residues
    return residues, rotmatrix
def fitXCEN(starting_param,
            miller,
            allparameters,
            _error_function_XCEN,
            nspots,
            pixX,
            pixY,
            initrot=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1.0]]),
            pureRotation=1,
            verbose=0,
            pixelsize=165.0 / 2048,
            **kwd):
    """
    Least-squares refinement driven by `_error_function_XCEN`.

    All miller indices must be entered in miller; selection is done in
    xy_from_Quat with nspots (array of indices).

    :return: refined parameter array if leastsq converged, else None
    """
    initial_guess = starting_param
    if verbose:
        # report residues with the starting values (result discarded)
        _error_function_XCEN(initial_guess,
                            miller,
                            allparameters,
                            nspots,
                            pixX,
                            pixY,
                            initrot=initrot,
                            pureRotation=pureRotation,
                            verbose=1,
                            pixelsize=pixelsize)
    # leastsq forwards only positional args, so freeze the keyword values of
    # the residual function by overwriting its defaults
    _error_function_XCEN.__defaults__ = (initrot, pureRotation, 0, pixelsize)
    fit_result = leastsq(_error_function_XCEN,
                        initial_guess,
                        args=(miller, allparameters, nspots, pixX, pixY),
                        **kwd)  # args=(rre,ertetr,) last , is important!
    convergence_flag = fit_result[-1]
    if convergence_flag not in (1, 2, 3, 4, 5):
        return None
    if verbose:
        # display final residues with the refined parameters
        _error_function_XCEN(fit_result[0],
                            miller,
                            allparameters,
                            nspots,
                            pixX,
                            pixY,
                            initrot=initrot,
                            pureRotation=pureRotation,
                            verbose=verbose,
                            pixelsize=pixelsize)
    return fit_result[0]  # 5 detector parameters
def fit_on_demand_strain_2grains(starting_param,
                                miller,
                                allparameters,
                                _error_function_on_demand_strain_2grains,
                                arr_indexvaryingparameters,
                                absolutespotsindices,
                                pixX,
                                pixY,
                                initrot=IDENTITYMATRIX,
                                B0matrix=IDENTITYMATRIX,
                                nb_grains=1,
                                pureRotation=0,
                                verbose=0,
                                pixelsize=165.0 / 2048,
                                dim=(2048, 2048),
                                weights=None,
                                kf_direction="Z>0",
                                **kwd):
    """
    Fit a model of two grains of the same material.
    Initial orientation matrices are the same (only strain state differs).

    To use it:
    allparameters = 5calibdetectorparams + fivestrainparameters_g1 + 3deltaangles_g1 of orientations
                    + fivestrainparameters_g2 + 3deltaangles_g2 of orientations
    starting_param = [fivestrainparameter + 3deltaangles of orientations] = [1,1,0,0,0,0,0,0]+[1,1,0,0,0,0,0,0] typically
    arr_indexvaryingparameters = range(5,21)
    B0matrix : B0 matrix defining a*,b*,c* basis vectors (in columns) in initial orientation / LT frame

    :param _error_function_on_demand_strain_2grains: residual function to
            minimize (its keyword defaults are overwritten below)
    :return: array of refined parameter values if leastsq converged, else None
    """
    # All miller indices must be entered in miller;
    # selection is done with absolutespotsindices (array of indices)
    parameterscalib = ["dd", "xcen", "ycen", "angle1", "angle2"]
    strain_g1 = ["b/a", "c/a", "a12", "a13", "a23"]
    rot_g1 = ["theta1", "theta2", "theta3"]
    strain_g2 = ["b/a", "c/a", "a12", "a13", "a23"]
    parameters = parameterscalib + strain_g1 + rot_g1 + strain_g2
    parameters_being_fitted = [parameters[k] for k in arr_indexvaryingparameters]
    init_strain_values = starting_param
    if verbose:
        # report residues with the starting values (result discarded)
        _error_function_on_demand_strain_2grains(init_strain_values,
                                                miller,
                                                allparameters,
                                                arr_indexvaryingparameters,
                                                absolutespotsindices,
                                                pixX,
                                                pixY,
                                                initrot=initrot,
                                                B0matrix=B0matrix,
                                                nb_grains=nb_grains,
                                                pureRotation=pureRotation,
                                                verbose=1,
                                                pixelsize=pixelsize,
                                                dim=dim,
                                                weights=weights,
                                                kf_direction=kf_direction)
    # setting keywords of the error function during the fitting because
    # leastsq handles only *args but not **kwds
    _error_function_on_demand_strain_2grains.__defaults__ = (initrot,
                                                            B0matrix,
                                                            nb_grains,
                                                            pureRotation,
                                                            0,
                                                            pixelsize,
                                                            dim,
                                                            weights,
                                                            kf_direction,
                                                            False)
    # LEASTSQUARE
    # BUGFIX: minimize the residual function passed as argument (whose
    # defaults were just configured above), not the module-level
    # error_function_on_demand_strain_2grains -- consistent with
    # fit_on_demand_strain() and fitXCEN()
    res = leastsq(_error_function_on_demand_strain_2grains,
                init_strain_values,
                args=(
                    miller,
                    allparameters,
                    arr_indexvaryingparameters,
                    absolutespotsindices,
                    pixX,
                    pixY),  # args=(rre,ertetr,) last , is important!
                maxfev=5000,
                full_output=1,
                xtol=1.0e-11,
                epsfcn=0.0,
                **kwd)
    strain_sol = res[0]
    if res[-1] not in (1, 2, 3, 4, 5):
        # leastsq status flag signals failure
        return None
    else:
        if verbose:
            # display final residues with refined parameters (result discarded)
            _error_function_on_demand_strain_2grains(strain_sol,
                                                    miller,
                                                    allparameters,
                                                    arr_indexvaryingparameters,
                                                    absolutespotsindices,
                                                    pixX,
                                                    pixY,
                                                    initrot=initrot,
                                                    B0matrix=B0matrix,
                                                    nb_grains=nb_grains,
                                                    pureRotation=pureRotation,
                                                    verbose=verbose,
                                                    pixelsize=pixelsize,
                                                    dim=dim,
                                                    weights=weights,
                                                    kf_direction=kf_direction,
                                                    returnalldata=True)
        return strain_sol
def error_function_on_demand_strain_2grains(varying_parameters_values,
                                            DATA_Q,
                                            allparameters,
                                            arr_indexvaryingparameters,
                                            absolutespotsindices,
                                            pixX,
                                            pixY,
                                            initrot=IDENTITYMATRIX,
                                            B0matrix=IDENTITYMATRIX,
                                            nb_grains=1,
                                            pureRotation=0,
                                            verbose=0,
                                            pixelsize=165.0 / 2048,
                                            dim=(2048, 2048),
                                            weights=None,
                                            kf_direction="Z>0",
                                            returnalldata=False):
    """
    Compute the array of errors weight*((Xtheo-pixX)**2+(Ytheo-pixY)**2) for each pair.
    Xtheo, Ytheo derived from kf and q vector: q = UB Bmat B0 G* where G* =[h ,k, l] vector
    Bmat is the displacements matrix, strain = Bmat-Id

    All miller indices must be entered in DATA_Q; selection is done with
    absolutespotsindices (array of indices).
    allparameters must contain 5 detector calibration parameters
    + 5 strain parameters of g1 + 3 elementary rotation angles of g1
    + 5 strain parameters of g2 (+ 3 rotation angles of g2).
    varying_parameters_values contains the values of one or many parameters of
    allparameters, located by arr_indexvaryingparameters, e.g.:
    arr_indexvaryingparameters = array([5,6,7,8,9]) to fit only g1's strain,
    arr_indexvaryingparameters = array([5,6,7,8,9,10,11,12]) for g1's strain AND orientation.
    In this function calibration is not refined (but values are needed!),
    so arr_indexvaryingparameters must only contain indices >= 5.

    :param DATA_Q: array of hkl vectors
    :param pixX: arrays of pixels exp. peaks X positions [Xs g1, Xs g2]
    :param pixY: arrays of pixels exp. peaks Y positions [Ys g1, Ys g2]
    :param absolutespotsindices: [absolutespotsindices g1, absolutespotsindices g2]
    :param weights: None or [weights g1, weights g2]
    :param initrot: guessed UB orientation matrix
    :param B0matrix: B0 matrix defining a*,b*,c* basis vectors (in columns) in initial orientation / LT frame
    :return: concatenated distances array, plus (delta matrices, new UB matrices)
             per grain when returnalldata is True

    TODO: ?? not implemented for transmission geometry (kf_direction='X>0') ? and backreflection ('X<0')
    """
    # 5 CCD calibration parameters come first in allparameters
    if isinstance(allparameters, np.ndarray):
        calibrationparameters = (allparameters.tolist())[:5]
    else:
        calibrationparameters = allparameters[:5]
    # per-grain positions of the 3 rotation / 5 strain parameters in
    # allparameters (counting the 5 leading calibration parameters)
    rotationselements_indices = [[10, 11, 12],[18, 19, 20]] # with counting 5 calib parameters
    strainelements_indices = [[5, 6, 7, 8, 9], [13, 14, 15, 16, 17]]
    distances_vector_list = []
    all_deltamatrices = []
    all_newmatrices = []
    for grain_index in list(range(nb_grains)):
        # elementary rotations default to identity when not refined
        mat1, mat2, mat3 = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
        index_of_rot_in_arr_indexvaryingparameters = rotationselements_indices[grain_index]
        # first rotation angle, if present among the varying parameters
        if index_of_rot_in_arr_indexvaryingparameters[0] in arr_indexvaryingparameters:
            ind1 = np.where(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[0])[0][0]
            if len(arr_indexvaryingparameters) > 1:
                a1 = varying_parameters_values[ind1] * DEG
            else:
                a1 = varying_parameters_values[0] * DEG
            # rotation matrix built from a1 (radians)
            mat1 = np.array(
                [[np.cos(a1), 0, np.sin(a1)], [0, 1, 0], [-np.sin(a1), 0, np.cos(a1)]])
        # second rotation angle
        if index_of_rot_in_arr_indexvaryingparameters[1] in arr_indexvaryingparameters:
            ind2 = np.where(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[1])[0][0]
            if len(arr_indexvaryingparameters) > 1:
                a2 = varying_parameters_values[ind2] * DEG
            else:
                a2 = varying_parameters_values[0] * DEG
            # rotation matrix built from a2 (note the sign convention used here)
            mat2 = np.array(
                [[1, 0, 0], [0, np.cos(a2), np.sin(a2)], [0, np.sin(-a2), np.cos(a2)]])
        # third rotation angle
        if index_of_rot_in_arr_indexvaryingparameters[2] in arr_indexvaryingparameters:
            ind3 = np.where(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[2])[0][0]
            if len(arr_indexvaryingparameters) > 1:
                a3 = varying_parameters_values[ind3] * DEG
            else:
                a3 = varying_parameters_values[0] * DEG
            mat3 = np.array([[np.cos(a3), -np.sin(a3), 0], [np.sin(a3), np.cos(a3), 0], [0, 0, 1]])
        # combined small-rotation correction for this grain
        deltamat = np.dot(mat3, np.dot(mat2, mat1))
        all_deltamatrices.append(deltamat)
        # building Bmat ------------(triangular up matrix)
        index_of_strain_in_arr_indexvaryingparameters = strainelements_indices[grain_index]
        # default strain parameters (identity-like: no strain)
        s_list = [1, 1, 0, 0, 0]
        for s_index in list(range(5)):
            if (
                index_of_strain_in_arr_indexvaryingparameters[s_index]
                in arr_indexvaryingparameters):
                ind1 = np.where(
                    arr_indexvaryingparameters
                    == index_of_strain_in_arr_indexvaryingparameters[s_index]
                )[0][0]
                if len(arr_indexvaryingparameters) > 1:
                    s_list[s_index] = varying_parameters_values[ind1]
                else:  # handling fit with single fitting parameter
                    s_list[s_index] = varying_parameters_values[0]
        s0, s1, s2, s3, s4 = s_list
        # triangular-up displacement matrix built from the 5 strain parameters
        varyingstrain = np.array([[1.0, s2, s3], [0, s0, s4], [0, 0, s1]])
        # updated orientation+strain matrix: deltamat . initrot . varyingstrain
        newmatrix = np.dot(np.dot(deltamat, initrot), varyingstrain)
        all_newmatrices.append(newmatrix)
        # model pixel positions for the selected spots of this grain
        Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
                                                    DATA_Q,
                                                    absolutespotsindices[grain_index],
                                                    UBmatrix=newmatrix,
                                                    B0matrix=B0matrix,
                                                    pureRotation=0,
                                                    labXMAS=0,
                                                    verbose=0,
                                                    pixelsize=pixelsize,
                                                    dim=dim,
                                                    kf_direction=kf_direction)
        Xexp = pixX[grain_index]
        Yexp = pixY[grain_index]
        # per-spot euclidean pixel distance model <-> experiment
        distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)
        if weights is not None:
            # normalize this grain's weights to unit sum
            allweights = np.sum(weights[grain_index])
            distanceterm = distanceterm * weights[grain_index] / allweights
        distances_vector_list.append(distanceterm)
    # stack residues of both grains into a single vector for the optimizer
    if nb_grains == 2:
        alldistances_array = np.hstack((distances_vector_list[0], distances_vector_list[1]))
    if nb_grains == 1:
        alldistances_array = distances_vector_list[0]
    if verbose:
        pass
    if returnalldata:
        # concatenated all pairs distances, all delta matrices, all UB matrices
        return alldistances_array, all_deltamatrices, all_newmatrices
    else:
        return alldistances_array
def error_function_general(varying_parameters_values_array,
                        varying_parameters_keys,
                        Miller_indices,
                        allparameters,
                        absolutespotsindices,
                        Xexp,
                        Yexp,
                        initrot=IDENTITYMATRIX,
                        B0matrix=IDENTITYMATRIX,
                        pureRotation=0,
                        verbose=0,
                        pixelsize=165.0 / 2048,
                        dim=(2048, 2048),
                        weights=None,
                        kf_direction="Z>0",
                        returnalldata=False):
    """
    q = T_LT UzUyUz Ustart T_c B0 G*

    Interface error function returning the array of pair (exp. - model) distances
    weights_i * sqrt((Xmodel_i-Xexp_i)**2 + (Ymodel_i-Yexp_i)**2)
    where Xmodel, Ymodel come from G* = h a* + k b* + l c*.

    B0: reference structure reciprocal space frame (a*,b*,c*), a* // ki,
    b* perp to a* and perp to z (z belongs to the plane of ki and detector
    normal vector n); i.e. columns of B0 are components of a*,b*,c* expressed
    in the x,y,z LT frame.

    Possible keys for parameters to be refined:
    - five detector frame calibration parameters:
        distance, xcen, ycen, beta, gamma
    - three misorientation angles w.r.t. the LT orthonormal frame (matrices Ux, Uy, Uz):
        anglex, angley, anglez
    - Tc00..Tc22: distortion operator in the reciprocal unit cell frame
        (each column is a transformed reciprocal vector expressed in a*,b*,c*);
        usually Tc11, Tc22, Tc01, Tc02, Tc12 with Tc00=1 and others 0 (triangular up)
    - T00..T22: transform expressed in the x,y,z LT frame
    - Ts00..Ts22: transform expressed in the sample frame xs,ys,zs
    - a, b, c, alpha, beta, gamma: direct lattice parameters
    - depth: source depth offset

    The Tc / T / Ts families are mutually exclusive (first one encountered wins).

    :return: distances array; when returnalldata is True, also Uxyz, newmatrix,
             Tc, T, Ts
    """
    # --- read the 5 CCD calibration parameters (may be overridden below) ---
    if isinstance(allparameters, np.ndarray):
        calibrationparameters = (allparameters.tolist())[:5]
    else:
        calibrationparameters = allparameters[:5]
    # default operators: no extra rotation / distortion
    Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    Tc = np.array(allparameters[8:17]).reshape((3, 3))
    T = np.array(allparameters[17:26]).reshape((3, 3))
    Ts = np.array(allparameters[26:35]).reshape((3, 3))
    latticeparameters = np.array(allparameters[35:41])
    sourcedepth = allparameters[41]
    T_has_elements = False
    Ts_has_elements = False
    Tc_has_elements = False
    latticeparameters_has_elements = False
    nb_varying_parameters = len(varying_parameters_keys)
    for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
        if parameter_name in ("anglex", "angley", "anglez"):
            # with a single varying parameter, its value is always at index 0
            if nb_varying_parameters > 1:
                anglevalue = (varying_parameters_values_array[varying_parameter_index] * DEG)
            else:
                anglevalue = varying_parameters_values_array[0] * DEG
            ca = np.cos(anglevalue)
            sa = np.sin(anglevalue)
            # BUGFIX: compare strings with ==, not "is" (identity); "is" on
            # string literals only works by accident of CPython interning
            if parameter_name == "angley":
                Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
            elif parameter_name == "anglex":
                Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
            elif parameter_name == "anglez":
                Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])
        elif ((not T_has_elements) and (not Ts_has_elements) and parameter_name
            in ("Tc00", "Tc01", "Tc02", "Tc10", "Tc11", "Tc12", "Tc20", "Tc21", "Tc22")):
            for i in list(range(3)):
                for j in list(range(3)):
                    if parameter_name == "Tc%d%d" % (i, j):
                        if nb_varying_parameters > 1:
                            Tc[i, j] = varying_parameters_values_array[varying_parameter_index]
                        else:
                            Tc[i, j] = varying_parameters_values_array[0]
                        Tc_has_elements = True
        elif (not Tc_has_elements and not Ts_has_elements and parameter_name
            in ("T00", "T01", "T02", "T10", "T11", "T12", "T20", "T21", "T22")):
            for i in list(range(3)):
                for j in list(range(3)):
                    # BUGFIX: was `parameter_name is "T%d%d" % (i, j)` -- identity
                    # against a runtime-formatted string is always False, so T
                    # elements were never updated; use equality as in the Tc branch
                    if parameter_name == "T%d%d" % (i, j):
                        if nb_varying_parameters > 1:
                            T[i, j] = varying_parameters_values_array[varying_parameter_index]
                        else:
                            T[i, j] = varying_parameters_values_array[0]
                        T_has_elements = True
        elif (not Tc_has_elements and not T_has_elements and parameter_name
            in ("Ts00", "Ts01", "Ts02", "Ts10", "Ts11", "Ts12", "Ts20", "Ts21", "Ts22")):
            for i in list(range(3)):
                for j in list(range(3)):
                    # BUGFIX: same identity-vs-equality fix as for T above
                    if parameter_name == "Ts%d%d" % (i, j):
                        if nb_varying_parameters > 1:
                            Ts[i, j] = varying_parameters_values_array[varying_parameter_index]
                        else:
                            Ts[i, j] = varying_parameters_values_array[0]
                        Ts_has_elements = True
        elif parameter_name in ("a", "b", "c", "alpha", "beta", "gamma"):
            # NOTE(review): this branch also captures "beta" and "gamma", so the
            # detector-tilt branches below are unreachable for those two keys;
            # distinct key names would be needed to refine detector beta/gamma
            indparam = dict_lattice_parameters[parameter_name]
            if nb_varying_parameters > 1:
                latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
            else:
                latticeparameters[indparam] = varying_parameters_values_array[0]
            latticeparameters_has_elements = True
        elif parameter_name in ("distance",):
            calibrationparameters[0] = varying_parameters_values_array[varying_parameter_index]
        elif parameter_name in ("xcen",):
            calibrationparameters[1] = varying_parameters_values_array[varying_parameter_index]
        elif parameter_name in ("ycen",):
            calibrationparameters[2] = varying_parameters_values_array[varying_parameter_index]
        elif parameter_name in ("beta",):
            calibrationparameters[3] = varying_parameters_values_array[varying_parameter_index]
        elif parameter_name in ("gamma",):
            calibrationparameters[4] = varying_parameters_values_array[varying_parameter_index]
        elif parameter_name in ("depth",):
            sourcedepth = varying_parameters_values_array[varying_parameter_index]
    # combined misorientation operator
    Uxyz = np.dot(Uz, np.dot(Ux, Uy))
    # DictLT.RotY40 such as X = DictLT.RotY40 Xsample (xs,ys,zs = columns
    # expressed in x,y,z frame); a sample-frame transform Ts maps to the LT
    # frame via T = RotY40 Ts RotY40^-1
    newmatrix = np.dot(Uxyz, initrot)
    if Tc_has_elements:
        # distortion applied in the reciprocal unit cell frame (right side)
        newmatrix = np.dot(newmatrix, Tc)
    elif T_has_elements:
        # transform applied in the LT lab frame (left side)
        newmatrix = np.dot(T, newmatrix)
    elif Ts_has_elements:
        # convert sample-frame transform to the LT frame, then apply
        T = np.dot(np.dot(DictLT.RotY40, Ts), DictLT.RotYm40)
        newmatrix = np.dot(T, newmatrix)
    elif latticeparameters_has_elements:
        # rebuild B0 from the (possibly refined) direct lattice parameters
        B0matrix = CP.calc_B_RR(latticeparameters, directspace=1, setvolume=False)
    Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
                                                Miller_indices,
                                                absolutespotsindices,
                                                UBmatrix=newmatrix,
                                                B0matrix=B0matrix,
                                                offset=sourcedepth,
                                                pureRotation=0,
                                                labXMAS=0,
                                                verbose=0,
                                                pixelsize=pixelsize,
                                                dim=dim,
                                                kf_direction=kf_direction)
    # per-spot euclidean pixel distance model <-> experiment
    distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)
    if weights is not None:
        # normalize weights to unit sum
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights
    alldistances_array = distanceterm
    if returnalldata:
        # distances plus all refined operators
        return alldistances_array, Uxyz, newmatrix, Tc, T, Ts
    else:
        return alldistances_array
def fit_function_general(varying_parameters_values_array,
                        varying_parameters_keys,
                        Miller_indices,
                        allparameters,
                        absolutespotsindices,
                        Xexp,
                        Yexp,
                        UBmatrix_start=IDENTITYMATRIX,
                        B0matrix=IDENTITYMATRIX,
                        nb_grains=1,
                        pureRotation=0,
                        verbose=0,
                        pixelsize=165.0 / 2048,
                        dim=(2048, 2048),
                        weights=None,
                        kf_direction="Z>0",
                        **kwd):
    """
    Least-squares refinement wrapper around error_function_general.

    :return: array of refined parameter values if leastsq converged, else None
    """
    if verbose:
        # report residues for the starting point (result discarded)
        error_function_general(varying_parameters_values_array,
                            varying_parameters_keys,
                            Miller_indices,
                            allparameters,
                            absolutespotsindices,
                            Xexp,
                            Yexp,
                            initrot=UBmatrix_start,
                            B0matrix=B0matrix,
                            pureRotation=pureRotation,
                            verbose=1,
                            pixelsize=pixelsize,
                            dim=dim,
                            weights=weights,
                            kf_direction=kf_direction)
    # leastsq forwards only positional args, so freeze the keyword arguments of
    # the error function by overwriting its defaults
    error_function_general.__defaults__ = (UBmatrix_start,
                                        B0matrix,
                                        pureRotation,
                                        0,
                                        pixelsize,
                                        dim,
                                        weights,
                                        kf_direction,
                                        False)
    # LEASTSQUARE
    fit_output = leastsq(error_function_general,
                        varying_parameters_values_array,
                        args=(varying_parameters_keys,
                            Miller_indices,
                            allparameters,
                            absolutespotsindices,
                            Xexp,
                            Yexp),
                        maxfev=5000,
                        full_output=1,
                        xtol=1.0e-11,
                        epsfcn=0.0,
                        **kwd)
    refined_values = fit_output[0]
    convergence_flag = fit_output[-1]
    if convergence_flag not in (1, 2, 3, 4, 5):
        return None
    if verbose:
        # recompute final residues and refined operators (values only unpacked
        # for inspection; not returned)
        alldata = error_function_general(refined_values,
                                        varying_parameters_keys,
                                        Miller_indices,
                                        allparameters,
                                        absolutespotsindices,
                                        Xexp,
                                        Yexp,
                                        initrot=UBmatrix_start,
                                        B0matrix=B0matrix,
                                        pureRotation=pureRotation,
                                        verbose=1,
                                        pixelsize=pixelsize,
                                        dim=dim,
                                        weights=weights,
                                        kf_direction=kf_direction,
                                        returnalldata=True)
        alldistances_array, Uxyz, refinedUB, refinedTc, refinedT, refinedTs = alldata
    return refined_values
# Map from lattice-parameter name to its index in a 6-element
# (a, b, c, alpha, beta, gamma) lattice-parameters array
dict_lattice_parameters = {"a": 0, "b": 1, "c": 2, "alpha": 3, "beta": 4, "gamma": 5}
def fit_function_latticeparameters(varying_parameters_values_array,
                                varying_parameters_keys,
                                Miller_indices,
                                allparameters,
                                absolutespotsindices,
                                Xexp,
                                Yexp,
                                UBmatrix_start=IDENTITYMATRIX,
                                nb_grains=1,
                                pureRotation=0,
                                verbose=0,
                                pixelsize=165.0 / 2048,
                                dim=(2048, 2048),
                                weights=None,
                                kf_direction="Z>0",
                                additional_expression="none",
                                **kwd):
    """
    Fit direct (real) unit cell lattice parameters (in refinedB0)
    and orientation:
    q = refinedUzUyUz Ustart refinedB0 G*
    with an error function returning the array of pair (exp. - model) distances
    Sum_i [weights_i((Xmodel_i-Xexp_i)**2+(Ymodel_i-Yexp_i)**2) ]
    where Xmodel, Ymodel come from G* = h a* + k b* + l c*.

    :param varying_parameters_keys: names of the parameters to refine (see
            error_function_latticeparameters for accepted keys)
    :param additional_expression: forwarded to error_function_latticeparameters
    :return: array of refined parameter values if leastsq converged, else None
    """
    if verbose:
        # report residues with the starting values (result discarded)
        error_function_latticeparameters(varying_parameters_values_array,
                                        varying_parameters_keys,
                                        Miller_indices,
                                        allparameters,
                                        absolutespotsindices,
                                        Xexp,
                                        Yexp,
                                        initrot=UBmatrix_start,
                                        pureRotation=pureRotation,
                                        verbose=1,
                                        pixelsize=pixelsize,
                                        dim=dim,
                                        weights=weights,
                                        kf_direction=kf_direction,
                                        additional_expression=additional_expression)
    # setting keywords of the error function during the fitting because
    # leastsq handles only *args but not **kwds
    error_function_latticeparameters.__defaults__ = (UBmatrix_start,
                                                    pureRotation,
                                                    0,
                                                    pixelsize,
                                                    dim,
                                                    weights,
                                                    kf_direction,
                                                    False,
                                                    additional_expression)
    # LEASTSQUARE
    res = leastsq(error_function_latticeparameters,
                varying_parameters_values_array,
                args=(
                    varying_parameters_keys,
                    Miller_indices,
                    allparameters,
                    absolutespotsindices,
                    Xexp,
                    Yexp,
                ),  # args=(rre,ertetr,) last , is important!
                maxfev=5000,
                full_output=1,
                xtol=1.0e-11,
                epsfcn=0.0,
                **kwd)
    refined_values = res[0]
    if res[-1] not in (1, 2, 3, 4, 5):
        # leastsq status flag signals failure
        return None
    else:
        if 1:
            # NOTE(review): always executed (debug reporting kept from
            # development); the unpacked refined operators below are only
            # computed for (commented-out) reporting and are not returned
            alldata = error_function_latticeparameters(refined_values,
                                                    varying_parameters_keys,
                                                    Miller_indices,
                                                    allparameters,
                                                    absolutespotsindices,
                                                    Xexp,
                                                    Yexp,
                                                    initrot=UBmatrix_start,
                                                    pureRotation=pureRotation,
                                                    verbose=1,
                                                    pixelsize=pixelsize,
                                                    dim=dim,
                                                    weights=weights,
                                                    kf_direction=kf_direction,
                                                    returnalldata=True,
                                                    additional_expression=additional_expression)
            # alldistances_array, Uxyz, newmatrix, B0 matrix, lattice parameters
            alldistances_array, Uxyz, refinedUB, refinedB0matrix, refinedLatticeparameters = (
                alldata)
        return refined_values
def error_function_latticeparameters(varying_parameters_values_array,
                                    varying_parameters_keys,
                                    Miller_indices,
                                    allparameters,
                                    absolutespotsindices,
                                    Xexp,
                                    Yexp,
                                    initrot=IDENTITYMATRIX,
                                    pureRotation=0,
                                    verbose=0,
                                    pixelsize=165.0 / 2048,
                                    dim=(2048, 2048),
                                    weights=None,
                                    kf_direction="Z>0",
                                    returnalldata=False,
                                    additional_expression="none"):
    """
    Return the array of (exp. - model) pixel distances used to refine the
    lattice parameters and the crystal misorientation.

    q = Uz Uy Ux Ustart newB0 G*  with newB0 rebuilt from the current lattice
    parameters. Xmodel, Ymodel come from G* = h a* + k b* + l c*.

    error_i = weights_i * sqrt((Xmodel_i - Xexp_i)**2 + (Ymodel_i - Yexp_i)**2)

    B0 is the reference-structure reciprocal-space frame (a*, b*, c*):
    a* // ki, b* perp. to a* and perp. to z (z belongs to the plane of ki and
    the detector normal n), i.e. columns of B0 are the components of a*, b*, c*
    expressed in the x, y, z LT frame. newB0 is obtained by refining 5/6 of the
    lattice parameters.

    Possible keys in varying_parameters_keys:
    - five detector calibration parameters:
        det_distance, det_xcen, det_ycen, det_beta, det_gamma
    - three misorientation angles (deg) around the LT orthonormal axes:
        anglex, angley, anglez
    - lattice parameters among a, b, c, alpha, beta, gamma

    :param varying_parameters_values_array: current values of the refined parameters
    :param varying_parameters_keys: names of the refined parameters (see above)
    :param Miller_indices: hkl indices of the simulated spots
    :param allparameters: full parameter set; [:5] = CCD calibration,
                          [8:14] = the six lattice parameters
    :param absolutespotsindices: indices of the spots used in the fit
    :param Xexp: experimental pixel X coordinates
    :param Yexp: experimental pixel Y coordinates
    :param initrot: starting orientation matrix
    :param weights: per-spot weights (array) or None for an unweighted fit
    :param returnalldata: if True, also return the transformation matrices
    :param additional_expression: extra constraint; "a==b" forces b = a
    :return: distances array, or the tuple
             (alldistances_array, Uxyz, newmatrix, newB0matrix, latticeparameters)
             when returnalldata is True
    """
    # CCD plane calibration parameters (distance, xcen, ycen, beta, gamma)
    if isinstance(allparameters, np.ndarray):
        calibrationparameters = (allparameters.tolist())[:5]
    else:
        calibrationparameters = allparameters[:5]

    Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    latticeparameters = np.array(allparameters[8:14])

    nb_varying_parameters = len(varying_parameters_keys)

    for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
        if parameter_name in ("anglex", "angley", "anglez"):
            # misorientation angle (deg -> rad); a single varying parameter is
            # always read at index 0
            if nb_varying_parameters > 1:
                anglevalue = varying_parameters_values_array[varying_parameter_index] * DEG
            else:
                anglevalue = varying_parameters_values_array[0] * DEG
            ca = np.cos(anglevalue)
            sa = np.sin(anglevalue)
            # BUGFIX: compare strings with == instead of "is" (identity), which
            # only works through CPython string interning and raises a
            # SyntaxWarning on Python >= 3.8
            if parameter_name == "angley":
                Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
            elif parameter_name == "anglex":
                Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
            elif parameter_name == "anglez":
                Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])
        elif parameter_name in ("a", "b", "c", "alpha", "beta", "gamma"):
            # direct update of one lattice parameter (the two original,
            # byte-identical branches for lengths and angles were merged)
            indparam = dict_lattice_parameters[parameter_name]
            if nb_varying_parameters > 1:
                latticeparameters[indparam] = varying_parameters_values_array[varying_parameter_index]
            else:
                latticeparameters[indparam] = varying_parameters_values_array[0]

    Uxyz = np.dot(Uz, np.dot(Ux, Uy))

    if additional_expression == "a==b":
        # constraint: impose b equal to the (possibly refined) a
        indparam = dict_lattice_parameters["b"]
        indparam1 = dict_lattice_parameters["a"]
        latticeparameters[indparam] = latticeparameters[indparam1]

    newB0matrix = CP.calc_B_RR(latticeparameters, directspace=1, setvolume=False)

    # q = Uxyz initrot newB0matrix G*
    newmatrix = np.dot(Uxyz, initrot)

    Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
                                                Miller_indices,
                                                absolutespotsindices,
                                                UBmatrix=newmatrix,
                                                B0matrix=newB0matrix,
                                                pureRotation=0,
                                                labXMAS=0,
                                                verbose=0,
                                                pixelsize=pixelsize,
                                                dim=dim,
                                                kf_direction=kf_direction)

    distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)

    if weights is not None:
        # normalized weighting of the residues
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights

    alldistances_array = distanceterm

    if returnalldata:
        # distances + all transformation matrices and refined lattice parameters
        return alldistances_array, Uxyz, newmatrix, newB0matrix, latticeparameters
    else:
        return alldistances_array
def error_function_strain(varying_parameters_values_array,
                        varying_parameters_keys,
                        Miller_indices,
                        allparameters,
                        absolutespotsindices,
                        Xexp,
                        Yexp,
                        initrot=IDENTITYMATRIX,
                        B0matrix=IDENTITYMATRIX,
                        pureRotation=0,
                        verbose=0,
                        pixelsize=165.0 / 2048,
                        dim=(2048, 2048),
                        weights=None,
                        kf_direction="Z>0",
                        returnalldata=False):
    """
    Return the array of (exp. - model) pixel distances used to refine the
    sample-frame strain components and the crystal misorientation.

    q = refinedStrain refinedUzUyUx Ustart B0 G*
    error_i = weights_i * sqrt((Xmodel_i - Xexp_i)**2 + (Ymodel_i - Yexp_i)**2)
    Xmodel, Ymodel come from G* = h a* + k b* + l c*.

    B0 is the reference-structure reciprocal-space frame (a*, b*, c*):
    a* // ki, b* perp. to a* and perp. to z (z belongs to the plane of ki and
    the detector normal n), i.e. columns of B0 are the components of a*, b*, c*
    expressed in the x, y, z LT frame.

    Strain of reciprocal vectors: the 6 components of the upper-triangular
    matrix Ts (T00 T01 T02 / 0 T11 T12 / 0 0 T22); one of them must be fixed
    (usually T00 = 1). Ts is expressed in the sample frame; with
    P = DictLT.RotY40 (basis-change matrix, X_LT = P X_sample):
        Strain_LT = P Strain_sample P^-1

    :param varying_parameters_keys: names of the refined parameters; allowed
        keys are anglex/angley/anglez (deg) and Ts00..Ts22
    :param allparameters: full parameter set; [:5] = CCD calibration,
                          [8:14] = the six strain components of Ts
    :param weights: per-spot weights (array) or None/False/"None"/"False"/0/"0"
                    to disable weighting
    :param returnalldata: if True, also return the transformation matrices
    :return: distances array, or (alldistances_array, Uxyz, newmatrix, Ts, T)
             when returnalldata is True
    """
    # CCD plane calibration parameters (distance, xcen, ycen, beta, gamma)
    if isinstance(allparameters, np.ndarray):
        calibrationparameters = (allparameters.tolist())[:5]
    else:
        calibrationparameters = allparameters[:5]

    Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    straincomponents = np.array(allparameters[8:14])
    # upper-triangular strain transform in the sample frame
    Ts = np.array([straincomponents[:3],
                [0.0, straincomponents[3], straincomponents[4]],
                [0, 0, straincomponents[5]]])

    nb_varying_parameters = len(varying_parameters_keys)

    for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
        if parameter_name in ("anglex", "angley", "anglez"):
            # misorientation angle (deg -> rad); a single varying parameter is
            # always read at index 0
            if nb_varying_parameters > 1:
                anglevalue = varying_parameters_values_array[varying_parameter_index] * DEG
            else:
                anglevalue = varying_parameters_values_array[0] * DEG
            ca = np.cos(anglevalue)
            sa = np.sin(anglevalue)
            # BUGFIX: compare strings with == instead of "is" (identity), which
            # only works through CPython string interning and raises a
            # SyntaxWarning on Python >= 3.8
            if parameter_name == "angley":
                Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
            elif parameter_name == "anglex":
                Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
            elif parameter_name == "anglez":
                Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])
        elif parameter_name in ("Ts00", "Ts01", "Ts02", "Ts11", "Ts12", "Ts22"):
            # "TsIJ" -> element (I, J) of Ts; the digits are parsed directly
            # instead of scanning all (i, j) pairs as the original did
            row, col = int(parameter_name[2]), int(parameter_name[3])
            if nb_varying_parameters > 1:
                Ts[row, col] = varying_parameters_values_array[varying_parameter_index]
            else:
                Ts[row, col] = varying_parameters_values_array[0]

    Uxyz = np.dot(Uz, np.dot(Ux, Uy))
    newmatrix = np.dot(Uxyz, initrot)

    # express the sample-frame transform Ts in the x, y, z LT frame:
    # T = DictLT.RotY40 Ts DictLT.RotY40^-1
    T = np.dot(np.dot(DictLT.RotY40, Ts), DictLT.RotYm40)
    newmatrix = np.dot(T, newmatrix)

    Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
                                                Miller_indices,
                                                absolutespotsindices,
                                                UBmatrix=newmatrix,
                                                B0matrix=B0matrix,
                                                pureRotation=0,
                                                labXMAS=0,
                                                verbose=0,
                                                pixelsize=pixelsize,
                                                dim=dim,
                                                kf_direction=kf_direction)

    distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)

    # BUGFIX: the original test "weights not in (None, False, ...)" raises
    # ValueError for numpy-array weights (ambiguous elementwise comparison),
    # which is the normal weighted-fit case; arrays are now accepted directly
    # while the scalar "disabled" sentinels keep their original meaning
    if isinstance(weights, np.ndarray) or weights not in (None, False, "None", "False", 0, "0"):
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights

    alldistances_array = distanceterm

    if returnalldata:
        # distances + all transformation matrices
        return alldistances_array, Uxyz, newmatrix, Ts, T
    else:
        return alldistances_array
def fit_function_strain(varying_parameters_values_array,
                        varying_parameters_keys,
                        Miller_indices,
                        allparameters,
                        absolutespotsindices,
                        Xexp,
                        Yexp,
                        UBmatrix_start=IDENTITYMATRIX,
                        B0matrix=IDENTITYMATRIX,
                        nb_grains=1,
                        pureRotation=0,
                        verbose=0,
                        pixelsize=165.0 / 2048,
                        dim=(2048, 2048),
                        weights=None,
                        kf_direction="Z>0",
                        **kwd):
    """
    Least-squares refinement of the sample-frame strain components (Ts) and of
    the crystal orientation.

    q = refinedT refinedUzUyUz Ustart B0 G*
    The error function returns the array of pair (exp. - model) distances
    Sum_i [weights_i((Xmodel_i-Xexp_i)**2+(Ymodel_i-Yexp_i)**2) ]
    Xmodel, Ymodel come from G* = h a* + k b* + l c*,
    where T comes from Ts (see error_function_strain).

    :param varying_parameters_values_array: starting values of the refined parameters
    :param varying_parameters_keys: names of the refined parameters
                                    (anglex/angley/anglez, Ts00..Ts22)
    :param UBmatrix_start: starting orientation matrix
    :param kwd: extra keyword arguments forwarded to scipy.optimize.leastsq
    :return: array of refined values (same order as varying_parameters_keys),
             or None if leastsq reports a failure status
    """
    # NOTE(review): nb_grains is accepted but not used in this function body
    if verbose:
        # one evaluation with verbose=1 only to report the starting residues;
        # the returned distances are deliberately discarded
        error_function_strain(varying_parameters_values_array,
                            varying_parameters_keys,
                            Miller_indices,
                            allparameters,
                            absolutespotsindices,
                            Xexp,
                            Yexp,
                            initrot=UBmatrix_start,
                            B0matrix=B0matrix,
                            pureRotation=pureRotation,
                            verbose=1,
                            pixelsize=pixelsize,
                            dim=dim,
                            weights=weights,
                            kf_direction=kf_direction)
    # setting keywords of error_function_strain during the fitting because
    # leastsq handles only *args but not **kwds.
    # CAUTION: this tuple must stay positionally aligned with the keyword
    # defaults of error_function_strain: (initrot, B0matrix, pureRotation,
    # verbose, pixelsize, dim, weights, kf_direction, returnalldata)
    error_function_strain.__defaults__ = (UBmatrix_start,
                                        B0matrix,
                                        pureRotation,
                                        0,
                                        pixelsize,
                                        dim,
                                        weights,
                                        kf_direction,
                                        False)
    # LEASTSQUARE
    res = leastsq(error_function_strain,
                varying_parameters_values_array,
                args=(
                    varying_parameters_keys,
                    Miller_indices,
                    allparameters,
                    absolutespotsindices,
                    Xexp,
                    Yexp,
                ),  # args=(rre,ertetr,) last , is important!
                maxfev=5000,
                full_output=1,
                xtol=1.0e-11,
                epsfcn=0.0,
                **kwd)
    refined_values = res[0]
    # res[-1] is the leastsq status flag: 1-4 indicate a solution was found;
    # 5 (max. number of calls reached) is also accepted here
    if res[-1] not in (1, 2, 3, 4, 5):
        return None
    else:
        if 1:
            # final evaluation with the refined values (returnalldata=True)
            # to recover all transformation matrices for inspection
            alldata = error_function_strain(refined_values,
                                        varying_parameters_keys,
                                        Miller_indices,
                                        allparameters,
                                        absolutespotsindices,
                                        Xexp,
                                        Yexp,
                                        initrot=UBmatrix_start,
                                        B0matrix=B0matrix,
                                        pureRotation=pureRotation,
                                        verbose=0,
                                        pixelsize=pixelsize,
                                        dim=dim,
                                        weights=weights,
                                        kf_direction=kf_direction,
                                        returnalldata=True)
            # alldistances_array, Uxyz, newmatrix, Ts, T
            alldistances_array, Uxyz, newmatrix, refinedTs, refinedT = alldata
            return refined_values
def error_strain_from_elongation(varying_parameters_values_array,
                                varying_parameters_keys,
                                Miller_indices,
                                allparameters,
                                absolutespotsindices,
                                Xexp,
                                Yexp,
                                initrot=IDENTITYMATRIX,
                                B0matrix=IDENTITYMATRIX,
                                pureRotation=0,
                                verbose=0,
                                pixelsize=165.0 / 2048,
                                dim=(2048, 2048),
                                weights=None,
                                kf_direction="Z>0",
                                returnalldata=False):
    """
    Error function intended for elongated Laue spots.

    Each elongated spot is composed of 3 points P1, Pc, P2 (Pc at the center,
    P1, P2 at the ends); the intended error is
        error = sum (P1-P1exp)**2 + (P1-P2exp)**2 - P1P2exp**2
              + (P2-P2exp)**2 + (P2-P1exp)**2 - P1P2exp**2
              + (Pc-Pcexp)**2
    (symmetric in P1/P2 since the simulated ends may be swapped w.r.t. the
    experimental ones).

    NOTE(review): the 3-point elongation term described above is NOT
    implemented in this body — only a single per-spot distance is computed,
    exactly as in error_function_strain. Confirm intent before relying on
    the elongation behavior.

    q = refinedStrain refinedUzUyUx Ustart B0 G*
    Xmodel, Ymodel come from G* = h a* + k b* + l c*.
    Strain: 6 components of the upper-triangular matrix Ts
    (T00 T01 T02 / 0 T11 T12 / 0 0 T22); one must be fixed (usually T00 = 1).
    With P = DictLT.RotY40 (X_LT = P X_sample):
        Strain_LT = P Strain_sample P^-1

    :param allparameters: full parameter set; [:5] = CCD calibration,
                          [8:14] = the six strain components of Ts
    :param weights: per-spot weights (array) or None for an unweighted fit
    :param returnalldata: if True, also return the transformation matrices
    :return: distances array, or (alldistances_array, Uxyz, newmatrix, Ts, T)
             when returnalldata is True
    """
    # CCD plane calibration parameters (distance, xcen, ycen, beta, gamma)
    if isinstance(allparameters, np.ndarray):
        calibrationparameters = (allparameters.tolist())[:5]
    else:
        calibrationparameters = allparameters[:5]

    Uy, Ux, Uz = IDENTITYMATRIX, IDENTITYMATRIX, IDENTITYMATRIX
    straincomponents = np.array(allparameters[8:14])
    # upper-triangular strain transform in the sample frame
    Ts = np.array([straincomponents[:3],
                [0.0, straincomponents[3], straincomponents[4]],
                [0, 0, straincomponents[5]]])

    nb_varying_parameters = len(varying_parameters_keys)

    for varying_parameter_index, parameter_name in enumerate(varying_parameters_keys):
        if parameter_name in ("anglex", "angley", "anglez"):
            # misorientation angle (deg -> rad); a single varying parameter is
            # always read at index 0
            if nb_varying_parameters > 1:
                anglevalue = varying_parameters_values_array[varying_parameter_index] * DEG
            else:
                anglevalue = varying_parameters_values_array[0] * DEG
            ca = np.cos(anglevalue)
            sa = np.sin(anglevalue)
            # BUGFIX: compare strings with == instead of "is" (identity), which
            # only works through CPython string interning and raises a
            # SyntaxWarning on Python >= 3.8
            if parameter_name == "angley":
                Uy = np.array([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])
            elif parameter_name == "anglex":
                Ux = np.array([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])
            elif parameter_name == "anglez":
                Uz = np.array([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])
        elif parameter_name in ("Ts00", "Ts01", "Ts02", "Ts11", "Ts12", "Ts22"):
            # "TsIJ" -> element (I, J) of Ts; the digits are parsed directly
            # instead of scanning all (i, j) pairs as the original did
            row, col = int(parameter_name[2]), int(parameter_name[3])
            if nb_varying_parameters > 1:
                Ts[row, col] = varying_parameters_values_array[varying_parameter_index]
            else:
                Ts[row, col] = varying_parameters_values_array[0]

    Uxyz = np.dot(Uz, np.dot(Ux, Uy))
    newmatrix = np.dot(Uxyz, initrot)

    # express the sample-frame transform Ts in the x, y, z LT frame:
    # T = DictLT.RotY40 Ts DictLT.RotY40^-1
    T = np.dot(np.dot(DictLT.RotY40, Ts), DictLT.RotYm40)
    newmatrix = np.dot(T, newmatrix)

    Xmodel, Ymodel, _, _ = calc_XY_pixelpositions(calibrationparameters,
                                                Miller_indices,
                                                absolutespotsindices,
                                                UBmatrix=newmatrix,
                                                B0matrix=B0matrix,
                                                pureRotation=0,
                                                labXMAS=0,
                                                verbose=0,
                                                pixelsize=pixelsize,
                                                dim=dim,
                                                kf_direction=kf_direction)

    distanceterm = np.sqrt((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)

    if weights is not None:
        # normalized weighting of the residues
        allweights = np.sum(weights)
        distanceterm = distanceterm * weights / allweights

    alldistances_array = distanceterm

    if returnalldata:
        # distances + all transformation matrices
        return alldistances_array, Uxyz, newmatrix, Ts, T
    else:
        return alldistances_array
# --- ----- TESTS & DEMOS ----------------------
def test_generalfitfunction():
    """
    Demo/self-test of fit_function_general on an unstrained Ge example.

    Refines misorientation angles, lattice parameters a, b, alpha, beta, gamma
    and source depth from 27 experimental spot positions, starting from a known
    orientation matrix.

    :return: dict mapping each varying parameter key to its refined value
    """
    # Ge example unstrained: 27 experimental spot pixel positions ...
    pixX = np.array([1027.1099965580365, 1379.1700028337193, 1288.1100055910788, 926.219994375393, 595.4599989710869, 1183.2699986884652, 1672.670001029018, 1497.400007802548, 780.2700069727559, 819.9099991880139, 873.5600007021501, 1579.39000403102, 1216.4900044928474, 1481.199997684615, 399.87000836895436, 548.2499911593322, 1352.760007116035, 702.5200057620646, 383.7700117705855, 707.2000052800154, 1140.9300043834062, 1730.3299981313016, 289.68999155533413, 1274.8600008806216, 1063.2499947675371, 1660.8600022917144, 1426.670005812432])
    pixY = np.array([1293.2799953573963, 1553.5800003037994, 1460.1599988550274, 872.0599978043742, 876.4400033114814, 598.9200007214372, 1258.6199918206175, 1224.7000037967478, 1242.530005349013, 552.8399954684833, 706.9700021553684, 754.63000554209, 1042.2800069222762, 364.8400055136739, 1297.1899933698528, 1260.320007366279, 568.0299942819768, 949.8800073732916, 754.580011319991, 261.1099917270594, 748.3999917806088, 1063.319998717625, 945.9700059216573, 306.9500110237749, 497.7900029269757, 706.310001700921, 858.780004244009])
    # ... and their assigned hkl Miller indices (one row per spot)
    miller_indices = np.array([[3.0, 3.0, 3.0], [2.0, 4.0, 2.0], [3.0, 5.0, 3.0], [5.0, 3.0, 3.0], [6.0, 2.0, 4.0], [6.0, 4.0, 2.0], [3.0, 5.0, 1.0], [4.0, 6.0, 2.0], [5.0, 3.0, 5.0], [7.0, 3.0, 3.0], [4.0, 2.0, 2.0], [5.0, 5.0, 1.0], [5.0, 5.0, 3.0], [7.0, 5.0, 1.0], [5.0, 1.0, 5.0], [3.0, 1.0, 3.0], [8.0, 6.0, 2.0], [7.0, 3.0, 5.0], [5.0, 1.0, 3.0], [9.0, 3.0, 3.0], [7.0, 5.0, 3.0], [5.0, 7.0, 1.0], [7.0, 1.0, 5.0], [5.0, 3.0, 1.0], [9.0, 5.0, 3.0], [7.0, 7.0, 1.0], [3.0, 3.0, 1.0]])
    # known starting orientation matrix for this grain
    starting_orientmatrix = np.array([[-0.9727538909589738, -0.21247913537718385, 0.09274958034159074],
                                    [0.22567394392094073, -0.7761682018781203, 0.5887564805829774],
                                    [-0.053107604650232926, 0.593645098498364, 0.8029726516869564]])
    # detector geometry: pixel size (mm) and the 5 CCD calibration parameters
    # (distance, xcen, ycen, beta, gamma)
    pixelsize = 0.08057
    calibparameters = [69.196, 1050.78, 1116.22, 0.152, -0.251]
    # use every spot in the fit
    absolutespotsindices = np.arange(len(pixY))
    # parameters to refine and their starting values (misorientation angles,
    # lattice parameters a, b, alpha, beta, gamma, and source depth)
    varying_parameters_keys = ["anglex", "angley", "anglez", "a", "b", "alpha", "beta", "gamma", "depth"]
    varying_parameters_values_array = [0.0, -0, 0.0, 5.678, 5.59, 89.999, 90, 90.0001, 0.02]
    # reference Ge lattice parameters and the corresponding B0 matrix
    latticeparameters = DictLT.dict_Materials["Ge"][1]
    B0 = CP.calc_B_RR(latticeparameters)
    # identity starting transforms (misorientation, Tc, T, Ts)
    transformparameters = [0, 0, 0,  # 3 misorientation / initial UB matrix
                        1.0, 0, 0, 0, 1.0, 0, 0, -0.0, 1,  # Tc
                        1, 0, 0, 0, 1, 0, 0, 0, 1,  # T
                        1, 0, 0, 0, 1, 0, 0, 0, 1, ]  # Ts
    sourcedepth = [0]
    # full parameter set expected by the error function
    allparameters = (calibparameters + transformparameters + latticeparameters + sourcedepth)
    # split the starting orientation into its pure-rotation part
    pureUmatrix, residualdistortion = GT.UBdecomposition_RRPP(starting_orientmatrix)
    refined_values = fit_function_general(varying_parameters_values_array,
                                        varying_parameters_keys,
                                        miller_indices,
                                        allparameters,
                                        absolutespotsindices,
                                        pixX,
                                        pixY,
                                        UBmatrix_start=pureUmatrix,
                                        B0matrix=B0,
                                        nb_grains=1,
                                        pureRotation=0,
                                        verbose=0,
                                        pixelsize=pixelsize,
                                        dim=(2048, 2048),
                                        weights=None,
                                        kf_direction="Z>0")
    # collect refined values keyed by parameter name
    dictRes = {}
    for paramname, val in zip(varying_parameters_keys, refined_values):
        dictRes[paramname] = val
    return dictRes
| [
"numpy.sqrt",
"numpy.hstack",
"lauetoolsnn.lauetools.LaueGeometry.from_qunit_to_twchi",
"numpy.array",
"numpy.sin",
"lauetoolsnn.lauetools.CrystalParameters.calc_B_RR",
"numpy.arange",
"scipy.linalg.qr",
"numpy.mean",
"scipy.optimize.least_squares",
"numpy.where",
"numpy.take",
"scipy.optimi... | [((273, 306), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(15)'}), '(precision=15)\n', (292, 306), True, 'import numpy as np\n'), ((903, 912), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (909, 912), True, 'import numpy as np\n'), ((1054, 1080), 'numpy.zeros', 'np.zeros', (['nn'], {'dtype': 'np.int'}), '(nn, dtype=np.int)\n', (1062, 1080), True, 'import numpy as np\n'), ((1543, 1567), 'numpy.where', 'np.where', (['(isbadpeak == 0)'], {}), '(isbadpeak == 0)\n', (1551, 1567), True, 'import numpy as np\n'), ((3699, 3730), 'numpy.take', 'np.take', (['DATA_Q', 'nspots'], {'axis': '(0)'}), '(DATA_Q, nspots, axis=0)\n', (3706, 3730), True, 'import numpy as np\n'), ((3741, 3760), 'numpy.transpose', 'np.transpose', (['DATAQ'], {}), '(DATAQ)\n', (3753, 3760), True, 'import numpy as np\n'), ((4871, 4895), 'numpy.dot', 'np.dot', (['matfromQuat', 'trQ'], {}), '(matfromQuat, trQ)\n', (4877, 4895), True, 'import numpy as np\n'), ((5021, 5065), 'lauetoolsnn.lauetools.LaueGeometry.from_qunit_to_twchi', 'F2TC.from_qunit_to_twchi', (['(1.0 * Qrot / Qrotn)'], {}), '(1.0 * Qrot / Qrotn)\n', (5045, 5065), True, 'import lauetoolsnn.lauetools.LaueGeometry as F2TC\n'), ((5340, 5468), 'lauetoolsnn.lauetools.LaueGeometry.calc_xycam_from2thetachi', 'F2TC.calc_xycam_from2thetachi', (['twthe', 'chi', 'calibration_parameters'], {'verbose': '(0)', 'pixelsize': 'pixelsize', 'kf_direction': 'kf_direction'}), '(twthe, chi, calibration_parameters, verbose=0,\n pixelsize=pixelsize, kf_direction=kf_direction)\n', (5369, 5468), True, 'import lauetoolsnn.lauetools.LaueGeometry as F2TC\n'), ((6915, 6946), 'numpy.take', 'np.take', (['DATA_Q', 'nspots'], {'axis': '(0)'}), '(DATA_Q, nspots, axis=0)\n', (6922, 6946), True, 'import numpy as np\n'), ((6957, 6976), 'numpy.transpose', 'np.transpose', (['DATAQ'], {}), '(DATAQ)\n', (6969, 6976), True, 'import numpy as np\n'), ((7488, 7543), 'lauetoolsnn.lauetools.LaueGeometry.from_qunit_to_twchi', 
'F2TC.from_qunit_to_twchi', (['(Qrot / Qrotn)'], {'labXMAS': 'labXMAS'}), '(Qrot / Qrotn, labXMAS=labXMAS)\n', (7512, 7543), True, 'import lauetoolsnn.lauetools.LaueGeometry as F2TC\n'), ((7808, 7952), 'lauetoolsnn.lauetools.LaueGeometry.calc_xycam_from2thetachi', 'F2TC.calc_xycam_from2thetachi', (['twthe', 'chi', 'calibration_parameters'], {'offset': 'offset', 'verbose': '(0)', 'pixelsize': 'pixelsize', 'kf_direction': 'kf_direction'}), '(twthe, chi, calibration_parameters, offset=\n offset, verbose=0, pixelsize=pixelsize, kf_direction=kf_direction)\n', (7837, 7952), True, 'import lauetoolsnn.lauetools.LaueGeometry as F2TC\n'), ((9515, 9588), 'numpy.array', 'np.array', (['[[invsq2, -0.5, 0.5], [invsq2, 0.5, -0.5], [0, invsq2, invsq2]]'], {}), '([[invsq2, -0.5, 0.5], [invsq2, 0.5, -0.5], [0, invsq2, invsq2]])\n', (9523, 9588), True, 'import numpy as np\n'), ((11019, 11044), 'numpy.dot', 'np.dot', (['deltamat', 'initrot'], {}), '(deltamat, initrot)\n', (11025, 11044), True, 'import numpy as np\n'), ((11913, 11955), 'numpy.sqrt', 'np.sqrt', (['((X - pixX) ** 2 + (Y - pixY) ** 2)'], {}), '((X - pixX) ** 2 + (Y - pixY) ** 2)\n', (11920, 11955), True, 'import numpy as np\n'), ((16892, 17096), 'scipy.optimize.least_squares', 'least_squares', (['_error_function_on_demand_calibration', 'param_calib_0'], {'args': '(miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY)', 'tr_solver': '"""exact"""', 'x_scale': 'xscale', 'max_nfev': 'None'}), "(_error_function_on_demand_calibration, param_calib_0, args=(\n miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY),\n tr_solver='exact', x_scale=xscale, max_nfev=None)\n", (16905, 17096), False, 'from scipy.optimize import leastsq, least_squares\n'), ((17443, 17611), 'scipy.optimize.leastsq', 'leastsq', (['_error_function_on_demand_calibration', 'param_calib_0'], {'args': '(miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY)', 'maxfev': '(5000)'}), 
'(_error_function_on_demand_calibration, param_calib_0, args=(miller,\n allparameters, arr_indexvaryingparameters, nspots, pixX, pixY), maxfev=\n 5000, **kwd)\n', (17450, 17611), False, 'from scipy.optimize import leastsq, least_squares\n'), ((22558, 22677), 'numpy.array', 'np.array', (['[[1.0, param_strain[2], param_strain[3]], [0, param_strain[0], param_strain\n [4]], [0, 0, param_strain[1]]]'], {}), '([[1.0, param_strain[2], param_strain[3]], [0, param_strain[0],\n param_strain[4]], [0, 0, param_strain[1]]])\n', (22566, 22677), True, 'import numpy as np\n'), ((23524, 23583), 'numpy.array', 'np.array', (['(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])'], {}), '(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])\n', (23532, 23583), True, 'import numpy as np\n'), ((24398, 24440), 'numpy.sqrt', 'np.sqrt', (['((X - pixX) ** 2 + (Y - pixY) ** 2)'], {}), '((X - pixX) ** 2 + (Y - pixY) ** 2)\n', (24405, 24440), True, 'import numpy as np\n'), ((30246, 30365), 'numpy.array', 'np.array', (['[[1.0, param_strain[2], param_strain[3]], [0, param_strain[0], param_strain\n [4]], [0, 0, param_strain[1]]]'], {}), '([[1.0, param_strain[2], param_strain[3]], [0, param_strain[0],\n param_strain[4]], [0, 0, param_strain[1]]])\n', (30254, 30365), True, 'import numpy as np\n'), ((31275, 31334), 'numpy.array', 'np.array', (['(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])'], {}), '(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])\n', (31283, 31334), True, 'import numpy as np\n'), ((32110, 32154), 'numpy.sqrt', 'np.sqrt', (['((X1 - pixX) ** 2 + (Y1 - pixY) ** 2)'], {}), '((X1 - pixX) ** 2 + (Y1 - pixY) ** 2)\n', (32117, 32154), True, 'import numpy as np\n'), ((32259, 32318), 'numpy.array', 'np.array', (['(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])'], {}), '(patchallparam[:5] + [0, 0, 0] + patchallparam[5:])\n', (32267, 32318), True, 'import numpy as np\n'), ((33050, 33094), 'numpy.sqrt', 'np.sqrt', (['((X2 - pixX) ** 2 + (Y2 - pixY) ** 2)'], {}), '((X2 - pixX) ** 2 + 
(Y2 - pixY) ** 2)\n', (33057, 33094), True, 'import numpy as np\n'), ((37272, 37475), 'scipy.optimize.leastsq', 'leastsq', (['_error_function_on_demand_strain', 'param_strain_0'], {'args': '(miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY)', 'maxfev': '(5000)', 'full_output': '(1)', 'xtol': '(1e-11)', 'epsfcn': '(0.0)'}), '(_error_function_on_demand_strain, param_strain_0, args=(miller,\n allparameters, arr_indexvaryingparameters, nspots, pixX, pixY), maxfev=\n 5000, full_output=1, xtol=1e-11, epsfcn=0.0, **kwd)\n', (37279, 37475), False, 'from scipy.optimize import leastsq, least_squares\n'), ((44355, 44397), 'numpy.sqrt', 'np.sqrt', (['((X - pixX) ** 2 + (Y - pixY) ** 2)'], {}), '((X - pixX) ** 2 + (Y - pixY) ** 2)\n', (44362, 44397), True, 'import numpy as np\n'), ((45102, 45147), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1.0]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1.0]])\n', (45110, 45147), True, 'import numpy as np\n'), ((46356, 46461), 'scipy.optimize.leastsq', 'leastsq', (['_error_function_XCEN', 'param_calib_0'], {'args': '(miller, allparameters, nspots, pixX, pixY)'}), '(_error_function_XCEN, param_calib_0, args=(miller, allparameters,\n nspots, pixX, pixY), **kwd)\n', (46363, 46461), False, 'from scipy.optimize import leastsq, least_squares\n'), ((51837, 52065), 'scipy.optimize.leastsq', 'leastsq', (['error_function_on_demand_strain_2grains', 'init_strain_values'], {'args': '(miller, allparameters, arr_indexvaryingparameters, absolutespotsindices,\n pixX, pixY)', 'maxfev': '(5000)', 'full_output': '(1)', 'xtol': '(1e-11)', 'epsfcn': '(0.0)'}), '(error_function_on_demand_strain_2grains, init_strain_values, args=(\n miller, allparameters, arr_indexvaryingparameters, absolutespotsindices,\n pixX, pixY), maxfev=5000, full_output=1, xtol=1e-11, epsfcn=0.0, **kwd)\n', (51844, 52065), False, 'from scipy.optimize import leastsq, least_squares\n'), ((65920, 65950), 'numpy.array', 'np.array', (['allparameters[35:41]'], {}), 
'(allparameters[35:41])\n', (65928, 65950), True, 'import numpy as np\n'), ((71660, 71681), 'numpy.dot', 'np.dot', (['Uxyz', 'initrot'], {}), '(Uxyz, initrot)\n', (71666, 71681), True, 'import numpy as np\n'), ((73067, 73119), 'numpy.sqrt', 'np.sqrt', (['((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)'], {}), '((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)\n', (73074, 73119), True, 'import numpy as np\n'), ((77377, 77611), 'scipy.optimize.leastsq', 'leastsq', (['error_function_general', 'varying_parameters_values_array'], {'args': '(varying_parameters_keys, Miller_indices, allparameters,\n absolutespotsindices, Xexp, Yexp)', 'maxfev': '(5000)', 'full_output': '(1)', 'xtol': '(1e-11)', 'epsfcn': '(0.0)'}), '(error_function_general, varying_parameters_values_array, args=(\n varying_parameters_keys, Miller_indices, allparameters,\n absolutespotsindices, Xexp, Yexp), maxfev=5000, full_output=1, xtol=\n 1e-11, epsfcn=0.0, **kwd)\n', (77384, 77611), False, 'from scipy.optimize import leastsq, least_squares\n'), ((83891, 84134), 'scipy.optimize.leastsq', 'leastsq', (['error_function_latticeparameters', 'varying_parameters_values_array'], {'args': '(varying_parameters_keys, Miller_indices, allparameters,\n absolutespotsindices, Xexp, Yexp)', 'maxfev': '(5000)', 'full_output': '(1)', 'xtol': '(1e-11)', 'epsfcn': '(0.0)'}), '(error_function_latticeparameters, varying_parameters_values_array,\n args=(varying_parameters_keys, Miller_indices, allparameters,\n absolutespotsindices, Xexp, Yexp), maxfev=5000, full_output=1, xtol=\n 1e-11, epsfcn=0.0, **kwd)\n', (83898, 84134), False, 'from scipy.optimize import leastsq, least_squares\n'), ((89363, 89392), 'numpy.array', 'np.array', (['allparameters[8:14]'], {}), '(allparameters[8:14])\n', (89371, 89392), True, 'import numpy as np\n'), ((92392, 92455), 'lauetoolsnn.lauetools.CrystalParameters.calc_B_RR', 'CP.calc_B_RR', (['latticeparameters'], {'directspace': '(1)', 'setvolume': '(False)'}), '(latticeparameters, directspace=1, 
setvolume=False)\n', (92404, 92455), True, 'import lauetoolsnn.lauetools.CrystalParameters as CP\n'), ((93050, 93071), 'numpy.dot', 'np.dot', (['Uxyz', 'initrot'], {}), '(Uxyz, initrot)\n', (93056, 93071), True, 'import numpy as np\n'), ((94097, 94149), 'numpy.sqrt', 'np.sqrt', (['((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)'], {}), '((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)\n', (94104, 94149), True, 'import numpy as np\n'), ((98202, 98231), 'numpy.array', 'np.array', (['allparameters[8:14]'], {}), '(allparameters[8:14])\n', (98210, 98231), True, 'import numpy as np\n'), ((98242, 98357), 'numpy.array', 'np.array', (['[straincomponents[:3], [0.0, straincomponents[3], straincomponents[4]], [0,\n 0, straincomponents[5]]]'], {}), '([straincomponents[:3], [0.0, straincomponents[3], straincomponents\n [4]], [0, 0, straincomponents[5]]])\n', (98250, 98357), True, 'import numpy as np\n'), ((100186, 100207), 'numpy.dot', 'np.dot', (['Uxyz', 'initrot'], {}), '(Uxyz, initrot)\n', (100192, 100207), True, 'import numpy as np\n'), ((100706, 100726), 'numpy.dot', 'np.dot', (['T', 'newmatrix'], {}), '(T, newmatrix)\n', (100712, 100726), True, 'import numpy as np\n'), ((101757, 101809), 'numpy.sqrt', 'np.sqrt', (['((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)'], {}), '((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)\n', (101764, 101809), True, 'import numpy as np\n'), ((106332, 106565), 'scipy.optimize.leastsq', 'leastsq', (['error_function_strain', 'varying_parameters_values_array'], {'args': '(varying_parameters_keys, Miller_indices, allparameters,\n absolutespotsindices, Xexp, Yexp)', 'maxfev': '(5000)', 'full_output': '(1)', 'xtol': '(1e-11)', 'epsfcn': '(0.0)'}), '(error_function_strain, varying_parameters_values_array, args=(\n varying_parameters_keys, Miller_indices, allparameters,\n absolutespotsindices, Xexp, Yexp), maxfev=5000, full_output=1, xtol=\n 1e-11, epsfcn=0.0, **kwd)\n', (106339, 106565), False, 'from scipy.optimize import leastsq, least_squares\n'), ((112450, 
112479), 'numpy.array', 'np.array', (['allparameters[8:14]'], {}), '(allparameters[8:14])\n', (112458, 112479), True, 'import numpy as np\n'), ((112490, 112605), 'numpy.array', 'np.array', (['[straincomponents[:3], [0.0, straincomponents[3], straincomponents[4]], [0,\n 0, straincomponents[5]]]'], {}), '([straincomponents[:3], [0.0, straincomponents[3], straincomponents\n [4]], [0, 0, straincomponents[5]]])\n', (112498, 112605), True, 'import numpy as np\n'), ((114434, 114455), 'numpy.dot', 'np.dot', (['Uxyz', 'initrot'], {}), '(Uxyz, initrot)\n', (114440, 114455), True, 'import numpy as np\n'), ((114890, 114910), 'numpy.dot', 'np.dot', (['T', 'newmatrix'], {}), '(T, newmatrix)\n', (114896, 114910), True, 'import numpy as np\n'), ((115861, 115913), 'numpy.sqrt', 'np.sqrt', (['((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)'], {}), '((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)\n', (115868, 115913), True, 'import numpy as np\n'), ((117569, 118142), 'numpy.array', 'np.array', (['[1027.1099965580365, 1379.1700028337193, 1288.1100055910788, \n 926.219994375393, 595.4599989710869, 1183.2699986884652, \n 1672.670001029018, 1497.400007802548, 780.2700069727559, \n 819.9099991880139, 873.5600007021501, 1579.39000403102, \n 1216.4900044928474, 1481.199997684615, 399.87000836895436, \n 548.2499911593322, 1352.760007116035, 702.5200057620646, \n 383.7700117705855, 707.2000052800154, 1140.9300043834062, \n 1730.3299981313016, 289.68999155533413, 1274.8600008806216, \n 1063.2499947675371, 1660.8600022917144, 1426.670005812432]'], {}), '([1027.1099965580365, 1379.1700028337193, 1288.1100055910788, \n 926.219994375393, 595.4599989710869, 1183.2699986884652, \n 1672.670001029018, 1497.400007802548, 780.2700069727559, \n 819.9099991880139, 873.5600007021501, 1579.39000403102, \n 1216.4900044928474, 1481.199997684615, 399.87000836895436, \n 548.2499911593322, 1352.760007116035, 702.5200057620646, \n 383.7700117705855, 707.2000052800154, 1140.9300043834062, \n 1730.3299981313016, 
289.68999155533413, 1274.8600008806216, \n 1063.2499947675371, 1660.8600022917144, 1426.670005812432])\n', (117577, 118142), True, 'import numpy as np\n'), ((118114, 118679), 'numpy.array', 'np.array', (['[1293.2799953573963, 1553.5800003037994, 1460.1599988550274, \n 872.0599978043742, 876.4400033114814, 598.9200007214372, \n 1258.6199918206175, 1224.7000037967478, 1242.530005349013, \n 552.8399954684833, 706.9700021553684, 754.63000554209, \n 1042.2800069222762, 364.8400055136739, 1297.1899933698528, \n 1260.320007366279, 568.0299942819768, 949.8800073732916, \n 754.580011319991, 261.1099917270594, 748.3999917806088, \n 1063.319998717625, 945.9700059216573, 306.9500110237749, \n 497.7900029269757, 706.310001700921, 858.780004244009]'], {}), '([1293.2799953573963, 1553.5800003037994, 1460.1599988550274, \n 872.0599978043742, 876.4400033114814, 598.9200007214372, \n 1258.6199918206175, 1224.7000037967478, 1242.530005349013, \n 552.8399954684833, 706.9700021553684, 754.63000554209, \n 1042.2800069222762, 364.8400055136739, 1297.1899933698528, \n 1260.320007366279, 568.0299942819768, 949.8800073732916, \n 754.580011319991, 261.1099917270594, 748.3999917806088, \n 1063.319998717625, 945.9700059216573, 306.9500110237749, \n 497.7900029269757, 706.310001700921, 858.780004244009])\n', (118122, 118679), True, 'import numpy as np\n'), ((118661, 119157), 'numpy.array', 'np.array', (['[[3.0, 3.0, 3.0], [2.0, 4.0, 2.0], [3.0, 5.0, 3.0], [5.0, 3.0, 3.0], [6.0, \n 2.0, 4.0], [6.0, 4.0, 2.0], [3.0, 5.0, 1.0], [4.0, 6.0, 2.0], [5.0, 3.0,\n 5.0], [7.0, 3.0, 3.0], [4.0, 2.0, 2.0], [5.0, 5.0, 1.0], [5.0, 5.0, 3.0\n ], [7.0, 5.0, 1.0], [5.0, 1.0, 5.0], [3.0, 1.0, 3.0], [8.0, 6.0, 2.0],\n [7.0, 3.0, 5.0], [5.0, 1.0, 3.0], [9.0, 3.0, 3.0], [7.0, 5.0, 3.0], [\n 5.0, 7.0, 1.0], [7.0, 1.0, 5.0], [5.0, 3.0, 1.0], [9.0, 5.0, 3.0], [7.0,\n 7.0, 1.0], [3.0, 3.0, 1.0]]'], {}), '([[3.0, 3.0, 3.0], [2.0, 4.0, 2.0], [3.0, 5.0, 3.0], [5.0, 3.0, 3.0\n ], [6.0, 2.0, 4.0], [6.0, 4.0, 2.0], [3.0, 5.0, 
1.0], [4.0, 6.0, 2.0],\n [5.0, 3.0, 5.0], [7.0, 3.0, 3.0], [4.0, 2.0, 2.0], [5.0, 5.0, 1.0], [\n 5.0, 5.0, 3.0], [7.0, 5.0, 1.0], [5.0, 1.0, 5.0], [3.0, 1.0, 3.0], [8.0,\n 6.0, 2.0], [7.0, 3.0, 5.0], [5.0, 1.0, 3.0], [9.0, 3.0, 3.0], [7.0, 5.0,\n 3.0], [5.0, 7.0, 1.0], [7.0, 1.0, 5.0], [5.0, 3.0, 1.0], [9.0, 5.0, 3.0\n ], [7.0, 7.0, 1.0], [3.0, 3.0, 1.0]])\n', (118669, 119157), True, 'import numpy as np\n'), ((119159, 119372), 'numpy.array', 'np.array', (['[[-0.9727538909589738, -0.21247913537718385, 0.09274958034159074], [\n 0.22567394392094073, -0.7761682018781203, 0.5887564805829774], [-\n 0.053107604650232926, 0.593645098498364, 0.8029726516869564]]'], {}), '([[-0.9727538909589738, -0.21247913537718385, 0.09274958034159074],\n [0.22567394392094073, -0.7761682018781203, 0.5887564805829774], [-\n 0.053107604650232926, 0.593645098498364, 0.8029726516869564]])\n', (119167, 119372), True, 'import numpy as np\n'), ((121283, 121314), 'lauetoolsnn.lauetools.CrystalParameters.calc_B_RR', 'CP.calc_B_RR', (['latticeparameters'], {}), '(latticeparameters)\n', (121295, 121314), True, 'import lauetoolsnn.lauetools.CrystalParameters as CP\n'), ((121738, 121784), 'lauetoolsnn.lauetools.generaltools.UBdecomposition_RRPP', 'GT.UBdecomposition_RRPP', (['starting_orientmatrix'], {}), '(starting_orientmatrix)\n', (121761, 121784), True, 'import lauetoolsnn.lauetools.generaltools as GT\n'), ((4642, 4678), 'lauetoolsnn.lauetools.generaltools.from3rotangles_toQuat', 'GT.from3rotangles_toQuat', (['angle_Quat'], {}), '(angle_Quat)\n', (4666, 4678), True, 'import lauetoolsnn.lauetools.generaltools as GT\n'), ((4849, 4858), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4855, 4858), True, 'import numpy as np\n'), ((4954, 4979), 'numpy.sum', 'np.sum', (['(Qrot ** 2)'], {'axis': '(0)'}), '(Qrot ** 2, axis=0)\n', (4960, 4979), True, 'import numpy as np\n'), ((7421, 7446), 'numpy.sum', 'np.sum', (['(Qrot ** 2)'], {'axis': '(0)'}), '(Qrot ** 2, axis=0)\n', (7427, 7446), True, 'import numpy as 
np\n'), ((9479, 9489), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9486, 9489), True, 'import numpy as np\n'), ((10037, 10063), 'lauetoolsnn.lauetools.generaltools.matRot', 'GT.matRot', (['AXIS1', '(a1 / DEG)'], {}), '(AXIS1, a1 / DEG)\n', (10046, 10063), True, 'import lauetoolsnn.lauetools.generaltools as GT\n'), ((10499, 10525), 'lauetoolsnn.lauetools.generaltools.matRot', 'GT.matRot', (['AXIS2', '(a2 / DEG)'], {}), '(AXIS2, a2 / DEG)\n', (10508, 10525), True, 'import lauetoolsnn.lauetools.generaltools as GT\n'), ((10929, 10955), 'lauetoolsnn.lauetools.generaltools.matRot', 'GT.matRot', (['AXIS3', '(a3 / DEG)'], {}), '(AXIS3, a3 / DEG)\n', (10938, 10955), True, 'import lauetoolsnn.lauetools.generaltools as GT\n'), ((10983, 11001), 'numpy.dot', 'np.dot', (['mat2', 'mat1'], {}), '(mat2, mat1)\n', (10989, 11001), True, 'import numpy as np\n'), ((12090, 12105), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (12096, 12105), True, 'import numpy as np\n'), ((22496, 22514), 'numpy.dot', 'np.dot', (['mat2', 'mat1'], {}), '(mat2, mat1)\n', (22502, 22514), True, 'import numpy as np\n'), ((22762, 22787), 'numpy.dot', 'np.dot', (['deltamat', 'initrot'], {}), '(deltamat, initrot)\n', (22768, 22787), True, 'import numpy as np\n'), ((23939, 23951), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (23948, 23951), True, 'import numpy as np\n'), ((24491, 24506), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (24497, 24506), True, 'import numpy as np\n'), ((28576, 28594), 'numpy.dot', 'np.dot', (['mat2', 'mat1'], {}), '(mat2, mat1)\n', (28582, 28594), True, 'import numpy as np\n'), ((30184, 30202), 'numpy.dot', 'np.dot', (['mat2', 'mat1'], {}), '(mat2, mat1)\n', (30190, 30202), True, 'import numpy as np\n'), ((30421, 30448), 'numpy.dot', 'np.dot', (['deltamat_1', 'initrot'], {}), '(deltamat_1, initrot)\n', (30427, 30448), True, 'import numpy as np\n'), ((30491, 30518), 'numpy.dot', 'np.dot', (['deltamat_2', 'initrot'], {}), '(deltamat_2, initrot)\n', 
(30497, 30518), True, 'import numpy as np\n'), ((31641, 31653), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (31650, 31653), True, 'import numpy as np\n'), ((32613, 32625), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (32622, 32625), True, 'import numpy as np\n'), ((33145, 33160), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (33151, 33160), True, 'import numpy as np\n'), ((38086, 38286), 'scipy.optimize.least_squares', 'least_squares', (['_error_function_on_demand_strain', 'param_strain_0'], {'args': '(miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY)', 'tr_solver': '"""exact"""', 'x_scale': 'xscale', 'max_nfev': 'None'}), "(_error_function_on_demand_strain, param_strain_0, args=(\n miller, allparameters, arr_indexvaryingparameters, nspots, pixX, pixY),\n tr_solver='exact', x_scale=xscale, max_nfev=None)\n", (38099, 38286), False, 'from scipy.optimize import leastsq, least_squares\n'), ((42078, 42110), 'numpy.linspace', 'np.linspace', (['mini', 'maxi', 'nbsteps'], {}), '(mini, maxi, nbsteps)\n', (42089, 42110), True, 'import numpy as np\n'), ((60357, 60407), 'numpy.array', 'np.array', (['[[1.0, s2, s3], [0, s0, s4], [0, 0, s1]]'], {}), '([[1.0, s2, s3], [0, s0, s4], [0, 0, s1]])\n', (60365, 60407), True, 'import numpy as np\n'), ((61582, 61634), 'numpy.sqrt', 'np.sqrt', (['((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)'], {}), '((Xmodel - Xexp) ** 2 + (Ymodel - Yexp) ** 2)\n', (61589, 61634), True, 'import numpy as np\n'), ((62309, 62372), 'numpy.hstack', 'np.hstack', (['(distances_vector_list[0], distances_vector_list[1])'], {}), '((distances_vector_list[0], distances_vector_list[1]))\n', (62318, 62372), True, 'import numpy as np\n'), ((70844, 70858), 'numpy.dot', 'np.dot', (['Ux', 'Uy'], {}), '(Ux, Uy)\n', (70850, 70858), True, 'import numpy as np\n'), ((71727, 71748), 'numpy.dot', 'np.dot', (['newmatrix', 'Tc'], {}), '(newmatrix, Tc)\n', (71733, 71748), True, 'import numpy as np\n'), ((73170, 73185), 'numpy.sum', 
'np.sum', (['weights'], {}), '(weights)\n', (73176, 73185), True, 'import numpy as np\n'), ((92140, 92154), 'numpy.dot', 'np.dot', (['Ux', 'Uy'], {}), '(Ux, Uy)\n', (92146, 92154), True, 'import numpy as np\n'), ((94200, 94215), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (94206, 94215), True, 'import numpy as np\n'), ((100153, 100167), 'numpy.dot', 'np.dot', (['Ux', 'Uy'], {}), '(Ux, Uy)\n', (100159, 100167), True, 'import numpy as np\n'), ((100558, 100583), 'numpy.dot', 'np.dot', (['DictLT.RotY40', 'Ts'], {}), '(DictLT.RotY40, Ts)\n', (100564, 100583), True, 'import numpy as np\n'), ((101894, 101909), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (101900, 101909), True, 'import numpy as np\n'), ((114401, 114415), 'numpy.dot', 'np.dot', (['Ux', 'Uy'], {}), '(Ux, Uy)\n', (114407, 114415), True, 'import numpy as np\n'), ((114806, 114831), 'numpy.dot', 'np.dot', (['DictLT.RotY40', 'Ts'], {}), '(DictLT.RotY40, Ts)\n', (114812, 114831), True, 'import numpy as np\n'), ((115964, 115979), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (115970, 115979), True, 'import numpy as np\n'), ((4037, 4048), 'scipy.linalg.qr', 'qr', (['initrot'], {}), '(initrot)\n', (4039, 4048), False, 'from scipy.linalg import qr\n'), ((4272, 4293), 'numpy.dot', 'np.dot', (['R', 'vecteurref'], {}), '(R, vecteurref)\n', (4278, 4293), True, 'import numpy as np\n'), ((4738, 4768), 'lauetoolsnn.lauetools.generaltools.fromQuat_to_MatrixRot', 'GT.fromQuat_to_MatrixRot', (['Quat'], {}), '(Quat)\n', (4762, 4768), True, 'import lauetoolsnn.lauetools.generaltools as GT\n'), ((7173, 7192), 'numpy.dot', 'np.dot', (['R', 'B0matrix'], {}), '(R, B0matrix)\n', (7179, 7192), True, 'import numpy as np\n'), ((7628, 7648), 'numpy.transpose', 'np.transpose', (['DATA_Q'], {}), '(DATA_Q)\n', (7640, 7648), True, 'import numpy as np\n'), ((42185, 42202), 'numpy.array', 'np.array', (['[angle]'], {}), '([angle])\n', (42193, 42202), True, 'import numpy as np\n'), ((44018, 44030), 
'numpy.arange', 'np.arange', (['(8)'], {}), '(8)\n', (44027, 44030), True, 'import numpy as np\n'), ((59195, 59213), 'numpy.dot', 'np.dot', (['mat2', 'mat1'], {}), '(mat2, mat1)\n', (59201, 59213), True, 'import numpy as np\n'), ((60436, 60461), 'numpy.dot', 'np.dot', (['deltamat', 'initrot'], {}), '(deltamat, initrot)\n', (60442, 60461), True, 'import numpy as np\n'), ((61693, 61721), 'numpy.sum', 'np.sum', (['weights[grain_index]'], {}), '(weights[grain_index])\n', (61699, 61721), True, 'import numpy as np\n'), ((65739, 65768), 'numpy.array', 'np.array', (['allparameters[8:17]'], {}), '(allparameters[8:17])\n', (65747, 65768), True, 'import numpy as np\n'), ((65793, 65823), 'numpy.array', 'np.array', (['allparameters[17:26]'], {}), '(allparameters[17:26])\n', (65801, 65823), True, 'import numpy as np\n'), ((65849, 65879), 'numpy.array', 'np.array', (['allparameters[26:35]'], {}), '(allparameters[26:35])\n', (65857, 65879), True, 'import numpy as np\n'), ((66800, 66818), 'numpy.cos', 'np.cos', (['anglevalue'], {}), '(anglevalue)\n', (66806, 66818), True, 'import numpy as np\n'), ((66836, 66854), 'numpy.sin', 'np.sin', (['anglevalue'], {}), '(anglevalue)\n', (66842, 66854), True, 'import numpy as np\n'), ((71794, 71814), 'numpy.dot', 'np.dot', (['T', 'newmatrix'], {}), '(T, newmatrix)\n', (71800, 71814), True, 'import numpy as np\n'), ((90072, 90090), 'numpy.cos', 'np.cos', (['anglevalue'], {}), '(anglevalue)\n', (90078, 90090), True, 'import numpy as np\n'), ((90108, 90126), 'numpy.sin', 'np.sin', (['anglevalue'], {}), '(anglevalue)\n', (90114, 90126), True, 'import numpy as np\n'), ((99061, 99079), 'numpy.cos', 'np.cos', (['anglevalue'], {}), '(anglevalue)\n', (99067, 99079), True, 'import numpy as np\n'), ((99097, 99115), 'numpy.sin', 'np.sin', (['anglevalue'], {}), '(anglevalue)\n', (99103, 99115), True, 'import numpy as np\n'), ((113309, 113327), 'numpy.cos', 'np.cos', (['anglevalue'], {}), '(anglevalue)\n', (113315, 113327), True, 'import numpy as np\n'), 
((113345, 113363), 'numpy.sin', 'np.sin', (['anglevalue'], {}), '(anglevalue)\n', (113351, 113363), True, 'import numpy as np\n'), ((9633, 9674), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == 5)'], {}), '(arr_indexvaryingparameters == 5)\n', (9641, 9674), True, 'import numpy as np\n'), ((10118, 10159), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == 6)'], {}), '(arr_indexvaryingparameters == 6)\n', (10126, 10159), True, 'import numpy as np\n'), ((10580, 10621), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == 7)'], {}), '(arr_indexvaryingparameters == 7)\n', (10588, 10621), True, 'import numpy as np\n'), ((21119, 21208), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[0])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters[0])\n', (21127, 21208), True, 'import numpy as np\n'), ((21604, 21693), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[1])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters[1])\n', (21612, 21693), True, 'import numpy as np\n'), ((22067, 22156), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[2])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters[2])\n', (22075, 22156), True, 'import numpy as np\n'), ((27087, 27178), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters_1[0])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters_1[0])\n', (27095, 27178), True, 'import numpy as np\n'), ((27615, 27706), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters_1[1])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters_1[1])\n', (27623, 27706), True, 'import numpy as np\n'), ((28143, 
28234), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters_1[2])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters_1[2])\n', (28151, 28234), True, 'import numpy as np\n'), ((28698, 28789), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters_2[0])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters_2[0])\n', (28706, 28789), True, 'import numpy as np\n'), ((29247, 29338), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters_2[1])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters_2[1])\n', (29255, 29338), True, 'import numpy as np\n'), ((29795, 29886), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters_2[2])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters_2[2])\n', (29803, 29886), True, 'import numpy as np\n'), ((43221, 43238), 'numpy.mean', 'np.mean', (['residues'], {}), '(residues)\n', (43228, 43238), True, 'import numpy as np\n'), ((66919, 66967), 'numpy.array', 'np.array', (['[[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]]'], {}), '([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])\n', (66927, 66967), True, 'import numpy as np\n'), ((71923, 71943), 'numpy.dot', 'np.dot', (['T', 'newmatrix'], {}), '(T, newmatrix)\n', (71929, 71943), True, 'import numpy as np\n'), ((90191, 90239), 'numpy.array', 'np.array', (['[[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]]'], {}), '([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])\n', (90199, 90239), True, 'import numpy as np\n'), ((99180, 99228), 'numpy.array', 'np.array', (['[[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]]'], {}), '([[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]])\n', (99188, 99228), True, 'import numpy as np\n'), ((113428, 113476), 'numpy.array', 'np.array', (['[[ca, 0, sa], [0, 1, 0], [-sa, 0, ca]]'], {}), '([[ca, 0, sa], 
[0, 1, 0], [-sa, 0, ca]])\n', (113436, 113476), True, 'import numpy as np\n'), ((4077, 4087), 'numpy.diag', 'np.diag', (['Q'], {}), '(Q)\n', (4084, 4087), True, 'import numpy as np\n'), ((9880, 9890), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (9886, 9890), True, 'import numpy as np\n'), ((9895, 9905), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (9901, 9905), True, 'import numpy as np\n'), ((10007, 10017), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (10013, 10017), True, 'import numpy as np\n'), ((10403, 10413), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (10409, 10413), True, 'import numpy as np\n'), ((10415, 10425), 'numpy.sin', 'np.sin', (['a2'], {}), '(a2)\n', (10421, 10425), True, 'import numpy as np\n'), ((10456, 10467), 'numpy.sin', 'np.sin', (['(-a2)'], {}), '(-a2)\n', (10462, 10467), True, 'import numpy as np\n'), ((10469, 10479), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (10475, 10479), True, 'import numpy as np\n'), ((10795, 10805), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (10801, 10805), True, 'import numpy as np\n'), ((10849, 10859), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (10855, 10859), True, 'import numpy as np\n'), ((10861, 10871), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (10867, 10871), True, 'import numpy as np\n'), ((21434, 21444), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (21440, 21444), True, 'import numpy as np\n'), ((21449, 21459), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (21455, 21459), True, 'import numpy as np\n'), ((21490, 21500), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (21496, 21500), True, 'import numpy as np\n'), ((21911, 21921), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (21917, 21921), True, 'import numpy as np\n'), ((21923, 21933), 'numpy.sin', 'np.sin', (['a2'], {}), '(a2)\n', (21929, 21933), True, 'import numpy as np\n'), ((21940, 21951), 'numpy.sin', 'np.sin', (['(-a2)'], {}), '(-a2)\n', (21946, 21951), True, 'import numpy as np\n'), ((21953, 21963), 'numpy.cos', 
'np.cos', (['a2'], {}), '(a2)\n', (21959, 21963), True, 'import numpy as np\n'), ((22341, 22351), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (22347, 22351), True, 'import numpy as np\n'), ((22399, 22409), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (22405, 22409), True, 'import numpy as np\n'), ((22411, 22421), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (22417, 22421), True, 'import numpy as np\n'), ((27395, 27405), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (27401, 27405), True, 'import numpy as np\n'), ((27410, 27420), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (27416, 27420), True, 'import numpy as np\n'), ((27499, 27509), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (27505, 27509), True, 'import numpy as np\n'), ((27961, 27971), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (27967, 27971), True, 'import numpy as np\n'), ((27973, 27983), 'numpy.sin', 'np.sin', (['a2'], {}), '(a2)\n', (27979, 27983), True, 'import numpy as np\n'), ((28014, 28025), 'numpy.sin', 'np.sin', (['(-a2)'], {}), '(-a2)\n', (28020, 28025), True, 'import numpy as np\n'), ((28027, 28037), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (28033, 28037), True, 'import numpy as np\n'), ((28419, 28429), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (28425, 28429), True, 'import numpy as np\n'), ((28477, 28487), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (28483, 28487), True, 'import numpy as np\n'), ((28489, 28499), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (28495, 28499), True, 'import numpy as np\n'), ((29027, 29037), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (29033, 29037), True, 'import numpy as np\n'), ((29042, 29052), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (29048, 29052), True, 'import numpy as np\n'), ((29131, 29141), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (29137, 29141), True, 'import numpy as np\n'), ((29609, 29619), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (29615, 29619), True, 'import numpy as np\n'), ((29621, 29631), 
'numpy.sin', 'np.sin', (['a2'], {}), '(a2)\n', (29627, 29631), True, 'import numpy as np\n'), ((29666, 29677), 'numpy.sin', 'np.sin', (['(-a2)'], {}), '(-a2)\n', (29672, 29677), True, 'import numpy as np\n'), ((29679, 29689), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (29685, 29689), True, 'import numpy as np\n'), ((30083, 30093), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (30089, 30093), True, 'import numpy as np\n'), ((30113, 30123), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (30119, 30123), True, 'import numpy as np\n'), ((30125, 30135), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (30131, 30135), True, 'import numpy as np\n'), ((57709, 57798), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[0])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters[0])\n', (57717, 57798), True, 'import numpy as np\n'), ((58247, 58336), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[1])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters[1])\n', (58255, 58336), True, 'import numpy as np\n'), ((58785, 58874), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters == index_of_rot_in_arr_indexvaryingparameters[2])'], {}), '(arr_indexvaryingparameters ==\n index_of_rot_in_arr_indexvaryingparameters[2])\n', (58793, 58874), True, 'import numpy as np\n'), ((67034, 67084), 'numpy.array', 'np.array', (['[[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]]'], {}), '([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])\n', (67042, 67084), True, 'import numpy as np\n'), ((71860, 71885), 'numpy.dot', 'np.dot', (['DictLT.RotY40', 'Ts'], {}), '(DictLT.RotY40, Ts)\n', (71866, 71885), True, 'import numpy as np\n'), ((72004, 72067), 'lauetoolsnn.lauetools.CrystalParameters.calc_B_RR', 'CP.calc_B_RR', (['latticeparameters'], {'directspace': '(1)', 'setvolume': '(False)'}), '(latticeparameters, directspace=1, setvolume=False)\n', 
(72016, 72067), True, 'import lauetoolsnn.lauetools.CrystalParameters as CP\n'), ((90306, 90356), 'numpy.array', 'np.array', (['[[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]]'], {}), '([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])\n', (90314, 90356), True, 'import numpy as np\n'), ((99295, 99345), 'numpy.array', 'np.array', (['[[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]]'], {}), '([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])\n', (99303, 99345), True, 'import numpy as np\n'), ((113543, 113593), 'numpy.array', 'np.array', (['[[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]]'], {}), '([[1.0, 0, 0], [0, ca, sa], [0, -sa, ca]])\n', (113551, 113593), True, 'import numpy as np\n'), ((1265, 1304), 'lauetoolsnn.lauetools.generaltools.norme_vec', 'GT.norme_vec', (['(uflab[j, :] - uflab[i, :])'], {}), '(uflab[j, :] - uflab[i, :])\n', (1277, 1304), True, 'import lauetoolsnn.lauetools.generaltools as GT\n'), ((9992, 10002), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (9998, 10002), True, 'import numpy as np\n'), ((10808, 10818), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (10814, 10818), True, 'import numpy as np\n'), ((21475, 21485), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (21481, 21485), True, 'import numpy as np\n'), ((22354, 22364), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (22360, 22364), True, 'import numpy as np\n'), ((27484, 27494), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (27490, 27494), True, 'import numpy as np\n'), ((28432, 28442), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (28438, 28442), True, 'import numpy as np\n'), ((29116, 29126), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (29122, 29126), True, 'import numpy as np\n'), ((30096, 30106), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (30102, 30106), True, 'import numpy as np\n'), ((58069, 58079), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (58075, 58079), True, 'import numpy as np\n'), ((58084, 58094), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', (58090, 58094), True, 'import numpy as np\n'), ((58125, 
58135), 'numpy.cos', 'np.cos', (['a1'], {}), '(a1)\n', (58131, 58135), True, 'import numpy as np\n'), ((58621, 58631), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (58627, 58631), True, 'import numpy as np\n'), ((58633, 58643), 'numpy.sin', 'np.sin', (['a2'], {}), '(a2)\n', (58639, 58643), True, 'import numpy as np\n'), ((58650, 58661), 'numpy.sin', 'np.sin', (['(-a2)'], {}), '(-a2)\n', (58656, 58661), True, 'import numpy as np\n'), ((58663, 58673), 'numpy.cos', 'np.cos', (['a2'], {}), '(a2)\n', (58669, 58673), True, 'import numpy as np\n'), ((59092, 59102), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (59098, 59102), True, 'import numpy as np\n'), ((59122, 59132), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (59128, 59132), True, 'import numpy as np\n'), ((59134, 59144), 'numpy.cos', 'np.cos', (['a3'], {}), '(a3)\n', (59140, 59144), True, 'import numpy as np\n'), ((59876, 59974), 'numpy.where', 'np.where', (['(arr_indexvaryingparameters ==\n index_of_strain_in_arr_indexvaryingparameters[s_index])'], {}), '(arr_indexvaryingparameters ==\n index_of_strain_in_arr_indexvaryingparameters[s_index])\n', (59884, 59974), True, 'import numpy as np\n'), ((67152, 67202), 'numpy.array', 'np.array', (['[[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]]'], {}), '([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])\n', (67160, 67202), True, 'import numpy as np\n'), ((90424, 90474), 'numpy.array', 'np.array', (['[[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]]'], {}), '([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])\n', (90432, 90474), True, 'import numpy as np\n'), ((99413, 99463), 'numpy.array', 'np.array', (['[[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]]'], {}), '([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])\n', (99421, 99463), True, 'import numpy as np\n'), ((113661, 113711), 'numpy.array', 'np.array', (['[[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]]'], {}), '([[ca, -sa, 0], [sa, ca, 0], [0, 0, 1.0]])\n', (113669, 113711), True, 'import numpy as np\n'), ((58110, 58120), 'numpy.sin', 'np.sin', (['a1'], {}), '(a1)\n', 
(58116, 58120), True, 'import numpy as np\n'), ((59105, 59115), 'numpy.sin', 'np.sin', (['a3'], {}), '(a3)\n', (59111, 59115), True, 'import numpy as np\n')] |
"""Action selector implementations.
Action selectors are objects that when called return a desired
action. These actions may be stochastically chosen (e.g. randomly chosen
from a list of candidates) depending on the choice of `ActionSelector`
implementation, and how it is configured.
Examples include the following
* `DeterministicActionSelector`: always returns the same (specified) action
* `UniformDiscreteActionSelector`: selections an action uniformly at random
from a specified discrete action space
* `NoisyActionSelector`: uses either a "preferred" action selector (with
probability `1 - epsilon`) or a "noise" action selector (with probability
`epsilon`) to determine the action. Useful, for example, to implement an
epsilon-greedy agent.
"""
from abc import ABC, abstractmethod
from numpy.typing import ArrayLike
import numpy as np
class ActionSelector(ABC):
    """Interface for objects that choose an action when called."""

    @abstractmethod
    def __call__(self):
        """Return the selected action."""
class DeterministicActionSelector(ActionSelector):
    """Action selector that always returns one fixed action.

    Args:
        chosen_action: the action returned by every call.
    """

    def __init__(self, chosen_action):
        self.chosen_action = chosen_action

    def __call__(self):
        # No randomness: the configured action is the answer every time.
        return self.chosen_action
class UniformDiscreteActionSelector(ActionSelector):
    """Selects uniformly at random from the actions {0, ..., n_actions - 1}.

    Args:
        n_actions: size of the (zero-indexed) discrete action space.
        random_state: anything accepted by `np.random.default_rng` (``None``,
            ``int``, an existing ``Generator``, ...).
    """

    def __init__(self, n_actions: int, *, random_state=None):
        self.n_actions = n_actions
        self._rng = np.random.default_rng(random_state)

    def __call__(self) -> int:
        # `Generator.integers` draws from [0, n_actions) by default.
        return self._rng.integers(self.n_actions)
class NoisyActionSelector(ActionSelector):
    """Mixes a preferred selector with a noise selector.

    Each call delegates to `noise_selector` with probability `epsilon` and to
    `preferred_selector` with probability `1 - epsilon`. Useful, for example,
    for implementing an epsilon-greedy agent, or any agent (including ones on
    continuous action spaces) where noise should be injected into decisions.
    """

    def __init__(
        self,
        epsilon: float,
        preferred_selector: ActionSelector,
        noise_selector: ActionSelector,
        *,
        random_state=None,
    ):
        self.epsilon = epsilon
        self.preferred = preferred_selector
        self.noise = noise_selector
        self._rng = np.random.default_rng(random_state)

    def select_noise_not_preferred(self) -> bool:
        """Returns `True` (indicating 'select noise') epsilon of the time."""
        # A single Bernoulli(epsilon) draw.
        return bool(self._rng.binomial(n=1, p=self.epsilon))

    def __call__(self):
        chosen = self.noise if self.select_noise_not_preferred() else self.preferred
        return chosen()
class EpsilonGreedyActionSelector(NoisyActionSelector):
    """`NoisyActionSelector` pre-configured for epsilon-greedy selection.

    With probability `epsilon` a uniformly random action from the discrete
    space is chosen; otherwise the given greedy action is returned.

    Args:
        epsilon: probability of choosing a (uniformly) random action.
        chosen_action: desired (greedy) action.
        n_actions: size of the discrete action space.
        random_state: initial state for the RNG (shared by noise and mixing).
    """

    # Specialising superclass attribute types for this subclass (so a type
    # checker knows their concrete types).
    preferred: DeterministicActionSelector
    noise: UniformDiscreteActionSelector

    def __init__(
        self,
        epsilon: float,
        chosen_action: int,
        n_actions: int,
        *,
        random_state=None,
    ):
        rng = np.random.default_rng(random_state)
        super().__init__(
            epsilon,
            DeterministicActionSelector(chosen_action),
            UniformDiscreteActionSelector(n_actions, random_state=rng),
            random_state=rng,
        )
class DiscreteActionSelector(ActionSelector):
    """Draws actions from a user-specified discrete distribution.

    Args:
        p: probability vector; entry ``i`` is the probability of action ``i``.
        random_state: initial state for the RNG.
    """

    def __init__(self, p: ArrayLike, *, random_state=None):
        self.p = np.array(p)
        self._rng = np.random.default_rng(random_state)

    def __call__(self) -> int:
        n_actions = len(self.p)
        return self._rng.choice(n_actions, p=self.p)
| [
"numpy.array",
"numpy.random.default_rng"
] | [((1845, 1880), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (1866, 1880), True, 'import numpy as np\n'), ((2706, 2741), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (2727, 2741), True, 'import numpy as np\n'), ((4031, 4066), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (4052, 4066), True, 'import numpy as np\n'), ((4593, 4604), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (4601, 4604), True, 'import numpy as np\n'), ((4625, 4660), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (4646, 4660), True, 'import numpy as np\n')] |
import numpy as np
from abc import ABC, abstractmethod
# Defining base loss class
class Loss(ABC):
    """Common interface for loss functions."""

    @abstractmethod
    def __call__(self, pred, target):
        """Return the loss value for predictions against targets."""

    @abstractmethod
    def gradient(self, *args, **kwargs):
        """Return the gradient of the loss with respect to the predictions."""
class MSELoss(Loss):
    """Half mean-squared-error, averaged over the batch axis (axis 0)."""

    def __call__(self, pred, target):
        residual = pred - target
        return np.square(residual).mean(axis=0) / 2

    def gradient(self, pred, target):
        residual = pred - target
        return residual.mean(axis=0)
class L2RegularizationLoss(Loss):
    """Half the total sum of squares across a collection of weight arrays."""

    def __call__(self, weights):
        total = sum(np.square(w).sum() for w in weights)
        return total / 2

    def gradient(self, w):
        # d/dw of ||w||^2 / 2 is simply w.
        return w
class CrossEntropyLoss(Loss):
    """Cross-entropy loss averaged over the batch axis.

    The forward pass clips predictions at 1e-9 to avoid ``log(0)``.
    """

    def __call__(self, pred, target):
        return -np.sum(target * np.log(np.maximum(pred, 1e-9)), axis=1).mean()

    def gradient(self, pred, target):
        # BUG FIX: d/dp [-t*log(p) - (1-t)*log(1-p)] = -t/p + (1-t)/(1-p).
        # The original returned +t/p for the first term (wrong sign), which
        # points the gradient *away* from the target.
        return -target / pred + (1 - target) / (1 - pred)
class CrossEntropyLossWithSoftmax(CrossEntropyLoss):
    # When predictions come from a softmax layer, the gradient of the
    # cross-entropy w.r.t. the pre-softmax logits simplifies to pred - target.
    def gradient(self, pred, target):
        return pred - target | [
"numpy.maximum",
"numpy.square"
] | [((325, 349), 'numpy.square', 'np.square', (['(pred - target)'], {}), '(pred - target)\n', (334, 349), True, 'import numpy as np\n'), ((538, 550), 'numpy.square', 'np.square', (['w'], {}), '(w)\n', (547, 550), True, 'import numpy as np\n'), ((734, 757), 'numpy.maximum', 'np.maximum', (['pred', '(1e-09)'], {}), '(pred, 1e-09)\n', (744, 757), True, 'import numpy as np\n')] |
import glob
import xml.etree.ElementTree as ET
from unittest import TestCase
import numpy as np
from kmeans import kmeans, avg_iou
ANNOTATIONS_PATH = "Annotations"
class TestVoc2007(TestCase):
    """Regression tests for k-means anchor clustering on VOC2007 boxes."""

    def __load_dataset(self):
        """Parse every annotation XML and collect normalised (w, h) box sizes."""
        samples = []
        for xml_file in glob.glob("{}/*xml".format(ANNOTATIONS_PATH)):
            tree = ET.parse(xml_file)
            img_h = int(tree.findtext("./size/height"))
            img_w = int(tree.findtext("./size/width"))
            for obj in tree.iter("object"):
                # Normalise pixel coordinates by the image dimensions.
                x0 = int(obj.findtext("bndbox/xmin")) / img_w
                y0 = int(obj.findtext("bndbox/ymin")) / img_h
                x1 = int(obj.findtext("bndbox/xmax")) / img_w
                y1 = int(obj.findtext("bndbox/ymax")) / img_h
                samples.append([x1 - x0, y1 - y0])
        return np.array(samples)

    def test_kmeans_5(self):
        boxes = self.__load_dataset()
        clusters = kmeans(boxes, 5)
        np.testing.assert_almost_equal(avg_iou(boxes, clusters), 0.61, decimal=2)

    def test_kmeans_9(self):
        boxes = self.__load_dataset()
        clusters = kmeans(boxes, 9)
        np.testing.assert_almost_equal(avg_iou(boxes, clusters), 0.672, decimal=2)
| [
"xml.etree.ElementTree.parse",
"kmeans.avg_iou",
"kmeans.kmeans",
"numpy.array",
"numpy.testing.assert_almost_equal"
] | [((850, 867), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (858, 867), True, 'import numpy as np\n'), ((953, 971), 'kmeans.kmeans', 'kmeans', (['dataset', '(5)'], {}), '(dataset, 5)\n', (959, 971), False, 'from kmeans import kmeans, avg_iou\n'), ((993, 1014), 'kmeans.avg_iou', 'avg_iou', (['dataset', 'out'], {}), '(dataset, out)\n', (1000, 1014), False, 'from kmeans import kmeans, avg_iou\n'), ((1024, 1083), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['percentage', '(0.61)'], {'decimal': '(2)'}), '(percentage, 0.61, decimal=2)\n', (1054, 1083), True, 'import numpy as np\n'), ((1169, 1187), 'kmeans.kmeans', 'kmeans', (['dataset', '(9)'], {}), '(dataset, 9)\n', (1175, 1187), False, 'from kmeans import kmeans, avg_iou\n'), ((1209, 1230), 'kmeans.avg_iou', 'avg_iou', (['dataset', 'out'], {}), '(dataset, out)\n', (1216, 1230), False, 'from kmeans import kmeans, avg_iou\n'), ((1240, 1300), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['percentage', '(0.672)'], {'decimal': '(2)'}), '(percentage, 0.672, decimal=2)\n', (1270, 1300), True, 'import numpy as np\n'), ((339, 357), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file'], {}), '(xml_file)\n', (347, 357), True, 'import xml.etree.ElementTree as ET\n')] |
'''
This example show how to perform a DMR topic model using tomotopy
and visualize the topic distribution for each metadata
Required Packages:
matplotlib
'''
import tomotopy as tp
import numpy as np
import matplotlib.pyplot as plt
'''
You can get the sample data file from https://drive.google.com/file/d/1AUHdwaPzw5qW0j8MaKqFNfw-SQDMbIzw/view?usp=sharing .
'''
# Load the corpus: one tab-separated document per line, "<metadata>\t<text>".
corpus = tp.utils.Corpus()
# FIX: use a context manager so the input file is closed deterministically
# (the original left the handle open, relying on garbage collection).
with open('text_mining.txt', encoding='utf-8') as corpus_file:
    for line in corpus_file:
        fd = line.strip().split('\t')
        # fd[0] is the metadata label, fd[1] the document text.
        corpus.add_doc(fd[1].lower().split(), metadata=fd[0])
# We set a range of the first metadata as [2000, 2017]
# and one of the second metadata as [0, 1].
mdl = tp.DMRModel(tw=tp.TermWeight.PMI,
    k=15,
    corpus=corpus
)
mdl.optim_interval = 20
mdl.burn_in = 200
# train(0) initialises the model without running any Gibbs iterations.
mdl.train(0)
print('Num docs:{}, Num Vocabs:{}, Total Words:{}'.format(
    len(mdl.docs), len(mdl.used_vocabs), mdl.num_words
))
# Let's train the model, reporting the log-likelihood every 20 iterations.
for i in range(0, 2000, 20):
    print('Iteration: {:04} LL per word: {:.4}'.format(i, mdl.ll_per_word))
    mdl.train(20)
print('Iteration: {:04} LL per word: {:.4}'.format(2000, mdl.ll_per_word))
mdl.summary()
# calculate topic distribution for each metadata using softmax
# (subtracting the column max first for numerical stability)
probs = np.exp(mdl.lambdas - mdl.lambdas.max(axis=0))
probs /= probs.sum(axis=0)
print('Topic distributions for each metadata')
for f, metadata_name in enumerate(mdl.metadata_dict):
    print(metadata_name, probs[:, f], '\n')
# Grouped bar chart: one bar group per topic, one bar per metadata value.
x = np.arange(mdl.k)
width = 1 / (mdl.f + 2)
fig, ax = plt.subplots()
for f, metadata_name in enumerate(mdl.metadata_dict):
    ax.bar(x + width * (f - mdl.f / 2), probs[:, f], width, label=mdl.metadata_dict[f])
ax.set_ylabel('Probabilities')
ax.set_yscale('log')
ax.set_title('Topic distributions')
ax.set_xticks(x)
ax.set_xticklabels(['Topic #{}'.format(k) for k in range(mdl.k)])
ax.legend()
fig.tight_layout()
plt.show()
| [
"tomotopy.utils.Corpus",
"tomotopy.DMRModel",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((380, 397), 'tomotopy.utils.Corpus', 'tp.utils.Corpus', ([], {}), '()\n', (395, 397), True, 'import tomotopy as tp\n'), ((652, 706), 'tomotopy.DMRModel', 'tp.DMRModel', ([], {'tw': 'tp.TermWeight.PMI', 'k': '(15)', 'corpus': 'corpus'}), '(tw=tp.TermWeight.PMI, k=15, corpus=corpus)\n', (663, 706), True, 'import tomotopy as tp\n'), ((1425, 1441), 'numpy.arange', 'np.arange', (['mdl.k'], {}), '(mdl.k)\n', (1434, 1441), True, 'import numpy as np\n'), ((1477, 1491), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1489, 1491), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1848), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1846, 1848), True, 'import matplotlib.pyplot as plt\n')] |
"""
Functions for making a consistent dataset with fixed and free variables as is expected in our dataset.
"""
import logging
import sys
from itertools import chain
from pathlib import Path
import numpy as np
import pandas as pd
import sympy
from src.util import get_free_fluxes
RT = 0.008314 * 298.15
logger = logging.getLogger(__name__)
def namevec(name, vec):
    """Return indexed labels "<name>_0" ... "<name>_{len(vec)-1}" for `vec`."""
    return [f"{name}_{i}" for i, _ in enumerate(vec)]
def calc_internal_fluxes(s_gamma, e, b, dgf, c):
    """Calculate internal reaction fluxes from a set of model parameters."""
    # Reaction Gibbs energies from formation energies and log concentrations c.
    gibbs = dgf + RT * np.log(c)
    dgr = s_gamma.T @ gibbs
    # Scale each reaction's driving force by its b*e coefficient.
    return dgr.multiply(b * e, axis=0)
def get_s_x(S, b, e, exchange_rxns):
    """Build the modified stoichiometric matrix used for the free/fixed-flux solve.

    The top-left block passes exchange fluxes through unchanged; the
    bottom-right block is the internal stoichiometry scaled row-wise by b*e.
    """
    n_mets, n_rxns = S.shape
    n_exchange = exchange_rxns.sum()
    s_x = np.zeros((n_rxns, n_exchange + n_mets))
    s_x[:n_exchange, :n_exchange] = np.identity(n_exchange)
    internal = S.loc[:, ~exchange_rxns].T
    s_x[n_exchange:, n_exchange:] = internal.mul(b * e, axis=0)
    return s_x
def get_s_c(S, b, e, exchange_rxns):
    """Project the modified matrix through the stoichiometry: S @ s_x."""
    return S.values @ get_s_x(S, b, e, exchange_rxns)
def calc_fixed(S, b, e, c_free, t_free, dgf, free_vars):
    """
    Calculate all fixed parameters from the free parameters.

    Args:
        S: stoichiometric matrix (DataFrame, metabolites x reactions) whose
            exchange reactions are named with "SK_"/"EX_" prefixes.
        b, e: per-internal-reaction scaling parameters.
        c_free: log concentrations of the free metabolites.
        t_free: values of the free exchange fluxes.
        dgf: formation energies of the metabolites.
        free_vars: boolean mask over the x vector marking the free variables.

    Returns:
        (v, c): the full flux vector and the full log-concentration vector.
    """
    num_mets, num_rxns = S.shape
    exchange_rxns = S.columns.str.contains("SK_") | S.columns.str.contains("EX_")
    # Check that they are at the start of the reactions.
    # BUG FIX: the original used `assert ~any(...)`, but `~True == -2` and
    # `~False == -1` are both truthy, so that assertion could never fire.
    assert not any(exchange_rxns[exchange_rxns.sum():]), "All reactions should be first"
    # Determine the s_c and s_x matrices
    s_c = get_s_c(S, b, e, exchange_rxns)
    s_x = get_s_x(S, b, e, exchange_rxns)
    # More useful numbers
    num_exchange = exchange_rxns.sum()
    num_x = num_exchange + num_mets
    # Define some masks for the different parts of the x vector
    # (exchange fluxes first, then metabolite terms).
    conc_x = np.full(num_x, False)
    conc_x[num_exchange:] = True
    free_c_mask = free_vars[conc_x]
    fixed_c_mask = ~free_vars[conc_x]
    # Calculate the rhs of the equation (from the free vars)
    x = np.full(num_x, np.NAN)
    assert len(c_free) == free_c_mask.sum(), "The number of free c must be correct"
    assert len(t_free) == free_vars[~conc_x].sum(), "The number of free t must be correct"
    x[conc_x & free_vars] = dgf[free_c_mask] + RT * c_free
    x[~conc_x & free_vars] = t_free
    rhs = -s_c[:, free_vars] @ x[free_vars]
    # Determine the corresponding fixed variables
    x[~free_vars] = np.linalg.solve(s_c[:, ~free_vars], rhs)
    # Back-calculate all the fixed variables
    c = np.zeros(num_mets)
    c[free_c_mask] = c_free  # The concentration vars of the fixed variables
    c[fixed_c_mask] = (x[~free_vars & conc_x] - dgf[fixed_c_mask]) / RT
    # Calculate the fluxes
    # Exchange fluxes
    v = s_x @ x
    check_fluxes(S, b, c, conc_x, dgf, e, exchange_rxns, num_rxns, s_c, s_x, x)
    return v, c
def check_fluxes(S, b, c, conc_x, dgf, e, exchange_rxns, num_rxns, s_c, s_x, x):
    """Sanity-check that the solved x vector yields steady-state fluxes.

    Raises AssertionError if any metabolite's net rate of change is
    significantly different from zero.
    """
    # BUG FIX: the original compared signed values with `< 1e-10`, which would
    # accept arbitrarily large *negative* residuals; compare magnitudes instead.
    # Check the s_x matrix
    assert all(np.abs(S @ s_x @ x) < 1e-10), "All conc changes should be approximately 0"
    # Check the s_c matrix
    assert all(np.abs(s_c @ x) < 1e-10), "All conc changes should be approximately 0"
    # Check the standard calculation: rebuild the flux vector directly from
    # the thermodynamic driving forces and compare against steady state.
    test_v = np.zeros(num_rxns)
    dgr = S.T[~exchange_rxns] @ (dgf + RT * c)
    test_v[~exchange_rxns] = dgr * b * e
    test_v[exchange_rxns] = x[~conc_x]
    assert all(np.abs(S @ test_v) < 1e-10)
def find_params(temp_dir):
    """ Make a dataframe filled with samples of model parameters that have reasonable values"""
    # Now write the measurements to file
    # NOTE(review): result_dir is computed but never used in this function.
    result_dir = temp_dir / "results"
    S = pd.read_csv(temp_dir / "stoichiometry.csv", index_col=0)
    exchange_rxns = S.columns.str.contains("SK_") | S.columns.str.contains("EX_")
    # Get the free and fixed fluxes
    n_internal = (~exchange_rxns).sum()
    # NOTE(review): exchange_rxns (and later n_internal) are recomputed below
    # with identical expressions; the repeats look redundant.
    exchange_rxns = S.columns.str.contains("SK_") | S.columns.str.contains("EX_")
    s_c = get_s_c(S, np.ones(n_internal), np.ones(n_internal), exchange_rxns)
    # Flip so the free variables are chosen from the end of the x vector.
    free_vars, _ = get_free_fluxes(np.flip(s_c, axis=1))
    free_vars = np.flip(free_vars)
    dgf = pd.read_csv(temp_dir / "priors.csv", index_col=1)["loc"]
    exchange_rxns = S.columns.str.contains("SK_") | S.columns.str.contains("EX_")
    n_internal = (~exchange_rxns).sum()
    params = []
    # Rejection sampling: draw random free parameters and keep only draws
    # whose implied fixed parameters also fall in plausible (log) ranges.
    for i in range(1000):
        c_free = np.exp(np.random.randn(1) * 2 - 8)
        t_free = np.array([1])
        b = np.exp(np.random.randn(n_internal) * 3 + 3)
        e = np.exp(np.random.randn(n_internal) * 2 - 8)
        v, c = calc_fixed(S, b, e, np.log(c_free), t_free, dgf, free_vars)
        dgr = S.loc[:, ~exchange_rxns].T @ (dgf + RT * c)
        # Check for reasonable values of all parameters (including the fixed params)
        c_range = (c > -11) & (c < -5)
        b_range = (np.log(b) > -4) & (np.log(b) < 8)
        e_range = (np.log(e) > -11) & (np.log(e) < -5)
        # NOTE(review): mixes `and` with `&` — presumably both were meant to
        # be `and`; confirm intended semantics.
        if all(c_range) and all(b_range) & all(e_range):
            param = chain.from_iterable([dgf, c, b, e, v, dgr])
            params.append(param)
    # Column names use the last draw's vectors only to size/label the columns.
    columns = chain.from_iterable(
        [namevec("dgf", dgf), namevec("c", c), namevec("b", b), namevec("e", e), namevec("v", v),
         namevec("dgr", dgr)])
    return pd.DataFrame(params, columns=list(columns))
def m_ind(mask):
    """Convert a boolean mask into a plain list of integer indices for sympy."""
    return np.flatnonzero(mask).tolist()
def sym_algrebra_solve(S, free_vars):
    """Symbolically express the fixed x variables in terms of the free ones.

    NOTE(review): incomplete — the jacobian is computed but nothing is
    returned yet (see the TODO at the end).
    """
    exchange_rxns = S.columns.str.contains("SK_") | S.columns.str.contains("EX_")
    S_sym = sympy.Matrix(S)
    n_mets, n_rxns = S.shape
    n_exchange = exchange_rxns.sum()
    # Same construction as get_s_x, but without the numeric b*e scaling:
    # the b coefficients are introduced symbolically below.
    s_x = np.zeros((n_rxns, n_exchange + n_mets))
    s_x[:n_exchange, :n_exchange] = np.identity(n_exchange)
    s_x[n_exchange:, n_exchange:] = S.loc[:, ~exchange_rxns].T
    S_x_sym = sympy.Matrix(s_x)
    b1, b2, b3, b4, b5 = sympy.symbols("b1 b2 b3 b4 b5")
    # Scale the five internal-reaction rows by their symbolic b parameters
    # (hard-coded for a model with 2 exchange and 5 internal reactions).
    S_x_sym[2, :] = S_x_sym[2, :] * b1
    S_x_sym[3, :] = S_x_sym[3, :] * b2
    S_x_sym[4, :] = S_x_sym[4, :] * b3
    S_x_sym[5, :] = S_x_sym[5, :] * b4
    S_x_sym[6, :] = S_x_sym[6, :] * b5
    s_c_sym = S_sym @ S_x_sym
    # Split s_c into fixed-variable and free-variable column blocks.
    s_fi_sym = s_c_sym[:, m_ind(~free_vars)]
    s_fi_inv = s_fi_sym.inv()
    s_fr_sym = s_c_sym[:, m_ind(free_vars)]
    x_fr = sympy.symbols("t1 c1")
    x_fr = sympy.Matrix(x_fr)
    # Fixed variables as a linear function of the free variables.
    x_fi = s_fi_inv @ -s_fr_sym @ x_fr
    jacobian = x_fi.jacobian([b1, b2, b3, b4, b5, x_fr[0], x_fr[1]])
    # TODO: Continue to solve the system symbolically
if __name__ == "__main__":
    # NOTE(review): this bare string is a no-op statement — presumably the
    # intended example argument ("./data/fake/simulation_study"); confirm
    # whether it should be a fallback for sys.argv.
    "./data/fake/simulation_study"
    temp_dir = Path(sys.argv[1])
    print(temp_dir.absolute())
    if not temp_dir.exists():
        logger.error("The given directory doesn't exist")
        sys.exit()
    # Sample parameter sets and write them next to the input data.
    reasonable_samples = find_params(temp_dir)
reasonable_samples.to_csv(temp_dir / "samples.csv") | [
"logging.getLogger",
"numpy.identity",
"numpy.flip",
"numpy.linalg.solve",
"numpy.ones",
"pandas.read_csv",
"pathlib.Path",
"numpy.log",
"sympy.Matrix",
"sympy.symbols",
"numpy.array",
"numpy.zeros",
"itertools.chain.from_iterable",
"sys.exit",
"numpy.full",
"numpy.random.randn"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((813, 852), 'numpy.zeros', 'np.zeros', (['(n_rxns, n_exchange + n_mets)'], {}), '((n_rxns, n_exchange + n_mets))\n', (821, 852), True, 'import numpy as np\n'), ((889, 912), 'numpy.identity', 'np.identity', (['n_exchange'], {}), '(n_exchange)\n', (900, 912), True, 'import numpy as np\n'), ((1812, 1833), 'numpy.full', 'np.full', (['num_x', '(False)'], {}), '(num_x, False)\n', (1819, 1833), True, 'import numpy as np\n'), ((2010, 2032), 'numpy.full', 'np.full', (['num_x', 'np.NAN'], {}), '(num_x, np.NAN)\n', (2017, 2032), True, 'import numpy as np\n'), ((2417, 2457), 'numpy.linalg.solve', 'np.linalg.solve', (['s_c[:, ~free_vars]', 'rhs'], {}), '(s_c[:, ~free_vars], rhs)\n', (2432, 2457), True, 'import numpy as np\n'), ((2511, 2529), 'numpy.zeros', 'np.zeros', (['num_mets'], {}), '(num_mets)\n', (2519, 2529), True, 'import numpy as np\n'), ((3187, 3205), 'numpy.zeros', 'np.zeros', (['num_rxns'], {}), '(num_rxns)\n', (3195, 3205), True, 'import numpy as np\n'), ((3580, 3636), 'pandas.read_csv', 'pd.read_csv', (["(temp_dir / 'stoichiometry.csv')"], {'index_col': '(0)'}), "(temp_dir / 'stoichiometry.csv', index_col=0)\n", (3591, 3636), True, 'import pandas as pd\n'), ((4028, 4046), 'numpy.flip', 'np.flip', (['free_vars'], {}), '(free_vars)\n', (4035, 4046), True, 'import numpy as np\n'), ((5484, 5499), 'sympy.Matrix', 'sympy.Matrix', (['S'], {}), '(S)\n', (5496, 5499), False, 'import sympy\n'), ((5576, 5615), 'numpy.zeros', 'np.zeros', (['(n_rxns, n_exchange + n_mets)'], {}), '((n_rxns, n_exchange + n_mets))\n', (5584, 5615), True, 'import numpy as np\n'), ((5652, 5675), 'numpy.identity', 'np.identity', (['n_exchange'], {}), '(n_exchange)\n', (5663, 5675), True, 'import numpy as np\n'), ((5753, 5770), 'sympy.Matrix', 'sympy.Matrix', (['s_x'], {}), '(s_x)\n', (5765, 5770), False, 'import sympy\n'), ((5796, 5827), 'sympy.symbols', 
'sympy.symbols', (['"""b1 b2 b3 b4 b5"""'], {}), "('b1 b2 b3 b4 b5')\n", (5809, 5827), False, 'import sympy\n'), ((6183, 6205), 'sympy.symbols', 'sympy.symbols', (['"""t1 c1"""'], {}), "('t1 c1')\n", (6196, 6205), False, 'import sympy\n'), ((6217, 6235), 'sympy.Matrix', 'sympy.Matrix', (['x_fr'], {}), '(x_fr)\n', (6229, 6235), False, 'import sympy\n'), ((6455, 6472), 'pathlib.Path', 'Path', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (6459, 6472), False, 'from pathlib import Path\n'), ((3898, 3917), 'numpy.ones', 'np.ones', (['n_internal'], {}), '(n_internal)\n', (3905, 3917), True, 'import numpy as np\n'), ((3919, 3938), 'numpy.ones', 'np.ones', (['n_internal'], {}), '(n_internal)\n', (3926, 3938), True, 'import numpy as np\n'), ((3990, 4010), 'numpy.flip', 'np.flip', (['s_c'], {'axis': '(1)'}), '(s_c, axis=1)\n', (3997, 4010), True, 'import numpy as np\n'), ((4057, 4106), 'pandas.read_csv', 'pd.read_csv', (["(temp_dir / 'priors.csv')"], {'index_col': '(1)'}), "(temp_dir / 'priors.csv', index_col=1)\n", (4068, 4106), True, 'import pandas as pd\n'), ((4347, 4360), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (4355, 4360), True, 'import numpy as np\n'), ((6600, 6610), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6608, 6610), False, 'import sys\n'), ((4508, 4522), 'numpy.log', 'np.log', (['c_free'], {}), '(c_free)\n', (4514, 4522), True, 'import numpy as np\n'), ((4915, 4958), 'itertools.chain.from_iterable', 'chain.from_iterable', (['[dgf, c, b, e, v, dgr]'], {}), '([dgf, c, b, e, v, dgr])\n', (4934, 4958), False, 'from itertools import chain\n'), ((564, 573), 'numpy.log', 'np.log', (['c'], {}), '(c)\n', (570, 573), True, 'import numpy as np\n'), ((4749, 4758), 'numpy.log', 'np.log', (['b'], {}), '(b)\n', (4755, 4758), True, 'import numpy as np\n'), ((4768, 4777), 'numpy.log', 'np.log', (['b'], {}), '(b)\n', (4774, 4777), True, 'import numpy as np\n'), ((4802, 4811), 'numpy.log', 'np.log', (['e'], {}), '(e)\n', (4808, 4811), True, 'import numpy as np\n'), 
((4822, 4831), 'numpy.log', 'np.log', (['e'], {}), '(e)\n', (4828, 4831), True, 'import numpy as np\n'), ((4302, 4320), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (4317, 4320), True, 'import numpy as np\n'), ((4380, 4407), 'numpy.random.randn', 'np.random.randn', (['n_internal'], {}), '(n_internal)\n', (4395, 4407), True, 'import numpy as np\n'), ((4436, 4463), 'numpy.random.randn', 'np.random.randn', (['n_internal'], {}), '(n_internal)\n', (4451, 4463), True, 'import numpy as np\n')] |
from numpy import matlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse.linalg import svds
from scipy.sparse import csc_matrix
class ohmlr(object):
    """One-hot multinomial logistic regression.

    Categorical features are one-hot encoded and a per-class intercept `v`
    plus per-feature-level weights `w` are fitted by an iterative
    additive-update scheme that uses an SVD-based pseudoinverse solve.
    """

    def __init__(self, x_classes=None, y_classes=None, random_coeff=False):
        # x_classes: per-feature list of allowed category values (optional).
        # y_classes: allowed target values (optional).
        # random_coeff: initialise v/w randomly (centred) instead of zeros.
        self.x_classes = x_classes
        self.y_classes = y_classes
        self.random_coeff = random_coeff
        if x_classes is not None and y_classes is not None:
            n_y_classes = len(y_classes)
            n_features = len(x_classes)
            n_x_classes = np.asarray([len(x_class) for x_class in x_classes])
            n_x_classes_sum = np.sum(n_x_classes)
            # Map each (sorted) category value to its integer index.
            y_map = {s: i for i, s in enumerate(np.sort(y_classes))}
            x_map = [{s: i
                      for i, s in enumerate(np.sort(x_class))}
                     for x_class in x_classes]
            if random_coeff:
                v = np.random.normal(size=n_y_classes)
                w = np.array([
                    np.random.normal(size=(n, n_y_classes))
                    for n in n_x_classes
                ])
                # Centre the coefficients so they are identifiable.
                v -= v.mean()
                for i in range(n_features):
                    w[i] -= w[i].mean(0)
                    w[i] -= w[i].mean(1)[:, np.newaxis]
            else:
                v = np.zeros(n_y_classes)
                w = np.array(
                    [np.zeros(shape=(n, n_y_classes)) for n in n_x_classes])
            self.n_y_classes = n_y_classes
            self.n_features = n_features
            self.n_x_classes = n_x_classes
            self.n_x_classes_sum = n_x_classes_sum
            self.x_map = x_map
            self.y_map = y_map
            self.v = v
            self.w = w

    # decision_function(X) Predict confidence scores for samples.
    def categorize_(self, u, u_classes):
        """Encode values `u` as integer codes; infer classes if none given."""
        if u_classes is None:
            u_classes = np.unique(u)
        u_map = {s: i for i, s in enumerate(u_classes)}
        u_int = np.asarray([u_map[ui] for ui in u])
        return u_int, u_map

    def predict_proba(self, x, return_weights=False):
        """Return class probabilities for samples `x` (softmax over scores).

        If `return_weights` is True, also return the raw scores `h`.
        """
        n_features = self.n_features
        v = self.v
        w = self.w
        x_map = self.x_map
        x = np.asarray(x)
        n_samples = x.shape[0]
        # Translate raw category values into integer codes per feature.
        x_int = np.asarray(
            [[x_map[j][xij] for j, xij in enumerate(xi)] for xi in x])
        # Score = intercept + sum of the selected weight row of each feature.
        h = v + np.asarray([
            np.sum([w[j][x_int[i, j]] for j in range(n_features)], 0)
            for i in range(n_samples)
        ])
        h = np.asarray(h)
        p = np.exp(h)
        p /= p.sum(1)[:, np.newaxis]
        if return_weights:
            return p, h
        return p

    def predict_log_proba(self, x):
        """Return log of the predicted class probabilities."""
        return np.log(self.predict_proba(x))

    def predict(self, x):
        """Return the most probable class for each sample.

        NOTE(review): indexes y_map (a value->index dict) with the argmax
        *index*; this only works when the target classes are 0..K-1 —
        confirm for non-integer class labels.
        """
        y_map = self.y_map
        p = self.predict_proba(x)
        y_int = p.argmax(1)
        y = np.asarray([y_map[yi] for yi in y_int])
        return y

    def score(self, x, y):
        """Return mean accuracy of predictions on (x, y)."""
        return (self.predict(x) == y).mean()

    def fit(self,
            x,
            y,
            atol=1e-4,
            rtol=1e-3,
            max_iter=500,
            v_init=None,
            w_init=None):
        """Fit intercepts v and weights w to categorical data (x, y).

        atol/rtol are retained for the (currently commented-out) stopping
        rules; the loop always runs max_iter iterations.
        """
        x = np.asarray(x)
        y = np.asarray(y)
        n_samples, n_features = x.shape
        x_classes = self.x_classes
        y_classes = self.y_classes
        if x_classes is None:
            x_classes = n_features * [None]
        elif np.asarray(x_classes).ndim == 1:
            # A single shared class list: replicate it for every feature.
            x_classes = np.tile(
                np.asarray(x_classes)[:, np.newaxis], n_features).T
        tmp = [self.categorize_(xi, ci) for xi, ci in zip(x.T, x_classes)]
        x_int = np.asarray([t[0] for t in tmp]).T
        x_map = [t[1] for t in tmp]
        n_x_classes = np.asarray([len(m) for m in x_map])
        n_x_classes_sum = np.sum(n_x_classes)
        n_x_classes_cumsum = np.insert(n_x_classes.cumsum(), 0, 0)
        # one-hot encoding of x
        x_oh = csc_matrix((np.ones(n_samples * n_features),
                           (np.repeat(np.arange(n_samples), n_features),
                            (x_int + n_x_classes_cumsum[:-1]).flatten())),
                          shape=(n_samples, n_x_classes_sum))
        y_int, y_map = self.categorize_(y, y_classes)
        n_y_classes = len(y_map)
        # one-hot encoding of y
        y_oh = csc_matrix((np.ones(n_samples), (np.arange(n_samples), y_int)))
        # 'cold' classes
        y_hot = (y_oh.toarray().astype(bool))
        y_cold = ~(y_oh.toarray().astype(bool))
        # Start/end column index of each feature's block in the one-hot matrix.
        i1i2 = np.stack([n_x_classes_cumsum[:-1], n_x_classes_cumsum[1:]]).T
        if v_init is None:
            v = matlib.zeros(n_y_classes)
        else:
            v = np.asmatrix(v_init)
        if w_init is None:
            w = matlib.zeros((n_x_classes_sum, n_y_classes))
        else:
            w = np.asmatrix(np.vstack(w_init))

        # Apply the SVD pseudoinverse: w = V * (1/s) * U^T * u.
        def solve1(u, pinv):
            w = pinv[2].dot(u)
            w = np.multiply(pinv[1], w)
            w = pinv[0] * w
            return w

        # Variant used when the SVD was taken of x_oh^T x_oh instead of x_oh.
        def solve2(u, pinv):
            return solve1(x_oh.T * u, pinv)

        # Decompose whichever of x_oh / x_oh^T x_oh is smaller.
        if x_oh.shape[0] < x_oh.shape[1]:
            solve = solve1
            z = x_oh
            k = x_oh.shape[0] - 1
        else:
            solve = solve2
            z = x_oh.T * x_oh
            k = n_x_classes_sum - n_features + 1
        # SVD-based solve of x_oh * w = h
        svd = svds(z, k=k)
        sv_pinv = svd[1].copy()
        zero_sv = np.isclose(sv_pinv, 0)
        sv_pinv[zero_sv] = 0.0
        sv_pinv[~zero_sv] = 1.0 / sv_pinv[~zero_sv]
        pinv = (svd[2].T, sv_pinv[:, np.newaxis], svd[0].T)
        # discrepancy
        disc = [1.0 / float(n_y_classes)**2 + 1]
        err = [1.0 / float(n_y_classes)**2 + 1]
        ll = []
        for it in range(1, max_iter + 1):
            h0 = v
            h1 = x_oh * w
            p = np.exp(h0 + h1)
            p /= p.sum(1)
            # additive update
            dh = y_oh - p
            v = (h0 + dh).mean(0)
            w = solve(h1 + dh, pinv)
            # Re-centre v and w for identifiability after each update.
            v -= v.mean()
            w -= w.mean(1)
            for i1, i2 in i1i2:
                w[i1:i2] -= w[i1:i2].mean(0)
            # discrepancy: avg 2-norm squared of cold entries
            disc.append(np.power(p[y_cold], 2).mean())
            err.append((np.asarray(dh)**2).sum(1).mean(0))
            ll.append(-np.log(p[y_hot]).mean())
            # if disc[-1] > disc[-2]:
            #     # print('DISC BREAK !!!!!!', it, '!!!!!!!!!!!!')
            #     break
            # if np.abs(err[-1] - err[-2]) < atol:
            #     # print('AERR BREAK !!!!!!', it, '!!!!!!!!!!!!')
            #     break
            # if np.abs(err[-1] - err[-2]) / err[-2] < rtol:
            #     # print('RERR BREAK !!!!!!', it, '!!!!!!!!!!!!')
            #     break
        # if it == max_iter:
        #     # print('NO BREAKKKKKK', it)
        # Convert matrices back to plain arrays; split w by feature block.
        v = np.asarray(v).squeeze()
        w = np.array([np.asarray(w[i1:i2]) for i1, i2 in i1i2])
        disc = disc[1:]
        err = err[1:]
        self.n_samples = n_samples
        self.n_features = n_features
        self.n_x_classes = n_x_classes
        self.n_y_classes = n_y_classes
        self.x = x
        self.x_int = x_int
        self.x_map = x_map
        self.x_oh = x_oh
        self.y = y
        self.y_int = y_int
        self.y_map = y_map
        self.y_oh = y_oh
        self.pinv = pinv
        self.v = v
        self.w = w
        self.disc = disc
        self.err = err
        self.ll = ll

    # def random(self, n_features=None, n_x_classes=None, n_y_classes=None):
    #     if self.x_classes is not None:
    #         n_features = len(self.x_classes)
    #         n_x_classes = [len(x_class) for x_class in self.x_classes]
    #     if self.y_classes is not None:
    #         n_y_classes = len(self.y_classes)
    #     v = np.random.normal(size=n_y_classes)
    #     w = np.array(
    #         [np.random.normal(size=(n, n_y_classes)) for n in n_x_classes])
    #     v -= v.mean()
    #     for i in range(n_features):
    #         w[i] -= w[i].mean(0)
    #         w[i] -= w[i].mean(1)[:, np.newaxis]
    #     self.n_features = n_features
    #     self.n_x_classes = n_x_classes
    #     self.n_y_classes = n_y_classes
    #     self.v = v
    #     self.w = w
    #     return self

    def generate_data(self, n_samples):
        """Sample `n_samples` (x, y) pairs from the model's own distribution."""
        n_features = self.n_features
        n_x_classes = self.n_x_classes
        n_y_classes = self.n_y_classes
        v = self.v
        w = self.w
        # Uniformly random category codes for each feature.
        x = np.hstack([
            np.random.randint(n, size=(n_samples, 1), dtype=int)
            for n in n_x_classes
        ])
        h = v + np.array([
            np.sum([w[j][x[i, j]] for j in range(n_features)], 0)
            for i in range(n_samples)
        ])
        p = np.exp(h)
        p /= p.sum(1)[:, np.newaxis]
        # Inverse-CDF sampling of the class label from each row of p.
        y = (p.cumsum(1) < np.random.uniform(size=(n_samples, 1))).sum(1)
        return x, y

    def get_params(self, deep=True):
        """Return constructor parameters (scikit-learn style)."""
        return dict(
            x_classes=self.x_classes,
            y_classes=self.y_classes,
            random_coeff=self.random_coeff)
| [
"numpy.asmatrix",
"numpy.log",
"scipy.sparse.linalg.svds",
"numpy.arange",
"numpy.multiply",
"numpy.sort",
"numpy.asarray",
"numpy.exp",
"numpy.stack",
"numpy.vstack",
"numpy.random.normal",
"numpy.ones",
"numpy.matlib.zeros",
"numpy.isclose",
"numpy.unique",
"numpy.power",
"numpy.su... | [((1943, 1978), 'numpy.asarray', 'np.asarray', (['[u_map[ui] for ui in u]'], {}), '([u_map[ui] for ui in u])\n', (1953, 1978), True, 'import numpy as np\n'), ((2177, 2190), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2187, 2190), True, 'import numpy as np\n'), ((2482, 2495), 'numpy.asarray', 'np.asarray', (['h'], {}), '(h)\n', (2492, 2495), True, 'import numpy as np\n'), ((2508, 2517), 'numpy.exp', 'np.exp', (['h'], {}), '(h)\n', (2514, 2517), True, 'import numpy as np\n'), ((2834, 2873), 'numpy.asarray', 'np.asarray', (['[y_map[yi] for yi in y_int]'], {}), '([y_map[yi] for yi in y_int])\n', (2844, 2873), True, 'import numpy as np\n'), ((3149, 3162), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3159, 3162), True, 'import numpy as np\n'), ((3175, 3188), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (3185, 3188), True, 'import numpy as np\n'), ((3769, 3788), 'numpy.sum', 'np.sum', (['n_x_classes'], {}), '(n_x_classes)\n', (3775, 3788), True, 'import numpy as np\n'), ((5352, 5364), 'scipy.sparse.linalg.svds', 'svds', (['z'], {'k': 'k'}), '(z, k=k)\n', (5356, 5364), False, 'from scipy.sparse.linalg import svds\n'), ((5415, 5437), 'numpy.isclose', 'np.isclose', (['sv_pinv', '(0)'], {}), '(sv_pinv, 0)\n', (5425, 5437), True, 'import numpy as np\n'), ((8770, 8779), 'numpy.exp', 'np.exp', (['h'], {}), '(h)\n', (8776, 8779), True, 'import numpy as np\n'), ((609, 628), 'numpy.sum', 'np.sum', (['n_x_classes'], {}), '(n_x_classes)\n', (615, 628), True, 'import numpy as np\n'), ((1858, 1870), 'numpy.unique', 'np.unique', (['u'], {}), '(u)\n', (1867, 1870), True, 'import numpy as np\n'), ((3614, 3645), 'numpy.asarray', 'np.asarray', (['[t[0] for t in tmp]'], {}), '([t[0] for t in tmp])\n', (3624, 3645), True, 'import numpy as np\n'), ((4495, 4554), 'numpy.stack', 'np.stack', (['[n_x_classes_cumsum[:-1], n_x_classes_cumsum[1:]]'], {}), '([n_x_classes_cumsum[:-1], n_x_classes_cumsum[1:]])\n', (4503, 4554), True, 'import numpy as np\n'), 
((4601, 4626), 'numpy.matlib.zeros', 'matlib.zeros', (['n_y_classes'], {}), '(n_y_classes)\n', (4613, 4626), False, 'from numpy import matlib\n'), ((4657, 4676), 'numpy.asmatrix', 'np.asmatrix', (['v_init'], {}), '(v_init)\n', (4668, 4676), True, 'import numpy as np\n'), ((4720, 4764), 'numpy.matlib.zeros', 'matlib.zeros', (['(n_x_classes_sum, n_y_classes)'], {}), '((n_x_classes_sum, n_y_classes))\n', (4732, 4764), False, 'from numpy import matlib\n'), ((4903, 4926), 'numpy.multiply', 'np.multiply', (['pinv[1]', 'w'], {}), '(pinv[1], w)\n', (4914, 4926), True, 'import numpy as np\n'), ((5822, 5837), 'numpy.exp', 'np.exp', (['(h0 + h1)'], {}), '(h0 + h1)\n', (5828, 5837), True, 'import numpy as np\n'), ((885, 919), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'n_y_classes'}), '(size=n_y_classes)\n', (901, 919), True, 'import numpy as np\n'), ((1280, 1301), 'numpy.zeros', 'np.zeros', (['n_y_classes'], {}), '(n_y_classes)\n', (1288, 1301), True, 'import numpy as np\n'), ((3916, 3947), 'numpy.ones', 'np.ones', (['(n_samples * n_features)'], {}), '(n_samples * n_features)\n', (3923, 3947), True, 'import numpy as np\n'), ((4308, 4326), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (4315, 4326), True, 'import numpy as np\n'), ((4807, 4824), 'numpy.vstack', 'np.vstack', (['w_init'], {}), '(w_init)\n', (4816, 4824), True, 'import numpy as np\n'), ((6860, 6873), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (6870, 6873), True, 'import numpy as np\n'), ((6906, 6926), 'numpy.asarray', 'np.asarray', (['w[i1:i2]'], {}), '(w[i1:i2])\n', (6916, 6926), True, 'import numpy as np\n'), ((8519, 8571), 'numpy.random.randint', 'np.random.randint', (['n'], {'size': '(n_samples, 1)', 'dtype': 'int'}), '(n, size=(n_samples, 1), dtype=int)\n', (8536, 8571), True, 'import numpy as np\n'), ((3388, 3409), 'numpy.asarray', 'np.asarray', (['x_classes'], {}), '(x_classes)\n', (3398, 3409), True, 'import numpy as np\n'), ((4329, 4349), 'numpy.arange', 
'np.arange', (['n_samples'], {}), '(n_samples)\n', (4338, 4349), True, 'import numpy as np\n'), ((8844, 8882), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n_samples, 1)'}), '(size=(n_samples, 1))\n', (8861, 8882), True, 'import numpy as np\n'), ((677, 695), 'numpy.sort', 'np.sort', (['y_classes'], {}), '(y_classes)\n', (684, 695), True, 'import numpy as np\n'), ((971, 1010), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, n_y_classes)'}), '(size=(n, n_y_classes))\n', (987, 1010), True, 'import numpy as np\n'), ((1353, 1385), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n_y_classes)'}), '(shape=(n, n_y_classes))\n', (1361, 1385), True, 'import numpy as np\n'), ((3987, 4007), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3996, 4007), True, 'import numpy as np\n'), ((6210, 6232), 'numpy.power', 'np.power', (['p[y_cold]', '(2)'], {}), '(p[y_cold], 2)\n', (6218, 6232), True, 'import numpy as np\n'), ((769, 785), 'numpy.sort', 'np.sort', (['x_class'], {}), '(x_class)\n', (776, 785), True, 'import numpy as np\n'), ((3470, 3491), 'numpy.asarray', 'np.asarray', (['x_classes'], {}), '(x_classes)\n', (3480, 3491), True, 'import numpy as np\n'), ((6323, 6339), 'numpy.log', 'np.log', (['p[y_hot]'], {}), '(p[y_hot])\n', (6329, 6339), True, 'import numpy as np\n'), ((6265, 6279), 'numpy.asarray', 'np.asarray', (['dh'], {}), '(dh)\n', (6275, 6279), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#move this notebook to folder above syndef to run
from syndef import synfits  # synestia snapshots (impact database)
import numpy as np
import matplotlib.pyplot as plt

# Sampling grids for the analytic density model (metres).
test_rxy = np.linspace(7e6, 60e6, 100)
test_z = np.linspace(0.001e6, 30e6, 50)
rxy = np.log10(test_rxy / 1e6)  # log10 Mm
z = np.log10(test_z / 1e6)  # log10 Mm
TESTRXY, TESTZ = np.meshgrid(test_rxy, test_z)  # 2-D grid of rxy, z for colour plots

# Pull outer-disk (and midplane) log densities and positions out of each snapshot.
snapshots = [synfits.SNAP_Canup, synfits.SNAP_CukStewart, synfits.SNAP_Quintana]
titles = ['Canup', 'Cuk and Stewart', 'Quintana']
snaprho = [np.log10(s.rho[s.ind_outer]) for s in snapshots]
snaprhomid = [np.log10(s.rho[s.ind_outer_mid]) for s in snapshots]
snaprxy = [np.log10(s.rxy[s.ind_outer] / 1e6) for s in snapshots]
snaprxymid = [np.log10(s.rxy[s.ind_outer_mid] / 1e6) for s in snapshots]
snapz = [np.log10(s.z[s.ind_outer] / 1e6) for s in snapshots]

# Analytic fit constants.
const1 = 10.5  # 10 to 11; 10.55 (fiducial)
const2 = 0.86  # 0.85 to 0.9; 0.86 (fiducial)
const3 = 1e38  # 0.9e35 (fiducial) / 1.5e33 (underestimate) / 1.1e37 (cross) / 1e38 (overestimate)
const4 = -5.1  # -4.7 (fiducial) / -4.5 (underestimate) / -5 (cross) / -5.1 (overestimate)
test_z_s = const1 * np.power(TESTRXY, const2)  # scale height fit in m
test_rho_g = const3 * np.power(TESTRXY, const4) * np.exp(-np.power(TESTZ / test_z_s, 2))
test_rho_gmid = const3 * np.power(test_rxy, const4)

# Midplane density: snapshot particles (red) vs. analytic fit (black), one panel per impact.
plt.figure(figsize=(16, 5))
for panel, (rxy_mid, rho_mid, title) in enumerate(zip(snaprxymid, snaprhomid, titles)):
    plt.subplot(131 + panel)
    plt.plot(rxy_mid, rho_mid, 'r.')
    plt.plot(np.log10(test_rxy / 1e6), np.log10(test_rho_gmid), 'k')
    plt.xlabel('log r$_{xy}$ (Mm)')
    plt.ylabel('log midplane density (kg/m$^3$)')
    plt.title(title)
    plt.xlim([.8, 2])
    plt.ylim([-2, 3])
plt.show()
plt.close()

# Full (r_xy, z) density maps with snapshot particles overplotted.
plt.figure(figsize=(16, 5))
for panel in range(3):
    plt.subplot(131 + panel)
    plt.pcolor(np.log10(TESTRXY / 1e6), np.log10(TESTZ / 1e6), np.log10(test_rho_g))
    plt.scatter(snaprxy[panel], snapz[panel], c=snaprho[panel])
    plt.xlabel('log r$_{xy}$ (Mm)')
    plt.ylabel('log z (Mm)')
    plt.colorbar(label='log density (kg/m$^3$)')
    plt.xlim([.8, 2])
plt.show()
plt.close()
# In[ ]:
| [
"numpy.log10",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"numpy.meshgrid",
"ma... | [((232, 271), 'numpy.linspace', 'np.linspace', (['(7000000.0)', '(60000000.0)', '(100)'], {}), '(7000000.0, 60000000.0, 100)\n', (243, 271), True, 'import numpy as np\n'), ((268, 303), 'numpy.linspace', 'np.linspace', (['(1000.0)', '(30000000.0)', '(50)'], {}), '(1000.0, 30000000.0, 50)\n', (279, 303), True, 'import numpy as np\n'), ((304, 334), 'numpy.log10', 'np.log10', (['(test_rxy / 1000000.0)'], {}), '(test_rxy / 1000000.0)\n', (312, 334), True, 'import numpy as np\n'), ((339, 367), 'numpy.log10', 'np.log10', (['(test_z / 1000000.0)'], {}), '(test_z / 1000000.0)\n', (347, 367), True, 'import numpy as np\n'), ((384, 413), 'numpy.meshgrid', 'np.meshgrid', (['test_rxy', 'test_z'], {}), '(test_rxy, test_z)\n', (395, 413), True, 'import numpy as np\n'), ((719, 781), 'numpy.log10', 'np.log10', (['synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer]'], {}), '(synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer])\n', (727, 781), True, 'import numpy as np\n'), ((791, 863), 'numpy.log10', 'np.log10', (['synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer]'], {}), '(synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer])\n', (799, 863), True, 'import numpy as np\n'), ((873, 941), 'numpy.log10', 'np.log10', (['synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer]'], {}), '(synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer])\n', (881, 941), True, 'import numpy as np\n'), ((955, 1021), 'numpy.log10', 'np.log10', (['synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer_mid]'], {}), '(synfits.SNAP_Canup.rho[synfits.SNAP_Canup.ind_outer_mid])\n', (963, 1021), True, 'import numpy as np\n'), ((1034, 1110), 'numpy.log10', 'np.log10', (['synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer_mid]'], {}), '(synfits.SNAP_CukStewart.rho[synfits.SNAP_CukStewart.ind_outer_mid])\n', (1042, 1110), True, 'import numpy as np\n'), ((1123, 1195), 'numpy.log10', 'np.log10', (['synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer_mid]'], {}), 
'(synfits.SNAP_Quintana.rho[synfits.SNAP_Quintana.ind_outer_mid])\n', (1131, 1195), True, 'import numpy as np\n'), ((1206, 1280), 'numpy.log10', 'np.log10', (['(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer] / 1000000.0)'], {}), '(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer] / 1000000.0)\n', (1214, 1280), True, 'import numpy as np\n'), ((1282, 1371), 'numpy.log10', 'np.log10', (['(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer] / 1000000.0)'], {}), '(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer] / \n 1000000.0)\n', (1290, 1371), True, 'import numpy as np\n'), ((1368, 1453), 'numpy.log10', 'np.log10', (['(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer] / 1000000.0)'], {}), '(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer] / 1000000.0\n )\n', (1376, 1453), True, 'import numpy as np\n'), ((1454, 1532), 'numpy.log10', 'np.log10', (['(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer_mid] / 1000000.0)'], {}), '(synfits.SNAP_Canup.rxy[synfits.SNAP_Canup.ind_outer_mid] / 1000000.0)\n', (1462, 1532), True, 'import numpy as np\n'), ((1537, 1629), 'numpy.log10', 'np.log10', (['(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer_mid] / 1000000.0\n )'], {}), '(synfits.SNAP_CukStewart.rxy[synfits.SNAP_CukStewart.ind_outer_mid] /\n 1000000.0)\n', (1545, 1629), True, 'import numpy as np\n'), ((1630, 1719), 'numpy.log10', 'np.log10', (['(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer_mid] / 1000000.0)'], {}), '(synfits.SNAP_Quintana.rxy[synfits.SNAP_Quintana.ind_outer_mid] / \n 1000000.0)\n', (1638, 1719), True, 'import numpy as np\n'), ((1715, 1787), 'numpy.log10', 'np.log10', (['(synfits.SNAP_Canup.z[synfits.SNAP_Canup.ind_outer] / 1000000.0)'], {}), '(synfits.SNAP_Canup.z[synfits.SNAP_Canup.ind_outer] / 1000000.0)\n', (1723, 1787), True, 'import numpy as np\n'), ((1787, 1874), 'numpy.log10', 'np.log10', (['(synfits.SNAP_CukStewart.z[synfits.SNAP_CukStewart.ind_outer] / 1000000.0)'], {}), 
'(synfits.SNAP_CukStewart.z[synfits.SNAP_CukStewart.ind_outer] / \n 1000000.0)\n', (1795, 1874), True, 'import numpy as np\n'), ((1869, 1947), 'numpy.log10', 'np.log10', (['(synfits.SNAP_Quintana.z[synfits.SNAP_Quintana.ind_outer] / 1000000.0)'], {}), '(synfits.SNAP_Quintana.z[synfits.SNAP_Quintana.ind_outer] / 1000000.0)\n', (1877, 1947), True, 'import numpy as np\n'), ((2394, 2421), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 5)'}), '(figsize=(16, 5))\n', (2404, 2421), True, 'import matplotlib.pyplot as plt\n'), ((2421, 2437), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (2432, 2437), True, 'import matplotlib.pyplot as plt\n'), ((2462, 2502), 'matplotlib.pyplot.plot', 'plt.plot', (['snaprxymid1', 'snaprhomid1', '"""r."""'], {}), "(snaprxymid1, snaprhomid1, 'r.')\n", (2470, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2562, 2593), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log r$_{xy}$ (Mm)"""'], {}), "('log r$_{xy}$ (Mm)')\n", (2572, 2593), True, 'import matplotlib.pyplot as plt\n'), ((2594, 2639), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log midplane density (kg/m$^3$)"""'], {}), "('log midplane density (kg/m$^3$)')\n", (2604, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2640, 2658), 'matplotlib.pyplot.title', 'plt.title', (['"""Canup"""'], {}), "('Canup')\n", (2649, 2658), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2677), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.8, 2]'], {}), '([0.8, 2])\n', (2667, 2677), True, 'import matplotlib.pyplot as plt\n'), ((2676, 2693), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2, 3]'], {}), '([-2, 3])\n', (2684, 2693), True, 'import matplotlib.pyplot as plt\n'), ((2694, 2710), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (2705, 2710), True, 'import matplotlib.pyplot as plt\n'), ((2735, 2775), 'matplotlib.pyplot.plot', 'plt.plot', (['snaprxymid2', 'snaprhomid2', '"""r."""'], {}), "(snaprxymid2, snaprhomid2, 'r.')\n", 
(2743, 2775), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2866), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log r$_{xy}$ (Mm)"""'], {}), "('log r$_{xy}$ (Mm)')\n", (2845, 2866), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2912), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log midplane density (kg/m$^3$)"""'], {}), "('log midplane density (kg/m$^3$)')\n", (2877, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2913, 2941), 'matplotlib.pyplot.title', 'plt.title', (['"""Cuk and Stewart"""'], {}), "('Cuk and Stewart')\n", (2922, 2941), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2960), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.8, 2]'], {}), '([0.8, 2])\n', (2950, 2960), True, 'import matplotlib.pyplot as plt\n'), ((2959, 2976), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2, 3]'], {}), '([-2, 3])\n', (2967, 2976), True, 'import matplotlib.pyplot as plt\n'), ((2977, 2993), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (2988, 2993), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3058), 'matplotlib.pyplot.plot', 'plt.plot', (['snaprxymid3', 'snaprhomid3', '"""r."""'], {}), "(snaprxymid3, snaprhomid3, 'r.')\n", (3026, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3118, 3149), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log r$_{xy}$ (Mm)"""'], {}), "('log r$_{xy}$ (Mm)')\n", (3128, 3149), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3195), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log midplane density (kg/m$^3$)"""'], {}), "('log midplane density (kg/m$^3$)')\n", (3160, 3195), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3217), 'matplotlib.pyplot.title', 'plt.title', (['"""Quintana"""'], {}), "('Quintana')\n", (3205, 3217), True, 'import matplotlib.pyplot as plt\n'), ((3218, 3236), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.8, 2]'], {}), '([0.8, 2])\n', (3226, 3236), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3252), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-2, 3]'], 
{}), '([-2, 3])\n', (3243, 3252), True, 'import matplotlib.pyplot as plt\n'), ((3252, 3262), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3260, 3262), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3274), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3272, 3274), True, 'import matplotlib.pyplot as plt\n'), ((3276, 3303), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 5)'}), '(figsize=(16, 5))\n', (3286, 3303), True, 'import matplotlib.pyplot as plt\n'), ((3303, 3319), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (3314, 3319), True, 'import matplotlib.pyplot as plt\n'), ((3395, 3436), 'matplotlib.pyplot.scatter', 'plt.scatter', (['snaprxy1', 'snapz1'], {'c': 'snaprho1'}), '(snaprxy1, snapz1, c=snaprho1)\n', (3406, 3436), True, 'import matplotlib.pyplot as plt\n'), ((3435, 3466), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log r$_{xy}$ (Mm)"""'], {}), "('log r$_{xy}$ (Mm)')\n", (3445, 3466), True, 'import matplotlib.pyplot as plt\n'), ((3467, 3491), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log z (Mm)"""'], {}), "('log z (Mm)')\n", (3477, 3491), True, 'import matplotlib.pyplot as plt\n'), ((3492, 3536), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""log density (kg/m$^3$)"""'}), "(label='log density (kg/m$^3$)')\n", (3504, 3536), True, 'import matplotlib.pyplot as plt\n'), ((3537, 3555), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.8, 2]'], {}), '([0.8, 2])\n', (3545, 3555), True, 'import matplotlib.pyplot as plt\n'), ((3555, 3571), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (3566, 3571), True, 'import matplotlib.pyplot as plt\n'), ((3647, 3688), 'matplotlib.pyplot.scatter', 'plt.scatter', (['snaprxy2', 'snapz2'], {'c': 'snaprho2'}), '(snaprxy2, snapz2, c=snaprho2)\n', (3658, 3688), True, 'import matplotlib.pyplot as plt\n'), ((3687, 3718), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log r$_{xy}$ (Mm)"""'], {}), "('log r$_{xy}$ 
(Mm)')\n", (3697, 3718), True, 'import matplotlib.pyplot as plt\n'), ((3719, 3743), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log z (Mm)"""'], {}), "('log z (Mm)')\n", (3729, 3743), True, 'import matplotlib.pyplot as plt\n'), ((3744, 3788), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""log density (kg/m$^3$)"""'}), "(label='log density (kg/m$^3$)')\n", (3756, 3788), True, 'import matplotlib.pyplot as plt\n'), ((3789, 3807), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.8, 2]'], {}), '([0.8, 2])\n', (3797, 3807), True, 'import matplotlib.pyplot as plt\n'), ((3807, 3823), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (3818, 3823), True, 'import matplotlib.pyplot as plt\n'), ((3899, 3940), 'matplotlib.pyplot.scatter', 'plt.scatter', (['snaprxy3', 'snapz3'], {'c': 'snaprho3'}), '(snaprxy3, snapz3, c=snaprho3)\n', (3910, 3940), True, 'import matplotlib.pyplot as plt\n'), ((3939, 3970), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log r$_{xy}$ (Mm)"""'], {}), "('log r$_{xy}$ (Mm)')\n", (3949, 3970), True, 'import matplotlib.pyplot as plt\n'), ((3971, 3995), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log z (Mm)"""'], {}), "('log z (Mm)')\n", (3981, 3995), True, 'import matplotlib.pyplot as plt\n'), ((3996, 4040), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""log density (kg/m$^3$)"""'}), "(label='log density (kg/m$^3$)')\n", (4008, 4040), True, 'import matplotlib.pyplot as plt\n'), ((4041, 4059), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.8, 2]'], {}), '([0.8, 2])\n', (4049, 4059), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4068), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4066, 4068), True, 'import matplotlib.pyplot as plt\n'), ((4069, 4080), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4078, 4080), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2244), 'numpy.power', 'np.power', (['TESTRXY', 'const2'], {}), '(TESTRXY, const2)\n', (2227, 2244), True, 
'import numpy as np\n'), ((2367, 2393), 'numpy.power', 'np.power', (['test_rxy', 'const4'], {}), '(test_rxy, const4)\n', (2375, 2393), True, 'import numpy as np\n'), ((2510, 2540), 'numpy.log10', 'np.log10', (['(test_rxy / 1000000.0)'], {}), '(test_rxy / 1000000.0)\n', (2518, 2540), True, 'import numpy as np\n'), ((2533, 2556), 'numpy.log10', 'np.log10', (['test_rho_gmid'], {}), '(test_rho_gmid)\n', (2541, 2556), True, 'import numpy as np\n'), ((2783, 2813), 'numpy.log10', 'np.log10', (['(test_rxy / 1000000.0)'], {}), '(test_rxy / 1000000.0)\n', (2791, 2813), True, 'import numpy as np\n'), ((2806, 2829), 'numpy.log10', 'np.log10', (['test_rho_gmid'], {}), '(test_rho_gmid)\n', (2814, 2829), True, 'import numpy as np\n'), ((3066, 3096), 'numpy.log10', 'np.log10', (['(test_rxy / 1000000.0)'], {}), '(test_rxy / 1000000.0)\n', (3074, 3096), True, 'import numpy as np\n'), ((3089, 3112), 'numpy.log10', 'np.log10', (['test_rho_gmid'], {}), '(test_rho_gmid)\n', (3097, 3112), True, 'import numpy as np\n'), ((3331, 3360), 'numpy.log10', 'np.log10', (['(TESTRXY / 1000000.0)'], {}), '(TESTRXY / 1000000.0)\n', (3339, 3360), True, 'import numpy as np\n'), ((3353, 3380), 'numpy.log10', 'np.log10', (['(TESTZ / 1000000.0)'], {}), '(TESTZ / 1000000.0)\n', (3361, 3380), True, 'import numpy as np\n'), ((3373, 3393), 'numpy.log10', 'np.log10', (['test_rho_g'], {}), '(test_rho_g)\n', (3381, 3393), True, 'import numpy as np\n'), ((3583, 3612), 'numpy.log10', 'np.log10', (['(TESTRXY / 1000000.0)'], {}), '(TESTRXY / 1000000.0)\n', (3591, 3612), True, 'import numpy as np\n'), ((3605, 3632), 'numpy.log10', 'np.log10', (['(TESTZ / 1000000.0)'], {}), '(TESTZ / 1000000.0)\n', (3613, 3632), True, 'import numpy as np\n'), ((3625, 3645), 'numpy.log10', 'np.log10', (['test_rho_g'], {}), '(test_rho_g)\n', (3633, 3645), True, 'import numpy as np\n'), ((3835, 3864), 'numpy.log10', 'np.log10', (['(TESTRXY / 1000000.0)'], {}), '(TESTRXY / 1000000.0)\n', (3843, 3864), True, 'import numpy as np\n'), 
((3857, 3884), 'numpy.log10', 'np.log10', (['(TESTZ / 1000000.0)'], {}), '(TESTZ / 1000000.0)\n', (3865, 3884), True, 'import numpy as np\n'), ((3877, 3897), 'numpy.log10', 'np.log10', (['test_rho_g'], {}), '(test_rho_g)\n', (3885, 3897), True, 'import numpy as np\n'), ((2285, 2310), 'numpy.power', 'np.power', (['TESTRXY', 'const4'], {}), '(TESTRXY, const4)\n', (2293, 2310), True, 'import numpy as np\n'), ((2318, 2347), 'numpy.power', 'np.power', (['(TESTZ / test_z_s)', '(2)'], {}), '(TESTZ / test_z_s, 2)\n', (2326, 2347), True, 'import numpy as np\n')] |
from typing import List, Tuple, Optional
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import cm
import matplotlib.colors as mplcolors
from ramachandran.io import read_residue_torsion_collection_from_file
def get_coordinates_on_reference_map(
        phi_psi_angle: Tuple[float, float],
        reference_map: np.ndarray) -> Tuple[int, int]:
    """Convert a (phi, psi) pair in degrees to (row, col) indices of *reference_map*.

    Row 0 corresponds to psi = +180 and column 0 to phi = -180, matching a
    map rendered with its origin in the upper-left corner.
    """
    phi, psi = phi_psi_angle
    height = reference_map.shape[0]
    width = reference_map.shape[1]
    row = int((180 - psi) / 360 * height)
    col = int((phi + 180) / 360 * width)
    # psi == -180 or phi == +180 lands exactly one cell past the end; fold back.
    if row == height:
        row = height - 1
    if col == width:
        col = width - 1
    return (row, col)
def create_ramachandran_plot(phi_psi_angles: List[Tuple[float, float]],
                             plot_file_path: str,
                             reference_map: Optional[np.ndarray] = None,
                             cmap: Optional[mplcolors.ListedColormap] = None,
                             protein_name: Optional[str] = None,
                             rendering_interpolation: bool = True) -> None:
    """Create a Ramachandran scatter plot and save it as an SVG file.

    Args:
        phi_psi_angles: (phi, psi) dihedral pairs in degrees, one per residue.
        plot_file_path: Destination path for the SVG figure.
        reference_map: Optional 2-D favorability map. When given, residues
            whose map value falls below the 60th percentile are drawn red.
        cmap: Colormap for the reference-map background (must be provided
            together with reference_map; its .N is used for the norm).
        protein_name: Optional figure title.
        rendering_interpolation: Bilinearly interpolate the background map.
    """
    phi_psi_angles_numpy = np.array(phi_psi_angles)
    x_numpy = phi_psi_angles_numpy[:, 0]
    y_numpy = phi_psi_angles_numpy[:, 1]

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(111)
    if protein_name is not None:
        ax.set_title(protein_name, fontsize=24)

    interpolation = "bilinear" if rendering_interpolation else None

    if reference_map is not None:
        # Hoisted: the original recomputed np.rot90(reference_map) twice per
        # residue inside the outlier loop below.
        rotated_map = np.rot90(reference_map)
        percentile_1 = np.percentile(reference_map, 60)
        percentile_2 = np.percentile(reference_map, 90)
        ax.imshow(rotated_map,
                  interpolation=interpolation,
                  cmap=cmap,
                  norm=mplcolors.BoundaryNorm(
                      boundaries=[0, percentile_1, percentile_2, 1],
                      ncolors=cmap.N),
                  origin="upper",
                  extent=(-180, 180, -180, 180))
        # Residues below the 60th-percentile contour are marked as outliers.
        outliers_idx = []
        for i, phi_psi_angle in enumerate(phi_psi_angles):
            map_i, map_j = get_coordinates_on_reference_map(
                phi_psi_angle=phi_psi_angle,
                reference_map=rotated_map)
            if rotated_map[map_i, map_j] < percentile_1:
                outliers_idx.append(i)
        x_outliers_numpy = x_numpy[outliers_idx]
        y_outliers_numpy = y_numpy[outliers_idx]
        x_numpy = np.delete(x_numpy, outliers_idx)
        y_numpy = np.delete(y_numpy, outliers_idx)
        ax.scatter(x_outliers_numpy,
                   y_outliers_numpy,
                   s=20,
                   color="red",
                   edgecolors="black")

    ax.scatter(x_numpy, y_numpy, s=20, color="blue", edgecolors="black")
    ax.set_xlim(-180, 180)
    ax.set_ylim(-180, 180)
    ax.xaxis.set_major_locator(ticker.MultipleLocator(45))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(45))
    ax.xaxis.set_tick_params(labelsize=12)
    ax.yaxis.set_tick_params(labelsize=12)
    # Dashed guide lines through the origin.
    ax.plot([-180, 180], [0, 0], "--", linewidth=0.5, color="black")
    ax.plot([0, 0], [-180, 180], "--", linewidth=0.5, color="black")
    ax.set_xlabel(r"${\phi}$", fontsize=18, fontweight="bold")
    ax.set_ylabel(r"${\psi}$", fontsize=18, fontweight="bold")
    fig.savefig(plot_file_path, format="svg", dpi=600, bbox_inches="tight")
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()
    return
def create_ramachandran_plots_from_file(
        file_path: str,
        save_dir_path: str,
        # reference_map_type: Optional[str] = "unsmoothed",
        protein_name: Optional[str] = None,
        rendering_interpolation: bool = False) -> None:
    """Render the four standard Ramachandran plots (general / Gly / Pro /
    pre-Pro) for one structure file into *save_dir_path* as SVG images."""
    if not os.path.exists(save_dir_path):
        os.makedirs(save_dir_path)

    collection = read_residue_torsion_collection_from_file(file_path=file_path)
    angle_sets = [
        collection.collect_torsion_angles_general(),
        collection.collect_torsion_angles_gly(),
        collection.collect_torsion_angles_pro(),
        collection.collect_torsion_angles_prepro(),
    ]

    package_dir, _ = os.path.split(__file__)
    # The Gaussian-density reference is used instead of the raw (unsmoothed)
    # probabilities: the latter contain exact zeros, which make the percentile
    # boundaries degenerate and the resulting plots unusable.
    npz_file = np.load(os.path.join(package_dir, "data", "gaussian_density.npz"))
    categories = ["general", "gly", "pro", "prepro"]
    reference_maps = [npz_file[category] for category in categories]

    # Colour maps borrowed from Erdős Gábor's PyRAMA:
    # https://github.com/gerdos/PyRAMA/blob/301df17e5f2c32544b34321c4f8b0254697183ce/pyrama/config.py
    cmaps = [
        mplcolors.ListedColormap(['#FFFFFF', '#B3E8FF', '#7FD9FF']),  # general
        mplcolors.ListedColormap(['#FFFFFF', '#FFE8C5', '#FFCC7F']),  # gly
        mplcolors.ListedColormap(['#FFFFFF', '#D0FFC5', '#7FFF8C']),  # pro
        mplcolors.ListedColormap(['#FFFFFF', '#B3E8FF', '#7FD9FF']),  # prepro
    ]

    for category, angles, ref_map, colour_map in zip(categories, angle_sets,
                                                     reference_maps, cmaps):
        create_ramachandran_plot(
            phi_psi_angles=angles,
            reference_map=ref_map,
            cmap=colour_map,
            plot_file_path=os.path.join(save_dir_path, category + ".svg"),
            rendering_interpolation=rendering_interpolation,
            protein_name=protein_name)
    return
| [
"os.path.exists",
"ramachandran.io.read_residue_torsion_collection_from_file",
"os.makedirs",
"matplotlib.use",
"numpy.delete",
"matplotlib.ticker.MultipleLocator",
"os.path.join",
"os.path.split",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.colors.ListedC... | [((88, 109), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (102, 109), False, 'import matplotlib\n'), ((1247, 1271), 'numpy.array', 'np.array', (['phi_psi_angles'], {}), '(phi_psi_angles)\n', (1255, 1271), True, 'import numpy as np\n'), ((1365, 1393), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1375, 1393), True, 'import matplotlib.pyplot as plt\n'), ((3548, 3559), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3557, 3559), True, 'import matplotlib.pyplot as plt\n'), ((3939, 4001), 'ramachandran.io.read_residue_torsion_collection_from_file', 'read_residue_torsion_collection_from_file', ([], {'file_path': 'file_path'}), '(file_path=file_path)\n', (3980, 4001), False, 'from ramachandran.io import read_residue_torsion_collection_from_file\n'), ((4536, 4559), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (4549, 4559), False, 'import os\n'), ((5304, 5361), 'os.path.join', 'os.path.join', (['package_dir', '"""data"""', '"""gaussian_density.npz"""'], {}), "(package_dir, 'data', 'gaussian_density.npz')\n", (5316, 5361), False, 'import os\n'), ((5377, 5399), 'numpy.load', 'np.load', (['npz_file_path'], {}), '(npz_file_path)\n', (5384, 5399), True, 'import numpy as np\n'), ((5862, 5921), 'matplotlib.colors.ListedColormap', 'mplcolors.ListedColormap', (["['#FFFFFF', '#B3E8FF', '#7FD9FF']"], {}), "(['#FFFFFF', '#B3E8FF', '#7FD9FF'])\n", (5886, 5921), True, 'import matplotlib.colors as mplcolors\n'), ((5937, 5996), 'matplotlib.colors.ListedColormap', 'mplcolors.ListedColormap', (["['#FFFFFF', '#FFE8C5', '#FFCC7F']"], {}), "(['#FFFFFF', '#FFE8C5', '#FFCC7F'])\n", (5961, 5996), True, 'import matplotlib.colors as mplcolors\n'), ((6012, 6071), 'matplotlib.colors.ListedColormap', 'mplcolors.ListedColormap', (["['#FFFFFF', '#D0FFC5', '#7FFF8C']"], {}), "(['#FFFFFF', '#D0FFC5', '#7FFF8C'])\n", (6036, 6071), True, 'import matplotlib.colors as 
mplcolors\n'), ((6090, 6149), 'matplotlib.colors.ListedColormap', 'mplcolors.ListedColormap', (["['#FFFFFF', '#B3E8FF', '#7FD9FF']"], {}), "(['#FFFFFF', '#B3E8FF', '#7FD9FF'])\n", (6114, 6149), True, 'import matplotlib.colors as mplcolors\n'), ((1666, 1698), 'numpy.percentile', 'np.percentile', (['reference_map', '(60)'], {}), '(reference_map, 60)\n', (1679, 1698), True, 'import numpy as np\n'), ((1722, 1754), 'numpy.percentile', 'np.percentile', (['reference_map', '(90)'], {}), '(reference_map, 90)\n', (1735, 1754), True, 'import numpy as np\n'), ((2611, 2643), 'numpy.delete', 'np.delete', (['x_numpy', 'outliers_idx'], {}), '(x_numpy, outliers_idx)\n', (2620, 2643), True, 'import numpy as np\n'), ((2662, 2694), 'numpy.delete', 'np.delete', (['y_numpy', 'outliers_idx'], {}), '(y_numpy, outliers_idx)\n', (2671, 2694), True, 'import numpy as np\n'), ((3027, 3053), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(45)'], {}), '(45)\n', (3049, 3053), True, 'import matplotlib.ticker as ticker\n'), ((3086, 3112), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', (['(45)'], {}), '(45)\n', (3108, 3112), True, 'import matplotlib.ticker as ticker\n'), ((3839, 3868), 'os.path.exists', 'os.path.exists', (['save_dir_path'], {}), '(save_dir_path)\n', (3853, 3868), False, 'import os\n'), ((3878, 3904), 'os.makedirs', 'os.makedirs', (['save_dir_path'], {}), '(save_dir_path)\n', (3889, 3904), False, 'import os\n'), ((6319, 6356), 'os.path.join', 'os.path.join', (['save_dir_path', 'filename'], {}), '(save_dir_path, filename)\n', (6331, 6356), False, 'import os\n'), ((1774, 1797), 'numpy.rot90', 'np.rot90', (['reference_map'], {}), '(reference_map)\n', (1782, 1797), True, 'import numpy as np\n'), ((1898, 1987), 'matplotlib.colors.BoundaryNorm', 'mplcolors.BoundaryNorm', ([], {'boundaries': '[0, percentile_1, percentile_2, 1]', 'ncolors': 'cmap.N'}), '(boundaries=[0, percentile_1, percentile_2, 1],\n ncolors=cmap.N)\n', (1920, 1987), True, 'import 
matplotlib.colors as mplcolors\n'), ((2360, 2383), 'numpy.rot90', 'np.rot90', (['reference_map'], {}), '(reference_map)\n', (2368, 2383), True, 'import numpy as np\n'), ((2400, 2423), 'numpy.rot90', 'np.rot90', (['reference_map'], {}), '(reference_map)\n', (2408, 2423), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue May 26 07:49:48 2020
@author: X202722
"""
def make_parameter_BPT_fit(T_sim, T_exp, method, na, M):
    """Return the offset that maps a simulated boiling point onto experiment.

    The offset lives in the transformed space of the chosen group-contribution
    method, so that ``useOffset(offset, T_sim, method, na, M)`` recovers T_exp.

    Args:
        T_sim: Simulated boiling point (K).
        T_exp: Experimental boiling point (K).
        method: Index 0-9 selecting the estimation method (0 Nannoolal,
            1 Champion, 2 Stein & Brown, 3 Devotta, 4 Joback, 5 GVS,
            6 Gani, 7 Marrero, 8/9 Marrero simple groups).
        na: Number of atoms (used by the Nannoolal transform).
        M: Molar mass; an empty list means "unknown" for method 7.

    Returns:
        The fitted offset in the method-specific transformed space.
    """
    import numpy as np
    if method == 0:
        # Nannoolal: temperatures compared in (T - c) * (na**a + b) space.
        a = 0.6583
        b = 1.6868
        c = 84.3395
        value = (T_sim - c) * (np.power(na, a) + b)
        OffsetP = (T_exp - c) * (np.power(na, a) + b) - value
    elif method == 2:
        # Stein & Brown: invert the quadratic low-temperature correction
        # T = A + B*v + C*v**2 (valid below 700 K), otherwise the linear one.
        A = -94.84
        B = 1.5577
        C = -0.0007705
        A2 = 282.7
        B2 = 0.5209
        if (T_sim - A2) / (1 - B2) < 700:
            # Uncorrected value v solving C*v**2 + B*v + (A - T_sim) = 0.
            value = (-B + np.sqrt(np.power(B, 2) - 4 * C * (A - T_sim))) / (2 * C)
            # Offset O solving A + B*(v + O) + C*(v + O)**2 = T_exp.
            OffsetP = -1 / 2 * (2 * C * value + B
                                + np.sqrt(B ** 2 - 4 * A * C + 4 * C * T_exp)) / C
        else:
            OffsetP = (T_exp - A2) / (1 - B2) - T_sim
    elif method == 6:
        # Gani: offset taken in exp(T / k) space.
        k = 204.359
        OffsetP = np.exp(T_exp / k) - np.exp(T_sim / k)
    elif method == 7:
        # Marrero: offset scaled by M**k when the molar mass is known.
        k = -0.366
        if M == []:
            OffsetP = T_exp - T_sim
        else:
            OffsetP = (T_exp - T_sim) / np.power(M, k)
    else:
        # Methods 1, 3, 4, 5, 8, 9 (Champion, Devotta, Joback, GVS, Marrero
        # simple groups): plain temperature difference.
        OffsetP = T_exp - T_sim
    return OffsetP
def useOffset(OffsetP, T_sim, method, na, M):
    """Apply a previously fitted offset to a simulated boiling point.

    Inverse companion of ``make_parameter_BPT_fit``: the offset is added in
    the method-specific transformed space and mapped back to a temperature.

    Args:
        OffsetP: Offset returned by ``make_parameter_BPT_fit`` for the same
            method.
        T_sim: Simulated boiling point (K).
        method: Index 0-9 selecting the estimation method
            (see ``make_parameter_BPT_fit``).
        na: Number of atoms (Nannoolal transform).
        M: Molar mass; an empty list means "unknown" for method 7.

    Returns:
        Corrected boiling-point estimate (K).
    """
    import numpy as np
    if method == 0:
        # Nannoolal: shift in (T - c) * (na**a + b) space.
        a = 0.6583
        b = 1.6868
        c = 84.3395
        value = (T_sim - c) * (na ** a + b)
        Topt = (value + OffsetP) / (na ** a + b) + c
    elif method == 2:
        # Stein & Brown; bb = B + 1 matches the linear coefficient used by
        # the fitting routine.
        A = -94.84
        B = 0.5577
        C = -0.0007705
        A2 = 282.7
        B2 = 0.5209
        bb = B + 1
        if (T_sim - A2) / (1 - B2) < 700:
            # Uncorrected value solving C*v**2 + bb*v + (A - T_sim) = 0.
            value2 = (-bb + np.sqrt(np.power(bb, 2) - 4 * C * (A - T_sim))) / (2 * C)
            Topt = A + bb * (value2 + OffsetP) + C * (value2 + OffsetP) ** 2
        else:
            Topt = (T_sim + OffsetP) * (1 - B2) + A2
    elif method == 6:
        # Gani: shift in exp(T / k) space.
        k = 204.359
        value = np.exp(T_sim / k)
        if value + OffsetP < 0:
            # Debug aid: a non-positive argument makes np.log below invalid.
            print(value)
            print(OffsetP)
        Topt = k * np.log(value + OffsetP)
    elif method == 7:
        # Marrero: shift in (T - b) / M**k space when the molar mass is known.
        k = -0.366
        b = 149.84
        if M == []:
            Topt = T_sim + OffsetP
        else:
            value = (T_sim - b) / (M ** k)
            Topt = (value + OffsetP) * M ** k + b
    else:
        # Methods 1, 3, 4, 5, 8, 9: plain additive offset.
        Topt = T_sim + OffsetP
    return Topt
def boiling_point_wrapper (res, meta_real):
#['T', 'AZ', 'TC', 'PC', 'BPT', 'MPT', 'HFUS']
#loop through columns and compare available data with simulated ones
# choose best fit and calculate best fit
import pandas as pd
import numpy as np
#create variables
T_fit = np.zeros((10,len(res)))
T_fit_list = []
names = []
M = []
#run through all interesting molecules
for p in range(len(res)):
# 10 methods to look at
if not np.isnan(meta_real.iloc[p,0]):
# get experimental value
T_exp = res[p].iloc[0,0]+273
#grab auxiliary values
na = res[p].iloc[0,10]
M = res[p].iloc[0,11]
#loop through methods
for i in range(10):
#get simulated values
T_sim = res[p].iloc[i*3+1,0]
method = i
# check if there is a simulated value
if np.isnan(T_sim):
offset = np.nan
T_fit[i,:] = np.nan
else:
#if yes calculate offset
offset = make_parameter_BPT_fit(T_sim, T_exp, method, na, M)
#offset = 0
#then use offset to calculate new values
for k in range(len(res)):
T_sim_fit = res[k].iloc[i*3+1,0]
T_exp_fit = res[k].iloc[0,0]+273
na_fit = res[k].iloc[0,10]
M_fit = res[k].iloc[0,11]
#check if offset is nan
if np.isnan(T_sim_fit):
T_fit[i,k] = np.nan
else:
# fit value with calculated offset
T_fit[i,k] = useOffset(offset,T_sim_fit, method, na_fit,M_fit, T_exp_fit)
T_fit_list.append(pd.DataFrame(T_fit))
names.append(meta_real.index[p])
T_fit = np.zeros((10,len(res)))
return T_fit_list, names
def BPT_sim_data(res_clean, meta_real):
# calculate b
import numpy as np
import pandas as pd
T_fit_list = []
T_sim = np.zeros(10)
for p in range(len(res_clean)):
# 10 methods to look at
if not np.isnan(meta_real.iloc[p,0]):
for i in range(10):
T_sim[i] = res_clean[p].iloc[i*3+1,0]
T_fit_list.append(T_sim)
T_sim = np.zeros(10)
return T_fit_list
def prepare_Violin_plot(data, title, ax):
# print violinplot
import numpy as np
import matplotlib.pyplot as plt
data_plot_T_pure = []
#remove nan for boxplot
for T in data.T.columns:
T_sim_flat = data.T[T].to_numpy().flatten()
T_sim_flat = T_sim_flat[~np.isnan(T_sim_flat)]
data_plot_T_pure.append(T_sim_flat)
ax.set_title(title)
ax.boxplot(data_plot_T_pure)
return ax
| [
"numpy.sqrt",
"numpy.power",
"numpy.log",
"numpy.exp",
"numpy.zeros",
"numpy.isnan",
"pandas.DataFrame"
] | [((6738, 6750), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (6746, 6750), True, 'import numpy as np\n'), ((7007, 7019), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (7015, 7019), True, 'import numpy as np\n'), ((4856, 4886), 'numpy.isnan', 'np.isnan', (['meta_real.iloc[p, 0]'], {}), '(meta_real.iloc[p, 0])\n', (4864, 4886), True, 'import numpy as np\n'), ((6837, 6867), 'numpy.isnan', 'np.isnan', (['meta_real.iloc[p, 0]'], {}), '(meta_real.iloc[p, 0])\n', (6845, 6867), True, 'import numpy as np\n'), ((349, 364), 'numpy.power', 'np.power', (['na', 'a'], {}), '(na, a)\n', (357, 364), True, 'import numpy as np\n'), ((5376, 5391), 'numpy.isnan', 'np.isnan', (['T_sim'], {}), '(T_sim)\n', (5384, 5391), True, 'import numpy as np\n'), ((6445, 6464), 'pandas.DataFrame', 'pd.DataFrame', (['T_fit'], {}), '(T_fit)\n', (6457, 6464), True, 'import pandas as pd\n'), ((7368, 7388), 'numpy.isnan', 'np.isnan', (['T_sim_flat'], {}), '(T_sim_flat)\n', (7376, 7388), True, 'import numpy as np\n'), ((407, 422), 'numpy.power', 'np.power', (['na', 'a'], {}), '(na, a)\n', (415, 422), True, 'import numpy as np\n'), ((6115, 6134), 'numpy.isnan', 'np.isnan', (['T_sim_fit'], {}), '(T_sim_fit)\n', (6123, 6134), True, 'import numpy as np\n'), ((991, 1009), 'numpy.power', 'np.power', (['value', '(2)'], {}), '(value, 2)\n', (999, 1009), True, 'import numpy as np\n'), ((1139, 1182), 'numpy.sqrt', 'np.sqrt', (['(B ** 2 - 4 * A * C + 4 * C * T_exp)'], {}), '(B ** 2 - 4 * A * C + 4 * C * T_exp)\n', (1146, 1182), True, 'import numpy as np\n'), ((3646, 3663), 'numpy.exp', 'np.exp', (['(T_sim / k)'], {}), '(T_sim / k)\n', (3652, 3663), True, 'import numpy as np\n'), ((920, 934), 'numpy.power', 'np.power', (['b', '(2)'], {}), '(b, 2)\n', (928, 934), True, 'import numpy as np\n'), ((1819, 1836), 'numpy.exp', 'np.exp', (['(T_exp / k)'], {}), '(T_exp / k)\n', (1825, 1836), True, 'import numpy as np\n'), ((1836, 1853), 'numpy.exp', 'np.exp', (['(T_sim / k)'], {}), '(T_sim / k)\n', (1842, 
1853), True, 'import numpy as np\n'), ((3115, 3129), 'numpy.power', 'np.power', (['b', '(2)'], {}), '(b, 2)\n', (3123, 3129), True, 'import numpy as np\n'), ((3781, 3804), 'numpy.log', 'np.log', (['(value + OffsetP)'], {}), '(value + OffsetP)\n', (3787, 3804), True, 'import numpy as np\n'), ((2054, 2068), 'numpy.power', 'np.power', (['M', 'k'], {}), '(M, k)\n', (2062, 2068), True, 'import numpy as np\n')] |
import numpy
import pytest
from testfixtures import LogCapture
from matchms.filtering import add_losses
from .builder_Spectrum import SpectrumBuilder
@pytest.mark.parametrize("mz, loss_mz_to, expected_mz, expected_intensities", [
[numpy.array([100, 150, 200, 300], dtype="float"), 1000, numpy.array([145, 245, 295, 345], "float"), numpy.array([1000, 100, 200, 700], "float")],
[numpy.array([100, 150, 200, 450], dtype="float"), 1000, numpy.array([245, 295, 345], "float"), numpy.array([100, 200, 700], "float")],
[numpy.array([100, 150, 200, 300], dtype="float"), 250, numpy.array([145, 245], "float"), numpy.array([1000, 100], "float")]
])
def test_add_losses_parameterized(mz, loss_mz_to, expected_mz, expected_intensities):
intensities = numpy.array([700, 200, 100, 1000], "float")
metadata = {"precursor_mz": 445.0}
spectrum_in = SpectrumBuilder().with_mz(mz).with_intensities(
intensities).with_metadata(metadata).build()
spectrum = add_losses(spectrum_in, loss_mz_to=loss_mz_to)
assert numpy.allclose(spectrum.losses.mz, expected_mz), "Expected different loss m/z."
assert numpy.allclose(spectrum.losses.intensities, expected_intensities), "Expected different intensities."
@pytest.mark.parametrize("mz, intensities", [
[numpy.array([100, 150, 200, 300], dtype="float"), numpy.array([700, 200, 100, 1000], dtype="float")],
[[], []]
])
def test_add_losses_without_precursor_mz_parameterized(mz, intensities):
spectrum_in = SpectrumBuilder().with_mz(mz).with_intensities(intensities).build()
spectrum = add_losses(spectrum_in)
with LogCapture() as log:
spectrum = add_losses(spectrum_in)
assert spectrum == spectrum_in and spectrum is not spectrum_in
log.check(
("matchms", "WARNING",
"No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")
)
def test_add_losses_with_precursor_mz_wrong_type():
"""Test if correct assert error is raised for precursor-mz as string."""
mz = numpy.array([100, 150, 200, 300], dtype="float")
intensities = numpy.array([700, 200, 100, 1000], "float")
metadata = {"precursor_mz": "445.0"}
spectrum_in = SpectrumBuilder().with_mz(mz).with_intensities(
intensities).with_metadata(metadata).build()
with pytest.raises(AssertionError) as msg:
_ = add_losses(spectrum_in)
assert "Expected 'precursor_mz' to be a scalar number." in str(msg.value)
def test_add_losses_with_input_none():
"""Test if input spectrum is None."""
spectrum_in = None
spectrum = add_losses(spectrum_in)
assert spectrum is None
| [
"numpy.allclose",
"matchms.filtering.add_losses",
"numpy.array",
"pytest.raises",
"testfixtures.LogCapture"
] | [((759, 802), 'numpy.array', 'numpy.array', (['[700, 200, 100, 1000]', '"""float"""'], {}), "([700, 200, 100, 1000], 'float')\n", (770, 802), False, 'import numpy\n'), ((977, 1023), 'matchms.filtering.add_losses', 'add_losses', (['spectrum_in'], {'loss_mz_to': 'loss_mz_to'}), '(spectrum_in, loss_mz_to=loss_mz_to)\n', (987, 1023), False, 'from matchms.filtering import add_losses\n'), ((1036, 1083), 'numpy.allclose', 'numpy.allclose', (['spectrum.losses.mz', 'expected_mz'], {}), '(spectrum.losses.mz, expected_mz)\n', (1050, 1083), False, 'import numpy\n'), ((1127, 1192), 'numpy.allclose', 'numpy.allclose', (['spectrum.losses.intensities', 'expected_intensities'], {}), '(spectrum.losses.intensities, expected_intensities)\n', (1141, 1192), False, 'import numpy\n'), ((1573, 1596), 'matchms.filtering.add_losses', 'add_losses', (['spectrum_in'], {}), '(spectrum_in)\n', (1583, 1596), False, 'from matchms.filtering import add_losses\n'), ((2017, 2065), 'numpy.array', 'numpy.array', (['[100, 150, 200, 300]'], {'dtype': '"""float"""'}), "([100, 150, 200, 300], dtype='float')\n", (2028, 2065), False, 'import numpy\n'), ((2084, 2127), 'numpy.array', 'numpy.array', (['[700, 200, 100, 1000]', '"""float"""'], {}), "([700, 200, 100, 1000], 'float')\n", (2095, 2127), False, 'import numpy\n'), ((2572, 2595), 'matchms.filtering.add_losses', 'add_losses', (['spectrum_in'], {}), '(spectrum_in)\n', (2582, 2595), False, 'from matchms.filtering import add_losses\n'), ((1607, 1619), 'testfixtures.LogCapture', 'LogCapture', ([], {}), '()\n', (1617, 1619), False, 'from testfixtures import LogCapture\n'), ((1647, 1670), 'matchms.filtering.add_losses', 'add_losses', (['spectrum_in'], {}), '(spectrum_in)\n', (1657, 1670), False, 'from matchms.filtering import add_losses\n'), ((2298, 2327), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2311, 2327), False, 'import pytest\n'), ((2348, 2371), 'matchms.filtering.add_losses', 'add_losses', (['spectrum_in'], 
{}), '(spectrum_in)\n', (2358, 2371), False, 'from matchms.filtering import add_losses\n'), ((237, 285), 'numpy.array', 'numpy.array', (['[100, 150, 200, 300]'], {'dtype': '"""float"""'}), "([100, 150, 200, 300], dtype='float')\n", (248, 285), False, 'import numpy\n'), ((293, 335), 'numpy.array', 'numpy.array', (['[145, 245, 295, 345]', '"""float"""'], {}), "([145, 245, 295, 345], 'float')\n", (304, 335), False, 'import numpy\n'), ((337, 380), 'numpy.array', 'numpy.array', (['[1000, 100, 200, 700]', '"""float"""'], {}), "([1000, 100, 200, 700], 'float')\n", (348, 380), False, 'import numpy\n'), ((388, 436), 'numpy.array', 'numpy.array', (['[100, 150, 200, 450]'], {'dtype': '"""float"""'}), "([100, 150, 200, 450], dtype='float')\n", (399, 436), False, 'import numpy\n'), ((444, 481), 'numpy.array', 'numpy.array', (['[245, 295, 345]', '"""float"""'], {}), "([245, 295, 345], 'float')\n", (455, 481), False, 'import numpy\n'), ((483, 520), 'numpy.array', 'numpy.array', (['[100, 200, 700]', '"""float"""'], {}), "([100, 200, 700], 'float')\n", (494, 520), False, 'import numpy\n'), ((528, 576), 'numpy.array', 'numpy.array', (['[100, 150, 200, 300]'], {'dtype': '"""float"""'}), "([100, 150, 200, 300], dtype='float')\n", (539, 576), False, 'import numpy\n'), ((583, 615), 'numpy.array', 'numpy.array', (['[145, 245]', '"""float"""'], {}), "([145, 245], 'float')\n", (594, 615), False, 'import numpy\n'), ((617, 650), 'numpy.array', 'numpy.array', (['[1000, 100]', '"""float"""'], {}), "([1000, 100], 'float')\n", (628, 650), False, 'import numpy\n'), ((1281, 1329), 'numpy.array', 'numpy.array', (['[100, 150, 200, 300]'], {'dtype': '"""float"""'}), "([100, 150, 200, 300], dtype='float')\n", (1292, 1329), False, 'import numpy\n'), ((1331, 1380), 'numpy.array', 'numpy.array', (['[700, 200, 100, 1000]'], {'dtype': '"""float"""'}), "([700, 200, 100, 1000], dtype='float')\n", (1342, 1380), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
import imageio
import matplotlib.pyplot as plt
import numpy
img = imageio.imread('Z:/DRPI/questoes_aula/sat_map3.tif')
dim = img.shape
col = dim[1]
lin = dim[0]
def histogram(img, s, rgb):
"""
Função que desenha os histogramas
:param img: A imagem
:param s: A legenda
:param rgb: A cor pretendida
:return: O histograma
"""
hs = plt.hist(img.ravel(), bins = 256, range=(0, 255), color= rgb, histtype = 'step')
helc = hs[0]
plt.title(s, fontsize = 10)
return helc
def elc_no_saturation(img):
"""
Função que devolve o elc da imagem sem saturação
:param img: a imagem a utilizar
:return: O elc da imagem
"""
min_img = abs(float(numpy.min(img)))
max_img = abs(float(numpy.max(img)))
lower_threshhold = 0
higher_threshhold = 255
elc_result = (img - min_img) * ((higher_threshhold - lower_threshhold) / (max_img - min_img)) + lower_threshhold
return elc_result
def elc_saturation(img):
"""
Função que devolve o elc com saturação
:param img: a imagem a utilizar
:return: O elc da imagem com saturação
"""
h, r = numpy.histogram(img, bins=256, range=(0, 256))
c = 0
d = 255
saturation = 2.0 / 100
p = h.astype(float) / (lin * col)
pa = numpy.cumsum(p)
a1 = float(numpy.count_nonzero(pa <= saturation / 2) - 1)
b1 = float(numpy.count_nonzero(pa <= (1 - saturation / 2)) - 1)
elc_sat_final = ((img.astype(float) - a1) * ((d - c) / (b1 - a1)) + c).clip(0, 255)
return elc_sat_final
def equalization(img):
"""
Função que equaliza o histograma
:param img: a imagem a aplicar a equalização
:return: O histograma equalizado
"""
h,r = numpy.histogram(img, bins=256, range=(0, 256))
p = h/float(dim[0] * dim[1])
pa = numpy.cumsum(p)
pa_norm = pa*255
equ = numpy.zeros((lin,col, dim[2]))
for i in range(len(pa_norm)):
equ = equ+(img==i)*int(pa_norm[i])
return equ
def equalization_2(img):
"""
Função que equaliza o histograma
:param img: a imagem a aplicar a equalização
:return: O histograma equalizado
"""
h,r = numpy.histogram(img, bins=256, range=(0, 256))
p = h/float(lin * col)
pa = numpy.cumsum(p)
pa_norm = pa*255
eq = numpy.zeros((lin,col))
for i in range(len(pa_norm)):
eq = eq+(img==i)*int(pa_norm[i])
return numpy.cumsum(h), eq
if __name__ == '__main__':
# ------------------ Exercicio 1 ----------------------------- #
# Plot dos Histogramas ELC
plt.interactive(False)
plt.figure(figsize=(20, 3))
plt.subplot(131);
histogram(elc_no_saturation(img[:, :, 0]), 'Histograma ELC de r', [1, 0, 0])
plt.subplot(132);
histogram(elc_no_saturation(img[:, :, 1]), 'Histograma ELC de g', [0, 1, 0])
plt.subplot(133);
histogram(elc_no_saturation(img[:, :, 2]), 'Histograma ELC de b', [0, 0, 1])
# Plot dos histogramas ELC saturados
plt.figure(figsize=(20, 3))
plt.subplot(131);
histogram(elc_saturation(img[:, :, 0]), 'Histograma ELC de sat. r', [1, 0, 0])
plt.subplot(132);
histogram(elc_saturation(img[:, :, 1]), 'Histograma ELC de sat. g', [0, 1, 0])
plt.subplot(133);
histogram(elc_saturation(img[:, :, 2]), 'Histograma ELC de sat. b', [0, 0, 1])
# Plot das imagens
plt.figure(figsize=(20, 3))
plt.subplot(131);
plt.imshow(numpy.uint8(img))
plt.title('Inicial');
plt.axis('off')
plt.subplot(132);
plt.imshow(numpy.uint8(elc_no_saturation(img)))
plt.title('ELC sem saturacao');
plt.axis('off')
plt.subplot(133);plt.imshow(numpy.uint8(elc_saturation(img)), 'gray')
plt.title('ELC com 0.01% de saturação bilateral');
plt.axis('off')
plt.show()
# ------------------ Exercicio 2 ----------------------------- #
# Plot dos histogramas não equalizados
plt.figure(figsize=(20, 3))
plt.subplot(141);histogram(img[:,:,0], 'Histograma de r', [1,0,0])
plt.subplot(142);histogram(img[:,:,1], 'Histograma de g', [0,1,0])
plt.subplot(143);histogram(img[:,:,2], 'Histograma de b', [0,0,1])
# Plot dos histogramas acumulados
plt.figure(figsize=(20, 3))
plt.subplot(141);plt.plot(equalization_2(img[:,:,0])[0], color = 'r')
plt.title('acumulado hist de r')
plt.subplot(142);plt.plot(equalization_2(img[:,:,1])[0], color = 'g')
plt.title('acumulado hist de g')
plt.subplot(143);plt.plot(equalization_2(img[:,:,2])[0], color = 'b')
plt.title('acumulado hist de b')
# Plot dos histogramas equalizados
plt.figure(figsize=(20, 3))
plt.subplot(141);red = histogram(equalization_2(img[:,:,0])[1],'Hist equalizado r',[1,0,0])
plt.subplot(142);green = histogram(equalization_2(img[:,:,0])[1],'Hist equalizado g',[0,1,0])
plt.subplot(143);blue = histogram(equalization_2(img[:,:,0])[1],'Hist equalizado b',[0,0,1])
# Plot dos histogramas acumulados a partir do seu equalizado
plt.figure(figsize=(20, 3))
plt.subplot(141);plt.plot(numpy.cumsum(red), color = 'r')
plt.title('acumulado do equal. de r')
plt.subplot(142);plt.plot(numpy.cumsum(green), color = 'g')
plt.title('acumulado do equal. de g')
plt.subplot(143);plt.plot(numpy.cumsum(blue), color = 'b')
plt.title('acumulado do equal. de b')
# Plot das imagens
plt.figure(figsize=(20, 2))
plt.subplot(141); plt.imshow(numpy.uint8(img))
plt.title('Inicial'); plt.axis('off')
plt.subplot(142); plt.imshow(numpy.uint8(equalization(img)), 'gray')
plt.title('Imagem equalizada'); plt.axis('off')
plt.show() | [
"numpy.uint8",
"numpy.histogram",
"matplotlib.pyplot.axis",
"numpy.max",
"numpy.count_nonzero",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.cumsum",
"matplotlib.pyplot.interactive",
"numpy.min",
"imageio.imread",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyp... | [((100, 152), 'imageio.imread', 'imageio.imread', (['"""Z:/DRPI/questoes_aula/sat_map3.tif"""'], {}), "('Z:/DRPI/questoes_aula/sat_map3.tif')\n", (114, 152), False, 'import imageio\n'), ((514, 539), 'matplotlib.pyplot.title', 'plt.title', (['s'], {'fontsize': '(10)'}), '(s, fontsize=10)\n', (523, 539), True, 'import matplotlib.pyplot as plt\n'), ((1196, 1242), 'numpy.histogram', 'numpy.histogram', (['img'], {'bins': '(256)', 'range': '(0, 256)'}), '(img, bins=256, range=(0, 256))\n', (1211, 1242), False, 'import numpy\n'), ((1350, 1365), 'numpy.cumsum', 'numpy.cumsum', (['p'], {}), '(p)\n', (1362, 1365), False, 'import numpy\n'), ((1806, 1852), 'numpy.histogram', 'numpy.histogram', (['img'], {'bins': '(256)', 'range': '(0, 256)'}), '(img, bins=256, range=(0, 256))\n', (1821, 1852), False, 'import numpy\n'), ((1897, 1912), 'numpy.cumsum', 'numpy.cumsum', (['p'], {}), '(p)\n', (1909, 1912), False, 'import numpy\n'), ((1946, 1977), 'numpy.zeros', 'numpy.zeros', (['(lin, col, dim[2])'], {}), '((lin, col, dim[2]))\n', (1957, 1977), False, 'import numpy\n'), ((2261, 2307), 'numpy.histogram', 'numpy.histogram', (['img'], {'bins': '(256)', 'range': '(0, 256)'}), '(img, bins=256, range=(0, 256))\n', (2276, 2307), False, 'import numpy\n'), ((2346, 2361), 'numpy.cumsum', 'numpy.cumsum', (['p'], {}), '(p)\n', (2358, 2361), False, 'import numpy\n'), ((2394, 2417), 'numpy.zeros', 'numpy.zeros', (['(lin, col)'], {}), '((lin, col))\n', (2405, 2417), False, 'import numpy\n'), ((2675, 2697), 'matplotlib.pyplot.interactive', 'plt.interactive', (['(False)'], {}), '(False)\n', (2690, 2697), True, 'import matplotlib.pyplot as plt\n'), ((2703, 2730), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 3)'}), '(figsize=(20, 3))\n', (2713, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2736, 2752), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (2747, 2752), True, 'import matplotlib.pyplot as plt\n'), ((2841, 2857), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (2852, 2857), True, 'import matplotlib.pyplot as plt\n'), ((2946, 2962), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (2957, 2962), True, 'import matplotlib.pyplot as plt\n'), ((3095, 3122), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 3)'}), '(figsize=(20, 3))\n', (3105, 3122), True, 'import matplotlib.pyplot as plt\n'), ((3128, 3144), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (3139, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3251), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (3246, 3251), True, 'import matplotlib.pyplot as plt\n'), ((3342, 3358), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (3353, 3358), True, 'import matplotlib.pyplot as plt\n'), ((3475, 3502), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 3)'}), '(figsize=(20, 3))\n', (3485, 3502), True, 'import matplotlib.pyplot as plt\n'), ((3508, 3524), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (3519, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3565, 3585), 'matplotlib.pyplot.title', 'plt.title', (['"""Inicial"""'], {}), "('Inicial')\n", (3574, 3585), True, 'import matplotlib.pyplot as plt\n'), ((3592, 3607), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3600, 3607), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3629), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (3624, 3629), True, 'import matplotlib.pyplot as plt\n'), ((3689, 3719), 'matplotlib.pyplot.title', 'plt.title', (['"""ELC sem saturacao"""'], {}), "('ELC sem saturacao')\n", (3698, 3719), True, 'import matplotlib.pyplot as plt\n'), ((3726, 3741), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3734, 3741), True, 'import matplotlib.pyplot as plt\n'), ((3753, 3769), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(133)'], {}), '(133)\n', (3764, 3769), True, 'import matplotlib.pyplot as plt\n'), ((3828, 3877), 'matplotlib.pyplot.title', 'plt.title', (['"""ELC com 0.01% de saturação bilateral"""'], {}), "('ELC com 0.01% de saturação bilateral')\n", (3837, 3877), True, 'import matplotlib.pyplot as plt\n'), ((3884, 3899), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3892, 3899), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3915), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3913, 3915), True, 'import matplotlib.pyplot as plt\n'), ((4047, 4074), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 3)'}), '(figsize=(20, 3))\n', (4057, 4074), True, 'import matplotlib.pyplot as plt\n'), ((4080, 4096), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (4091, 4096), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4168), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (4163, 4168), True, 'import matplotlib.pyplot as plt\n'), ((4225, 4241), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (4236, 4241), True, 'import matplotlib.pyplot as plt\n'), ((4343, 4370), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 3)'}), '(figsize=(20, 3))\n', (4353, 4370), True, 'import matplotlib.pyplot as plt\n'), ((4376, 4392), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (4387, 4392), True, 'import matplotlib.pyplot as plt\n'), ((4451, 4483), 'matplotlib.pyplot.title', 'plt.title', (['"""acumulado hist de r"""'], {}), "('acumulado hist de r')\n", (4460, 4483), True, 'import matplotlib.pyplot as plt\n'), ((4489, 4505), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (4500, 4505), True, 'import matplotlib.pyplot as plt\n'), ((4564, 4596), 'matplotlib.pyplot.title', 'plt.title', (['"""acumulado hist de g"""'], {}), "('acumulado hist de g')\n", (4573, 4596), True, 'import matplotlib.pyplot as 
plt\n'), ((4602, 4618), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (4613, 4618), True, 'import matplotlib.pyplot as plt\n'), ((4677, 4709), 'matplotlib.pyplot.title', 'plt.title', (['"""acumulado hist de b"""'], {}), "('acumulado hist de b')\n", (4686, 4709), True, 'import matplotlib.pyplot as plt\n'), ((4761, 4788), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 3)'}), '(figsize=(20, 3))\n', (4771, 4788), True, 'import matplotlib.pyplot as plt\n'), ((4794, 4810), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (4805, 4810), True, 'import matplotlib.pyplot as plt\n'), ((4891, 4907), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (4902, 4907), True, 'import matplotlib.pyplot as plt\n'), ((4990, 5006), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (5001, 5006), True, 'import matplotlib.pyplot as plt\n'), ((5164, 5191), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 3)'}), '(figsize=(20, 3))\n', (5174, 5191), True, 'import matplotlib.pyplot as plt\n'), ((5197, 5213), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (5208, 5213), True, 'import matplotlib.pyplot as plt\n'), ((5260, 5297), 'matplotlib.pyplot.title', 'plt.title', (['"""acumulado do equal. de r"""'], {}), "('acumulado do equal. de r')\n", (5269, 5297), True, 'import matplotlib.pyplot as plt\n'), ((5303, 5319), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (5314, 5319), True, 'import matplotlib.pyplot as plt\n'), ((5368, 5405), 'matplotlib.pyplot.title', 'plt.title', (['"""acumulado do equal. de g"""'], {}), "('acumulado do equal. de g')\n", (5377, 5405), True, 'import matplotlib.pyplot as plt\n'), ((5411, 5427), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(143)'], {}), '(143)\n', (5422, 5427), True, 'import matplotlib.pyplot as plt\n'), ((5475, 5512), 'matplotlib.pyplot.title', 'plt.title', (['"""acumulado do equal. 
de b"""'], {}), "('acumulado do equal. de b')\n", (5484, 5512), True, 'import matplotlib.pyplot as plt\n'), ((5548, 5575), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 2)'}), '(figsize=(20, 2))\n', (5558, 5575), True, 'import matplotlib.pyplot as plt\n'), ((5581, 5597), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(141)'], {}), '(141)\n', (5592, 5597), True, 'import matplotlib.pyplot as plt\n'), ((5633, 5653), 'matplotlib.pyplot.title', 'plt.title', (['"""Inicial"""'], {}), "('Inicial')\n", (5642, 5653), True, 'import matplotlib.pyplot as plt\n'), ((5655, 5670), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5663, 5670), True, 'import matplotlib.pyplot as plt\n'), ((5676, 5692), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(142)'], {}), '(142)\n', (5687, 5692), True, 'import matplotlib.pyplot as plt\n'), ((5750, 5780), 'matplotlib.pyplot.title', 'plt.title', (['"""Imagem equalizada"""'], {}), "('Imagem equalizada')\n", (5759, 5780), True, 'import matplotlib.pyplot as plt\n'), ((5782, 5797), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5790, 5797), True, 'import matplotlib.pyplot as plt\n'), ((5803, 5813), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5811, 5813), True, 'import matplotlib.pyplot as plt\n'), ((2512, 2527), 'numpy.cumsum', 'numpy.cumsum', (['h'], {}), '(h)\n', (2524, 2527), False, 'import numpy\n'), ((3542, 3558), 'numpy.uint8', 'numpy.uint8', (['img'], {}), '(img)\n', (3553, 3558), False, 'import numpy\n'), ((5223, 5240), 'numpy.cumsum', 'numpy.cumsum', (['red'], {}), '(red)\n', (5235, 5240), False, 'import numpy\n'), ((5329, 5348), 'numpy.cumsum', 'numpy.cumsum', (['green'], {}), '(green)\n', (5341, 5348), False, 'import numpy\n'), ((5437, 5455), 'numpy.cumsum', 'numpy.cumsum', (['blue'], {}), '(blue)\n', (5449, 5455), False, 'import numpy\n'), ((5610, 5626), 'numpy.uint8', 'numpy.uint8', (['img'], {}), '(img)\n', (5621, 5626), False, 'import numpy\n'), ((756, 
770), 'numpy.min', 'numpy.min', (['img'], {}), '(img)\n', (765, 770), False, 'import numpy\n'), ((798, 812), 'numpy.max', 'numpy.max', (['img'], {}), '(img)\n', (807, 812), False, 'import numpy\n'), ((1388, 1429), 'numpy.count_nonzero', 'numpy.count_nonzero', (['(pa <= saturation / 2)'], {}), '(pa <= saturation / 2)\n', (1407, 1429), False, 'import numpy\n'), ((1451, 1496), 'numpy.count_nonzero', 'numpy.count_nonzero', (['(pa <= 1 - saturation / 2)'], {}), '(pa <= 1 - saturation / 2)\n', (1470, 1496), False, 'import numpy\n')] |
__author__ = 'sibirrer'
from astrofunc.LensingProfiles.nfw import NFW
from astrofunc.LensingProfiles.nfw_ellipse import NFW_ELLIPSE
import numpy as np
import numpy.testing as npt
import pytest
class TestNFW(object):
"""
tests the Gaussian methods
"""
def setup(self):
self.nfw = NFW()
def test_function(self):
x = np.array([1])
y = np.array([2])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
values = self.nfw.function(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0], 2.4764530888727556, decimal=5)
x = np.array([0])
y = np.array([0])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
values = self.nfw.function(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0], 0, decimal=4)
x = np.array([2,3,4])
y = np.array([1,1,1])
values = self.nfw.function(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0], 2.4764530888727556, decimal=5)
npt.assert_almost_equal(values[1], 3.5400250357511416, decimal=5)
npt.assert_almost_equal(values[2], 4.5623722261790647, decimal=5)
def test_derivatives(self):
x = np.array([1])
y = np.array([2])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
f_x, f_y = self.nfw.derivatives(x, y, Rs, theta_Rs)
npt.assert_almost_equal(f_x[0], 0.53211690764331998, decimal=5)
npt.assert_almost_equal(f_y[0], 1.06423381528664, decimal=5)
x = np.array([0])
y = np.array([0])
theta_Rs = 0
f_x, f_y = self.nfw.derivatives(x, y, Rs, theta_Rs)
npt.assert_almost_equal(f_x[0], 0, decimal=5)
npt.assert_almost_equal(f_y[0], 0, decimal=5)
x = np.array([1,3,4])
y = np.array([2,1,1])
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
values = self.nfw.derivatives(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0][0], 0.53211690764331998, decimal=5)
npt.assert_almost_equal(values[1][0], 1.06423381528664, decimal=5)
npt.assert_almost_equal(values[0][1], 1.0493927480837946, decimal=5)
npt.assert_almost_equal(values[1][1], 0.34979758269459821, decimal=5)
def test_hessian(self):
x = np.array([1])
y = np.array([2])
Rs = 1.
rho0 = 1
theta_Rs = self.nfw._rho02alpha(rho0, Rs)
f_xx, f_yy,f_xy = self.nfw.hessian(x, y, Rs, theta_Rs)
npt.assert_almost_equal(f_xx[0], 0.40855527280658294, decimal=5)
npt.assert_almost_equal(f_yy[0], 0.037870368296371637, decimal=5)
npt.assert_almost_equal(f_xy[0], -0.2471232696734742, decimal=5)
x = np.array([1,3,4])
y = np.array([2,1,1])
values = self.nfw.hessian(x, y, Rs, theta_Rs)
npt.assert_almost_equal(values[0][0], 0.40855527280658294, decimal=5)
npt.assert_almost_equal(values[1][0], 0.037870368296371637, decimal=5)
npt.assert_almost_equal(values[2][0], -0.2471232696734742, decimal=5)
npt.assert_almost_equal(values[0][1], -0.046377502475445781, decimal=5)
npt.assert_almost_equal(values[1][1], 0.30577812878681554, decimal=5)
npt.assert_almost_equal(values[2][1], -0.13205836172334798, decimal=5)
class TestMassAngleConversion(object):
"""
test angular to mass unit conversions
"""
def setup(self):
self.nfw = NFW()
self.nfw_ellipse = NFW_ELLIPSE()
def test_angle(self):
x, y = 1, 0
alpha1, alpha2 = self.nfw.derivatives(x, y, theta_Rs=1., Rs=1.)
assert alpha1 == 1.
def test_convertAngle2rho(self):
rho0 = self.nfw._alpha2rho0(theta_Rs=1., Rs=1.)
assert rho0 == 0.81472283831773229
def test_convertrho02angle(self):
theta_Rs_in = 1.5
Rs = 1.5
rho0 = self.nfw._alpha2rho0(theta_Rs=theta_Rs_in, Rs=Rs)
theta_Rs_out = self.nfw._rho02alpha(rho0, Rs)
assert theta_Rs_in == theta_Rs_out
if __name__ == '__main__':
pytest.main() | [
"pytest.main",
"astrofunc.LensingProfiles.nfw.NFW",
"numpy.array",
"numpy.testing.assert_almost_equal",
"astrofunc.LensingProfiles.nfw_ellipse.NFW_ELLIPSE"
] | [((4080, 4093), 'pytest.main', 'pytest.main', ([], {}), '()\n', (4091, 4093), False, 'import pytest\n'), ((307, 312), 'astrofunc.LensingProfiles.nfw.NFW', 'NFW', ([], {}), '()\n', (310, 312), False, 'from astrofunc.LensingProfiles.nfw import NFW\n'), ((356, 369), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (364, 369), True, 'import numpy as np\n'), ((382, 395), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (390, 395), True, 'import numpy as np\n'), ((542, 607), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[0]', '(2.4764530888727556)'], {'decimal': '(5)'}), '(values[0], 2.4764530888727556, decimal=5)\n', (565, 607), True, 'import numpy.testing as npt\n'), ((620, 633), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (628, 633), True, 'import numpy as np\n'), ((646, 659), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (654, 659), True, 'import numpy as np\n'), ((806, 854), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[0]', '(0)'], {'decimal': '(4)'}), '(values[0], 0, decimal=4)\n', (829, 854), True, 'import numpy.testing as npt\n'), ((868, 887), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (876, 887), True, 'import numpy as np\n'), ((898, 917), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (906, 917), True, 'import numpy as np\n'), ((979, 1044), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[0]', '(2.4764530888727556)'], {'decimal': '(5)'}), '(values[0], 2.4764530888727556, decimal=5)\n', (1002, 1044), True, 'import numpy.testing as npt\n'), ((1053, 1118), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[1]', '(3.5400250357511416)'], {'decimal': '(5)'}), '(values[1], 3.5400250357511416, decimal=5)\n', (1076, 1118), True, 'import numpy.testing as npt\n'), ((1127, 1191), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[2]', '(4.562372226179065)'], {'decimal': '(5)'}), 
'(values[2], 4.562372226179065, decimal=5)\n', (1150, 1191), True, 'import numpy.testing as npt\n'), ((1239, 1252), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (1247, 1252), True, 'import numpy as np\n'), ((1265, 1278), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (1273, 1278), True, 'import numpy as np\n'), ((1430, 1490), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['f_x[0]', '(0.53211690764332)'], {'decimal': '(5)'}), '(f_x[0], 0.53211690764332, decimal=5)\n', (1453, 1490), True, 'import numpy.testing as npt\n'), ((1502, 1562), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['f_y[0]', '(1.06423381528664)'], {'decimal': '(5)'}), '(f_y[0], 1.06423381528664, decimal=5)\n', (1525, 1562), True, 'import numpy.testing as npt\n'), ((1575, 1588), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1583, 1588), True, 'import numpy as np\n'), ((1601, 1614), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1609, 1614), True, 'import numpy as np\n'), ((1704, 1749), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['f_x[0]', '(0)'], {'decimal': '(5)'}), '(f_x[0], 0, decimal=5)\n', (1727, 1749), True, 'import numpy.testing as npt\n'), ((1758, 1803), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['f_y[0]', '(0)'], {'decimal': '(5)'}), '(f_y[0], 0, decimal=5)\n', (1781, 1803), True, 'import numpy.testing as npt\n'), ((1817, 1836), 'numpy.array', 'np.array', (['[1, 3, 4]'], {}), '([1, 3, 4])\n', (1825, 1836), True, 'import numpy as np\n'), ((1847, 1866), 'numpy.array', 'np.array', (['[2, 1, 1]'], {}), '([2, 1, 1])\n', (1855, 1866), True, 'import numpy as np\n'), ((1998, 2064), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[0][0]', '(0.53211690764332)'], {'decimal': '(5)'}), '(values[0][0], 0.53211690764332, decimal=5)\n', (2021, 2064), True, 'import numpy.testing as npt\n'), ((2076, 2142), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', 
(['values[1][0]', '(1.06423381528664)'], {'decimal': '(5)'}), '(values[1][0], 1.06423381528664, decimal=5)\n', (2099, 2142), True, 'import numpy.testing as npt\n'), ((2151, 2219), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[0][1]', '(1.0493927480837946)'], {'decimal': '(5)'}), '(values[0][1], 1.0493927480837946, decimal=5)\n', (2174, 2219), True, 'import numpy.testing as npt\n'), ((2228, 2296), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[1][1]', '(0.3497975826945982)'], {'decimal': '(5)'}), '(values[1][1], 0.3497975826945982, decimal=5)\n', (2251, 2296), True, 'import numpy.testing as npt\n'), ((2339, 2352), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (2347, 2352), True, 'import numpy as np\n'), ((2365, 2378), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (2373, 2378), True, 'import numpy as np\n'), ((2533, 2597), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['f_xx[0]', '(0.40855527280658294)'], {'decimal': '(5)'}), '(f_xx[0], 0.40855527280658294, decimal=5)\n', (2556, 2597), True, 'import numpy.testing as npt\n'), ((2606, 2670), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['f_yy[0]', '(0.03787036829637164)'], {'decimal': '(5)'}), '(f_yy[0], 0.03787036829637164, decimal=5)\n', (2629, 2670), True, 'import numpy.testing as npt\n'), ((2680, 2744), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['f_xy[0]', '(-0.2471232696734742)'], {'decimal': '(5)'}), '(f_xy[0], -0.2471232696734742, decimal=5)\n', (2703, 2744), True, 'import numpy.testing as npt\n'), ((2758, 2777), 'numpy.array', 'np.array', (['[1, 3, 4]'], {}), '([1, 3, 4])\n', (2766, 2777), True, 'import numpy as np\n'), ((2788, 2807), 'numpy.array', 'np.array', (['[2, 1, 1]'], {}), '([2, 1, 1])\n', (2796, 2807), True, 'import numpy as np\n'), ((2868, 2937), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[0][0]', '(0.40855527280658294)'], {'decimal': 
'(5)'}), '(values[0][0], 0.40855527280658294, decimal=5)\n', (2891, 2937), True, 'import numpy.testing as npt\n'), ((2946, 3015), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[1][0]', '(0.03787036829637164)'], {'decimal': '(5)'}), '(values[1][0], 0.03787036829637164, decimal=5)\n', (2969, 3015), True, 'import numpy.testing as npt\n'), ((3025, 3094), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[2][0]', '(-0.2471232696734742)'], {'decimal': '(5)'}), '(values[2][0], -0.2471232696734742, decimal=5)\n', (3048, 3094), True, 'import numpy.testing as npt\n'), ((3103, 3173), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[0][1]', '(-0.04637750247544578)'], {'decimal': '(5)'}), '(values[0][1], -0.04637750247544578, decimal=5)\n', (3126, 3173), True, 'import numpy.testing as npt\n'), ((3183, 3252), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[1][1]', '(0.30577812878681554)'], {'decimal': '(5)'}), '(values[1][1], 0.30577812878681554, decimal=5)\n', (3206, 3252), True, 'import numpy.testing as npt\n'), ((3261, 3331), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['values[2][1]', '(-0.13205836172334798)'], {'decimal': '(5)'}), '(values[2][1], -0.13205836172334798, decimal=5)\n', (3284, 3331), True, 'import numpy.testing as npt\n'), ((3471, 3476), 'astrofunc.LensingProfiles.nfw.NFW', 'NFW', ([], {}), '()\n', (3474, 3476), False, 'from astrofunc.LensingProfiles.nfw import NFW\n'), ((3504, 3517), 'astrofunc.LensingProfiles.nfw_ellipse.NFW_ELLIPSE', 'NFW_ELLIPSE', ([], {}), '()\n', (3515, 3517), False, 'from astrofunc.LensingProfiles.nfw_ellipse import NFW_ELLIPSE\n')] |
import os
import curses
import numpy as np
from pathlib import Path
# Asset directories, relative to the process working directory.
ROOT = Path("terminal_dungeon")
WALL_DIR = ROOT / "wall_textures"      # integer-grid wall shading textures (*.txt)
SPRITE_DIR = ROOT / "sprite_textures"  # character-art sprite textures (*.txt)
def clamp(mi, val, ma):
    """Return *val* limited to [mi, ma]; the upper bound is applied first."""
    if val > ma:
        val = ma
    if val < mi:
        val = mi
    return val
class Renderer:
    """
    Graphic engine. Casts rays. Casts sprites. Kicks ass.

    Renders a ray-cast pseudo-3d view of ``player.game_map`` (plus billboarded
    sprites and a mini-map overlay) into a character buffer, then pushes the
    buffer to a curses screen.

    Notes
    -----
    If one wanted to add ceiling/floor textures, weapons, or anything new, just
    add a method and call it from ``update``.
    """
    max_hops = 20  # How far rays are cast.

    # Shading constants -- Modifying ascii_map should be safe.
    ascii_map = np.array(list(' .,:;<+*LtCa4U80dQM@'))
    shades = len(ascii_map) - 1
    side_shade = (shades + 1) // 5  # extra brightness for one wall orientation
    shade_dif = shades - side_shade

    _textures_on = True

    minimap_width = .2  # Fraction of screen
    minimap_height = .3
    minimap_pos = 5, 5  # minimap's lower-right corner's offset from screen's lower-right corner
    pad = 50  # How much extra space is added around the edge of the mini-map -- for large terminals this will need to be increased.

    def __init__(self, screen, player, wall_textures=None, sprite_textures=None):
        self.screen = screen
        self.resize()
        self.player = player
        self.game_map = player.game_map
        # '#' for walls, ' ' for floor; padded so the mini-map window can never
        # slice outside the array.
        self.mini_map = np.pad(np.where(self.game_map._map.T, '#', ' '), self.pad, constant_values=' ')
        self._load_textures(wall_textures or [ ], sprite_textures or [ ])

    @property
    def textures_on(self):
        # Textures can only be "on" if wall textures were actually loaded.
        return self.wall_textures and self._textures_on

    def toggle_textures(self):
        self._textures_on = not self._textures_on

    def resize(self):
        """Re-query the terminal size and rebuild all size-dependent buffers."""
        try:  # linux
            w, h = os.get_terminal_size()
            curses.resizeterm(h, w)
        except Exception:  # windows -- fall back to curses' own bookkeeping
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; best-effort fallback is preserved.
            h, w = self.screen.getmaxyx()
            w -= 1
        self.height = h
        self.width = w
        self.angle_increment = 1 / w  # angle between adjacent rays
        self.floor_y = h // 2
        self.distances = np.zeros(w)  # per-column wall distance (sprite occlusion)
        self.buffer = np.full((h, w), " ")

    def _load_textures(self, wall_textures, sprite_textures):
        # Wall textures will be integer arrays, while sprite textures are character arrays.
        # This because the values in wall textures will add or subtract brightness to the current wall shading.
        # If we used character arrays for walls, we wouldn't have different shades for N/S and E/W walls and
        # walls further away wouldn't be dimmer, diminishing the 3d effect.
        # This could be changed though, and would simplify some of the texture drawing logic in `cast_ray`.
        self.wall_textures = [ ]
        for name in wall_textures:
            wall_lines = (WALL_DIR / (name + ".txt")).read_text().splitlines()
            wall_as_integer_array = [list(map(int, line)) for line in wall_lines]
            self.wall_textures.append(np.array(wall_as_integer_array).T)

        self.sprite_textures = { }
        for name in sprite_textures:
            sprite_lines = (SPRITE_DIR / (name + ".txt")).read_text().splitlines()
            sprite_as_character_array = list(map(list, sprite_lines))
            self.sprite_textures[name] = np.array(sprite_as_character_array).T

    def cast_ray(self, column):
        """
        Cast rays and draw columns whose heights correspond to the distance a ray traveled
        until it hit a wall.
        """
        player = self.player
        ray_angle = np.array((1, 2 * column * self.angle_increment - 1)) @ player.cam
        map_pos = player.pos.astype(int)
        delta = abs(1 / ray_angle)
        step = np.sign(ray_angle)
        side_dis = step * (np.heaviside(step, 1) - player.pos % 1) * delta

        # Cast a ray until we hit a wall or hit max_hops (DDA grid walk).
        for _ in range(self.max_hops):
            side = 0 if side_dis[0] < side_dis[1] else 1
            side_dis[side] += delta[side]
            map_pos[side] += step[side]
            if self.game_map[map_pos]:
                break
        else:  # No walls in range
            self.distances[column] = float("inf")
            return

        # Not euclidean distance to avoid fish-eye effect.
        wall_dis = (map_pos[side] - player.pos[side] + (0 if step[side] == 1 else 1)) / ray_angle[side]
        # Save distance for sprite calculations.
        self.distances[column] = wall_dis

        h = self.height
        line_height = int(h / wall_dis)  # if wall_dis else h -- possible divide-by-0 error
        if line_height == 0:
            return  # Draw nothing

        jump_height = player.z * line_height
        line_start = max(0, int((h - line_height) / 2 + jump_height))
        line_end = min(h, int((h + line_height) / 2 + jump_height))
        drawn_height = line_end - line_start

        shade = min(drawn_height, self.shade_dif)
        shade += 0 if side else self.side_shade  # One side is brighter
        shade_buffer = np.full(drawn_height, shade)

        if self.textures_on:
            tex = self.wall_textures[self.game_map[map_pos] - 1]
            texture_width, texture_height = tex.shape
            wall_x = (player.pos[1 - side] + wall_dis * ray_angle[1 - side]) % 1
            tex_x = int(wall_x * texture_width)
            if (-1 if side == 1 else 1) * ray_angle[side] < 0:  # mirror texture on facing walls
                tex_x = texture_width - tex_x - 1
            offset = (line_height - drawn_height) / 2
            ys = np.arange(drawn_height) + offset
            tex_ys = (ys * texture_height / line_height).astype(int)
            # Add or subtract texture values to shade values.
            # Note 2 * n - 12 is 0 for n = 6, i.e., values above 6 are additive and
            # below 6 are subtractive. For larger ascii maps, one may want to use a linear
            # equation with a larger slope.
            shade_buffer += 2 * tex[tex_x, tex_ys] - 12
            np.clip(shade_buffer, 1, self.shades, out=shade_buffer)

        self.buffer[line_start:line_end, column] = self.ascii_map[shade_buffer]

    def cast_sprites(self):
        """Draw all sprites, furthest first, occluded by nearer walls."""
        buffer = self.buffer
        player = self.player
        h = self.height
        w = self.width
        sprites = self.game_map.sprites

        for sprite in sprites:
            # Relative position of sprite to player
            sprite.relative = player.pos - sprite.pos
        sprites.sort()

        # Camera Inverse used to calculate transformed position of sprites.
        cam_inv = np.linalg.inv(-player.cam[::-1])

        for sprite in sprites:  # Draw each sprite from furthest to closest.
            # Transformed position of sprites due to camera position
            x, y = sprite.relative @ cam_inv
            if y <= 0:  # Sprite is behind player, don't draw it.
                continue

            # Sprite x-position on screen
            sprite_x = int(w / 2 * (1 + x / y))
            sprite_height = int(h / y)
            sprite_width = int(w / y / 2)
            if sprite_height == 0 or sprite_width == 0:  # Sprite too small.
                continue

            jump_height = player.z * sprite_height
            start_y = clamp(0, int((h - sprite_height) / 2 + jump_height), h)
            end_y = clamp(0, int((h + sprite_height) / 2 + jump_height), h)
            start_x = clamp(0, -sprite_width // 2 + sprite_x, w)
            end_x = clamp(0, sprite_width // 2 + sprite_x, w)

            columns = np.arange(start_x, end_x)
            # Keep only columns where the sprite is closer than the wall hit.
            columns = columns[(0 <= columns) & (columns <= w) & (y <= self.distances[columns])]

            tex = self.sprite_textures[sprite.tex]
            tex_width, tex_height = tex.shape
            clip_y = (sprite_height - h) / 2 - jump_height
            tex_ys = np.clip((np.arange(start_y, end_y) + clip_y) * tex_height / sprite_height, 0, None).astype(int)
            clip_x = sprite_x - sprite_width / 2
            tex_xs = ((columns - clip_x) * tex_width / sprite_width).astype(int)
            tex_rect = tex[tex_xs][:, tex_ys].T
            # "0" texels are transparent: keep whatever is already in the buffer.
            buffer[start_y:end_y, columns] = np.where(tex_rect != "0", tex_rect, buffer[start_y:end_y, columns])

    def draw_minimap(self):
        """Blit a window of the padded map, centered on the player, into a corner."""
        x_offset, y_offset = self.minimap_pos
        width = int(self.minimap_width * self.width)
        width += width % 2
        hw = width // 2
        height = int(self.minimap_height * self.height)
        height += height % 2
        hh = height // 2  # half-height
        x, y = self.player.pos.astype(int) + self.pad
        r = -height - y_offset
        c = -width - x_offset
        self.buffer[r: -y_offset, c: -x_offset] = self.mini_map[y - hh: y + hh, x - hw: x + hw]
        self.buffer[r + hh, c + hw] = '@'  # the player

    def update(self):
        """Render one frame: floor, walls, sprites, mini-map, then flush to curses."""
        self.buffer[:, :] = " "  # Clear buffer
        self.buffer[self.floor_y:, ::2] = self.ascii_map[1]  # Draw floor
        for column in range(self.width):  # Draw walls
            self.cast_ray(column)
        self.cast_sprites()
        self.draw_minimap()
        # Push buffer to screen
        for row_num, row in enumerate(self.buffer):
            self.screen.addstr(row_num, 0, ''.join(row))
        self.screen.refresh()
| [
"numpy.clip",
"os.get_terminal_size",
"pathlib.Path",
"numpy.where",
"numpy.heaviside",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.sign",
"curses.resizeterm",
"numpy.full",
"numpy.arange"
] | [((76, 100), 'pathlib.Path', 'Path', (['"""terminal_dungeon"""'], {}), "('terminal_dungeon')\n", (80, 100), False, 'from pathlib import Path\n'), ((1939, 1950), 'numpy.zeros', 'np.zeros', (['w'], {}), '(w)\n', (1947, 1950), True, 'import numpy as np\n'), ((1973, 1993), 'numpy.full', 'np.full', (['(h, w)', '""" """'], {}), "((h, w), ' ')\n", (1980, 1993), True, 'import numpy as np\n'), ((3545, 3563), 'numpy.sign', 'np.sign', (['ray_angle'], {}), '(ray_angle)\n', (3552, 3563), True, 'import numpy as np\n'), ((4852, 4880), 'numpy.full', 'np.full', (['drawn_height', 'shade'], {}), '(drawn_height, shade)\n', (4859, 4880), True, 'import numpy as np\n'), ((6360, 6392), 'numpy.linalg.inv', 'np.linalg.inv', (['(-player.cam[::-1])'], {}), '(-player.cam[::-1])\n', (6373, 6392), True, 'import numpy as np\n'), ((1266, 1306), 'numpy.where', 'np.where', (['self.game_map._map.T', '"""#"""', '""" """'], {}), "(self.game_map._map.T, '#', ' ')\n", (1274, 1306), True, 'import numpy as np\n'), ((1657, 1679), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (1677, 1679), False, 'import os\n'), ((1692, 1715), 'curses.resizeterm', 'curses.resizeterm', (['h', 'w'], {}), '(h, w)\n', (1709, 1715), False, 'import curses\n'), ((3388, 3440), 'numpy.array', 'np.array', (['(1, 2 * column * self.angle_increment - 1)'], {}), '((1, 2 * column * self.angle_increment - 1))\n', (3396, 3440), True, 'import numpy as np\n'), ((5793, 5848), 'numpy.clip', 'np.clip', (['shade_buffer', '(1)', 'self.shades'], {'out': 'shade_buffer'}), '(shade_buffer, 1, self.shades, out=shade_buffer)\n', (5800, 5848), True, 'import numpy as np\n'), ((7309, 7334), 'numpy.arange', 'np.arange', (['start_x', 'end_x'], {}), '(start_x, end_x)\n', (7318, 7334), True, 'import numpy as np\n'), ((7931, 7998), 'numpy.where', 'np.where', (["(tex_rect != '0')", 'tex_rect', 'buffer[start_y:end_y, columns]'], {}), "(tex_rect != '0', tex_rect, buffer[start_y:end_y, columns])\n", (7939, 7998), True, 'import numpy as 
np\n'), ((3123, 3158), 'numpy.array', 'np.array', (['sprite_as_character_array'], {}), '(sprite_as_character_array)\n', (3131, 3158), True, 'import numpy as np\n'), ((5345, 5368), 'numpy.arange', 'np.arange', (['drawn_height'], {}), '(drawn_height)\n', (5354, 5368), True, 'import numpy as np\n'), ((2821, 2852), 'numpy.array', 'np.array', (['wall_as_integer_array'], {}), '(wall_as_integer_array)\n', (2829, 2852), True, 'import numpy as np\n'), ((3591, 3612), 'numpy.heaviside', 'np.heaviside', (['step', '(1)'], {}), '(step, 1)\n', (3603, 3612), True, 'import numpy as np\n'), ((7619, 7644), 'numpy.arange', 'np.arange', (['start_y', 'end_y'], {}), '(start_y, end_y)\n', (7628, 7644), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import edward as ed
from edward.models import Normal, Empirical
from scipy.special import erf
import importlib
import utils
importlib.reload(utils)
from utils import *
class hmc_model:
    """Bayesian NN (1 or 2 hidden layers) with posterior inference by HMC.

    Built on edward/TensorFlow-1 style graphs: ``train`` defines the BNN and
    runs ``ed.HMC``; ``predict`` draws posterior weight samples and evaluates
    the network in numpy for speed.
    """

    def __init__(self, activation_fn, data_noise,
                 b_0_var=1., w_0_var=1., u_var=1., g_var=1.,
                 hidden_size=100,
                 step_size=0.001, n_steps=40, n_samples=1000, burn_in=200, n_predict=50, deep_NN=False):
        ''' create object that will be a Bayesian NN w inference done by HMC '''
        self.name_ = 'hmc_NN_h' + str(hidden_size)
        self.activation_fn = activation_fn
        self.data_noise = data_noise
        self.hidden_size = hidden_size
        self.deep_NN = deep_NN

        # inference params
        self.step_size = step_size    # leapfrog step size
        self.n_steps = n_steps        # no. leapfrog steps in between samples
        self.n_samples = n_samples    # no. samples to collect
        self.burn_in = burn_in        # drop this number of burn in samples
        self.n_predict = n_predict    # take this number of samples when doing predictions

        if self.n_samples < self.burn_in:
            # FIX: raise ValueError (subclass of Exception, so backward compatible)
            raise ValueError('no. samples is less than burn in samples!')

        if self.deep_NN:
            print('going deep...')

        # variance for step fn, relu, erf
        self.b_0_var = b_0_var  # first layer bias variance
        self.w_0_var = w_0_var  # first layer weight variance

        # variance for rbf - we use williams 1996 notation
        # i.e. node = exp(-(x-u)^2 / 2*var_g)
        self.g_var = g_var  # param of rbf fn (fixed)
        self.u_var = u_var  # var of centers, as -> inf, goes to stationary cov dist
        return

    def train(self, X_train, y_train, X_val, is_print=True):
        ''' set up BNN and run HMC inference

        Note: `X_val` and `is_print` are currently unused; kept for interface
        compatibility with sibling models.
        '''

        def neural_network(X):
            # single-hidden-layer BNN forward pass using tf
            if self.activation_fn == 'relu':
                h = tf.maximum(tf.matmul(X, W_0) + b_0, 0)  # relu
            elif self.activation_fn == 'Lrelu':
                a = 0.2
                h = tf.maximum(tf.matmul(X, W_0) + b_0, a * (tf.matmul(X, W_0) + b_0))  # leaky relu
            elif self.activation_fn == 'erf':
                h = tf.erf(tf.matmul(X, W_0) + b_0)
            elif self.activation_fn == 'tanh':
                h = tf.tanh(tf.matmul(X, W_0) + b_0)
                # h = tf.tanh(1.23*tf.matmul(X, W_0) + b_0) # add 1.23 for close to GP erf
            elif self.activation_fn == 'sigmoid':
                h = tf.sigmoid(tf.matmul(X, W_0) + b_0)
            elif self.activation_fn == 'softplus':
                self.c = 2.  # if this is bigger -> relu behaviour, but less 'soft'
                # BUG FIX: was bare `c` (NameError); predict() reads self.c
                h = tf.divide(tf.log(tf.exp(tf.multiply(tf.matmul(X, W_0) + b_0, self.c)) + 1), self.c)
            elif self.activation_fn == 'rbf':
                self.beta_2 = 1 / (2 * self.g_var)
                h = tf.exp(-self.beta_2 * tf.square(X - W_0))
            h = tf.matmul(h, W_1)  # + b_1
            return tf.reshape(h, [-1])

        def neural_network_deep(X):
            # two-hidden-layer BNN forward pass using tf
            if self.activation_fn == 'relu':
                h1 = tf.maximum(tf.matmul(X, W_0) + b_0, 0)  # relu
                h = tf.maximum(tf.matmul(h1, W_1) + b_1, 0)  # relu
            elif self.activation_fn == 'Lrelu':
                a = 0.2
                h1 = tf.maximum(tf.matmul(X, W_0) + b_0, a * (tf.matmul(X, W_0) + b_0))  # leaky relu
                h = tf.maximum(tf.matmul(h1, W_1) + b_1, a * (tf.matmul(h1, W_1) + b_1))  # leaky relu
            elif self.activation_fn == 'erf':
                h1 = tf.erf(tf.matmul(X, W_0) + b_0)
                h = tf.erf(tf.matmul(h1, W_1) + b_1)
            else:
                raise Exception('tp: activation not implemented')
            h = tf.matmul(h, W_2)  # + b_2
            return tf.reshape(h, [-1])

        # prior std devs per activation; see Williams 1996 for rbf notation
        if self.activation_fn == 'relu' or self.activation_fn == 'softplus' or self.activation_fn == 'Lrelu':
            init_stddev_0_w = np.sqrt(self.w_0_var)  # /d_in
            init_stddev_0_b = np.sqrt(self.b_0_var)  # /d_in
            init_stddev_1_w = 1.0 / np.sqrt(self.hidden_size)  # *np.sqrt(10) # 2nd layer init. dist
        elif self.activation_fn == 'tanh' or self.activation_fn == 'erf':
            init_stddev_0_w = np.sqrt(self.w_0_var)  # 1st layer init. dist for weights
            init_stddev_0_b = np.sqrt(self.b_0_var)  # for bias
            init_stddev_1_w = 1.0 / np.sqrt(self.hidden_size)  # 2nd layer init. dist
        elif self.activation_fn == 'rbf':
            init_stddev_0_w = np.sqrt(self.u_var)  # centres = sig_u
            init_stddev_0_b = np.sqrt(self.g_var)  # fixed /beta
            init_stddev_1_w = 1.0 / np.sqrt(self.hidden_size)  # 2nd layer init. dist

        n = X_train.shape[0]
        X_dim = X_train.shape[1]
        y_dim = 1  # y_train.shape[1]

        with tf.name_scope("model"):
            W_0 = Normal(loc=tf.zeros([X_dim, self.hidden_size]),
                         scale=init_stddev_0_w * tf.ones([X_dim, self.hidden_size]),
                         name="W_0")
            if not self.deep_NN:
                W_1 = Normal(loc=tf.zeros([self.hidden_size, y_dim]),
                             scale=init_stddev_1_w * tf.ones([self.hidden_size, y_dim]),
                             name="W_1")
                b_0 = Normal(loc=tf.zeros(self.hidden_size),
                             scale=init_stddev_0_b * tf.ones(self.hidden_size),
                             name="b_0")
                b_1 = Normal(loc=tf.zeros(1), scale=tf.ones(1),
                             name="b_1")
            else:
                # NOTE(review): scale shape [hidden, y_dim] broadcasts against
                # loc shape [hidden, hidden] -- possibly unintended; confirm.
                W_1 = Normal(loc=tf.zeros([self.hidden_size, self.hidden_size]),
                             scale=init_stddev_1_w * tf.ones([self.hidden_size, y_dim]),
                             name="W_1")
                b_0 = Normal(loc=tf.zeros(self.hidden_size),
                             scale=init_stddev_0_b * tf.ones(self.hidden_size),
                             name="b_0")
                W_2 = Normal(loc=tf.zeros([self.hidden_size, y_dim]),
                             scale=init_stddev_1_w * tf.ones([self.hidden_size, y_dim]),
                             name="W_2")
                b_1 = Normal(loc=tf.zeros(self.hidden_size),
                             scale=init_stddev_1_w * tf.ones(self.hidden_size),
                             name="b_1")
                b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1),
                             name="b_2")

            X = tf.placeholder(tf.float32, [n, X_dim], name="X")
            if not self.deep_NN:
                y = Normal(loc=neural_network(X), scale=np.sqrt(self.data_noise) * tf.ones(n), name="y")
            else:
                y = Normal(loc=neural_network_deep(X), scale=np.sqrt(self.data_noise) * tf.ones(n), name="y")

        # inference: empirical distributions hold the HMC sample chains
        if not self.deep_NN:
            qW_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, X_dim, self.hidden_size])))
            qW_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size, y_dim])))
            qb_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size])))
            qb_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, y_dim])))
        else:
            qW_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, X_dim, self.hidden_size])))
            qW_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size, self.hidden_size])))
            qW_2 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size, y_dim])))
            qb_0 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size])))
            qb_1 = Empirical(tf.Variable(tf.zeros([self.n_samples, self.hidden_size])))
            qb_2 = Empirical(tf.Variable(tf.zeros([self.n_samples, y_dim])))

        # HMC tuning notes (Neal 2012): too large a step size gives low
        # acceptance; too small wastes computation or explores by random walk.
        # Tune step_size for reasonable acceptance, and pick n_steps (leapfrog
        # trajectory length) by trial and error -- L = 100 is a sensible start,
        # lowering/raising it depending on observed autocorrelation. Randomly
        # sampling step size/length can avoid near-periodic paths.
        if not self.deep_NN:
            inference = ed.HMC({W_0: qW_0, b_0: qb_0,
                                W_1: qW_1, b_1: qb_1},
                               data={X: X_train, y: y_train.ravel()})
        else:
            inference = ed.HMC({W_0: qW_0, b_0: qb_0,
                                W_1: qW_1, b_1: qb_1, W_2: qW_2, b_2: qb_2},
                               data={X: X_train, y: y_train.ravel()})
        inference.run(step_size=self.step_size, n_steps=self.n_steps)  # logdir='log'

        # drop first chunk of burn in samples
        if not self.deep_NN:
            self.qW_0_keep = qW_0.params[self.burn_in:].eval()
            self.qW_1_keep = qW_1.params[self.burn_in:].eval()
            self.qb_0_keep = qb_0.params[self.burn_in:].eval()
            self.qb_1_keep = qb_1.params[self.burn_in:].eval()
        else:
            self.qW_0_keep = qW_0.params[self.burn_in:].eval()
            self.qW_1_keep = qW_1.params[self.burn_in:].eval()
            self.qb_0_keep = qb_0.params[self.burn_in:].eval()
            self.qW_2_keep = qW_2.params[self.burn_in:].eval()
            self.qb_1_keep = qb_1.params[self.burn_in:].eval()
            self.qb_2_keep = qb_2.params[self.burn_in:].eval()
        return

    def predict(self, X_pred):
        ''' do predict on new data

        Returns (y_preds, y_pred_mu, y_pred_std): raw posterior draws plus
        their mean and std (std includes the data noise).
        '''

        def nn_predict_np(X, W_0, W_1, b_0, b_1):
            # numpy mirror of neural_network (faster than evaluating the tf graph)
            if self.activation_fn == 'relu':
                h = np.maximum(np.matmul(X, W_0) + b_0, 0)
            elif self.activation_fn == 'Lrelu':
                a = 0.2
                h = np.maximum(np.matmul(X, W_0) + b_0, a * (np.matmul(X, W_0) + b_0))
            elif self.activation_fn == 'erf':
                h = erf(np.matmul(X, W_0) + b_0)
            elif self.activation_fn == 'softplus':
                h = np.log(1 + np.exp(self.c * (np.matmul(X, W_0) + b_0))) / self.c
            elif self.activation_fn == 'tanh':
                h = np.tanh(np.matmul(X, W_0) + b_0)
            elif self.activation_fn == 'rbf':
                h = np.exp(-self.beta_2 * np.square(X - W_0))
            h = np.matmul(h, W_1)  # + b_1
            return np.reshape(h, [-1])

        def nn_predict_np_deep(X, W_0, W_1, W_2, b_0, b_1, b_2):
            # numpy mirror of neural_network_deep
            if self.activation_fn == 'relu':
                h1 = np.maximum(np.matmul(X, W_0) + b_0, 0)
                h = np.maximum(np.matmul(h1, W_1) + b_1, 0)
            elif self.activation_fn == 'Lrelu':
                a = 0.2
                h1 = np.maximum(np.matmul(X, W_0) + b_0, a * (np.matmul(X, W_0) + b_0))
                # BUG FIX: second operand used unbound `h`; must be `h1`
                h = np.maximum(np.matmul(h1, W_1) + b_1, a * (np.matmul(h1, W_1) + b_1))
            elif self.activation_fn == 'erf':
                h1 = erf(np.matmul(X, W_0) + b_0)
                h = erf(np.matmul(h1, W_1) + b_1)
            else:
                raise Exception('tp: other activations not implemented')
            h = np.matmul(h, W_2)  # + b_2
            return np.reshape(h, [-1])

        # predictive sampling with burn in
        y_preds = []
        print('\nsampling predictions...')
        for _ in range(self.n_predict):
            if self.n_predict == self.qW_0_keep.shape[0]:
                idx = _
            else:
                idx = np.random.randint(0, self.qW_0_keep.shape[0])  # sample from posterior
            # sampling all weights at the same index keeps the draw jointly consistent
            # use np instead of tf to speed up!
            if not self.deep_NN:
                temp = nn_predict_np(X_pred, self.qW_0_keep[idx], self.qW_1_keep[idx], self.qb_0_keep[idx], self.qb_1_keep[idx])
            else:
                temp = nn_predict_np_deep(X_pred, self.qW_0_keep[idx], self.qW_1_keep[idx], self.qW_2_keep[idx], self.qb_0_keep[idx], self.qb_1_keep[idx], self.qb_2_keep[idx])
            y_preds.append(temp)

        y_preds = np.array(y_preds)
        y_pred_mu = np.mean(y_preds, axis=0)
        y_pred_std = np.std(y_preds, axis=0)
        y_pred_std = np.sqrt(np.square(y_pred_std) + self.data_noise)  # add on data noise
        y_pred_mu = np.atleast_2d(y_pred_mu).T
        y_pred_std = np.atleast_2d(y_pred_std).T

        self.y_pred_mu = y_pred_mu
        self.y_pred_std = y_pred_std
        return y_preds, y_pred_mu, y_pred_std
| [
"numpy.mean",
"numpy.atleast_2d",
"numpy.sqrt",
"numpy.reshape",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.placeholder",
"numpy.square",
"numpy.array",
"numpy.random.randint",
"tensorflow.name_scope",
"tensorflow.matmul",
"importlib.reload",
"numpy.std",
"numpy.matmul",
"ten... | [((303, 326), 'importlib.reload', 'importlib.reload', (['utils'], {}), '(utils)\n', (319, 326), False, 'import importlib\n'), ((11730, 11747), 'numpy.array', 'np.array', (['y_preds'], {}), '(y_preds)\n', (11738, 11747), True, 'import numpy as np\n'), ((11763, 11787), 'numpy.mean', 'np.mean', (['y_preds'], {'axis': '(0)'}), '(y_preds, axis=0)\n', (11770, 11787), True, 'import numpy as np\n'), ((11802, 11825), 'numpy.std', 'np.std', (['y_preds'], {'axis': '(0)'}), '(y_preds, axis=0)\n', (11808, 11825), True, 'import numpy as np\n'), ((2704, 2721), 'tensorflow.matmul', 'tf.matmul', (['h', 'W_1'], {}), '(h, W_1)\n', (2713, 2721), True, 'import tensorflow as tf\n'), ((2739, 2758), 'tensorflow.reshape', 'tf.reshape', (['h', '[-1]'], {}), '(h, [-1])\n', (2749, 2758), True, 'import tensorflow as tf\n'), ((3388, 3405), 'tensorflow.matmul', 'tf.matmul', (['h', 'W_2'], {}), '(h, W_2)\n', (3397, 3405), True, 'import tensorflow as tf\n'), ((3423, 3442), 'tensorflow.reshape', 'tf.reshape', (['h', '[-1]'], {}), '(h, [-1])\n', (3433, 3442), True, 'import tensorflow as tf\n'), ((3570, 3591), 'numpy.sqrt', 'np.sqrt', (['self.w_0_var'], {}), '(self.w_0_var)\n', (3577, 3591), True, 'import numpy as np\n'), ((3621, 3642), 'numpy.sqrt', 'np.sqrt', (['self.b_0_var'], {}), '(self.b_0_var)\n', (3628, 3642), True, 'import numpy as np\n'), ((4331, 4353), 'tensorflow.name_scope', 'tf.name_scope', (['"""model"""'], {}), "('model')\n", (4344, 4353), True, 'import tensorflow as tf\n'), ((5433, 5481), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[n, X_dim]'], {'name': '"""X"""'}), "(tf.float32, [n, X_dim], name='X')\n", (5447, 5481), True, 'import tensorflow as tf\n'), ((10245, 10262), 'numpy.matmul', 'np.matmul', (['h', 'W_1'], {}), '(h, W_1)\n', (10254, 10262), True, 'import numpy as np\n'), ((10280, 10299), 'numpy.reshape', 'np.reshape', (['h', '[-1]'], {}), '(h, [-1])\n', (10290, 10299), True, 'import numpy as np\n'), ((10876, 10893), 'numpy.matmul', 'np.matmul', 
(['h', 'W_2'], {}), '(h, W_2)\n', (10885, 10893), True, 'import numpy as np\n'), ((10911, 10930), 'numpy.reshape', 'np.reshape', (['h', '[-1]'], {}), '(h, [-1])\n', (10921, 10930), True, 'import numpy as np\n'), ((11924, 11948), 'numpy.atleast_2d', 'np.atleast_2d', (['y_pred_mu'], {}), '(y_pred_mu)\n', (11937, 11948), True, 'import numpy as np\n'), ((11966, 11991), 'numpy.atleast_2d', 'np.atleast_2d', (['y_pred_std'], {}), '(y_pred_std)\n', (11979, 11991), True, 'import numpy as np\n'), ((3676, 3701), 'numpy.sqrt', 'np.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (3683, 3701), True, 'import numpy as np\n'), ((3829, 3850), 'numpy.sqrt', 'np.sqrt', (['self.w_0_var'], {}), '(self.w_0_var)\n', (3836, 3850), True, 'import numpy as np\n'), ((3907, 3928), 'numpy.sqrt', 'np.sqrt', (['self.b_0_var'], {}), '(self.b_0_var)\n', (3914, 3928), True, 'import numpy as np\n'), ((11198, 11243), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.qW_0_keep.shape[0]'], {}), '(0, self.qW_0_keep.shape[0])\n', (11215, 11243), True, 'import numpy as np\n'), ((11848, 11869), 'numpy.square', 'np.square', (['y_pred_std'], {}), '(y_pred_std)\n', (11857, 11869), True, 'import numpy as np\n'), ((3965, 3990), 'numpy.sqrt', 'np.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (3972, 3990), True, 'import numpy as np\n'), ((4071, 4090), 'numpy.sqrt', 'np.sqrt', (['self.u_var'], {}), '(self.u_var)\n', (4078, 4090), True, 'import numpy as np\n'), ((4131, 4150), 'numpy.sqrt', 'np.sqrt', (['self.g_var'], {}), '(self.g_var)\n', (4138, 4150), True, 'import numpy as np\n'), ((4375, 4410), 'tensorflow.zeros', 'tf.zeros', (['[X_dim, self.hidden_size]'], {}), '([X_dim, self.hidden_size])\n', (4383, 4410), True, 'import tensorflow as tf\n'), ((5785, 5836), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, X_dim, self.hidden_size]'], {}), '([self.n_samples, X_dim, self.hidden_size])\n', (5793, 5836), True, 'import tensorflow as tf\n'), ((5871, 5922), 'tensorflow.zeros', 
'tf.zeros', (['[self.n_samples, self.hidden_size, y_dim]'], {}), '([self.n_samples, self.hidden_size, y_dim])\n', (5879, 5922), True, 'import tensorflow as tf\n'), ((5957, 6001), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, self.hidden_size]'], {}), '([self.n_samples, self.hidden_size])\n', (5965, 6001), True, 'import tensorflow as tf\n'), ((6036, 6069), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, y_dim]'], {}), '([self.n_samples, y_dim])\n', (6044, 6069), True, 'import tensorflow as tf\n'), ((6112, 6163), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, X_dim, self.hidden_size]'], {}), '([self.n_samples, X_dim, self.hidden_size])\n', (6120, 6163), True, 'import tensorflow as tf\n'), ((6198, 6260), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, self.hidden_size, self.hidden_size]'], {}), '([self.n_samples, self.hidden_size, self.hidden_size])\n', (6206, 6260), True, 'import tensorflow as tf\n'), ((6295, 6346), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, self.hidden_size, y_dim]'], {}), '([self.n_samples, self.hidden_size, y_dim])\n', (6303, 6346), True, 'import tensorflow as tf\n'), ((6381, 6425), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, self.hidden_size]'], {}), '([self.n_samples, self.hidden_size])\n', (6389, 6425), True, 'import tensorflow as tf\n'), ((6460, 6504), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, self.hidden_size]'], {}), '([self.n_samples, self.hidden_size])\n', (6468, 6504), True, 'import tensorflow as tf\n'), ((6539, 6572), 'tensorflow.zeros', 'tf.zeros', (['[self.n_samples, y_dim]'], {}), '([self.n_samples, y_dim])\n', (6547, 6572), True, 'import tensorflow as tf\n'), ((1896, 1913), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (1905, 1913), True, 'import tensorflow as tf\n'), ((2886, 2903), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (2895, 2903), True, 'import tensorflow as tf\n'), ((2939, 2957), 'tensorflow.matmul', 'tf.matmul', (['h1', 'W_1'], {}), 
'(h1, W_1)\n', (2948, 2957), True, 'import tensorflow as tf\n'), ((4192, 4217), 'numpy.sqrt', 'np.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4199, 4217), True, 'import numpy as np\n'), ((4434, 4468), 'tensorflow.ones', 'tf.ones', (['[X_dim, self.hidden_size]'], {}), '([X_dim, self.hidden_size])\n', (4441, 4468), True, 'import tensorflow as tf\n'), ((4536, 4571), 'tensorflow.zeros', 'tf.zeros', (['[self.hidden_size, y_dim]'], {}), '([self.hidden_size, y_dim])\n', (4544, 4571), True, 'import tensorflow as tf\n'), ((4670, 4696), 'tensorflow.zeros', 'tf.zeros', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4678, 4696), True, 'import tensorflow as tf\n'), ((4786, 4797), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (4794, 4797), True, 'import tensorflow as tf\n'), ((4805, 4815), 'tensorflow.ones', 'tf.ones', (['(1)'], {}), '(1)\n', (4812, 4815), True, 'import tensorflow as tf\n'), ((4865, 4911), 'tensorflow.zeros', 'tf.zeros', (['[self.hidden_size, self.hidden_size]'], {}), '([self.hidden_size, self.hidden_size])\n', (4873, 4911), True, 'import tensorflow as tf\n'), ((5010, 5036), 'tensorflow.zeros', 'tf.zeros', (['self.hidden_size'], {}), '(self.hidden_size)\n', (5018, 5036), True, 'import tensorflow as tf\n'), ((5126, 5161), 'tensorflow.zeros', 'tf.zeros', (['[self.hidden_size, y_dim]'], {}), '([self.hidden_size, y_dim])\n', (5134, 5161), True, 'import tensorflow as tf\n'), ((5260, 5286), 'tensorflow.zeros', 'tf.zeros', (['self.hidden_size'], {}), '(self.hidden_size)\n', (5268, 5286), True, 'import tensorflow as tf\n'), ((5376, 5387), 'tensorflow.zeros', 'tf.zeros', (['(1)'], {}), '(1)\n', (5384, 5387), True, 'import tensorflow as tf\n'), ((5395, 5405), 'tensorflow.ones', 'tf.ones', (['(1)'], {}), '(1)\n', (5402, 5405), True, 'import tensorflow as tf\n'), ((9742, 9759), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (9751, 9759), True, 'import numpy as np\n'), ((10416, 10433), 'numpy.matmul', 'np.matmul', (['X', 
'W_0'], {}), '(X, W_0)\n', (10425, 10433), True, 'import numpy as np\n'), ((10462, 10480), 'numpy.matmul', 'np.matmul', (['h1', 'W_1'], {}), '(h1, W_1)\n', (10471, 10480), True, 'import numpy as np\n'), ((1998, 2015), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (2007, 2015), True, 'import tensorflow as tf\n'), ((3043, 3060), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (3052, 3060), True, 'import tensorflow as tf\n'), ((3129, 3147), 'tensorflow.matmul', 'tf.matmul', (['h1', 'W_1'], {}), '(h1, W_1)\n', (3138, 3147), True, 'import tensorflow as tf\n'), ((4595, 4629), 'tensorflow.ones', 'tf.ones', (['[self.hidden_size, y_dim]'], {}), '([self.hidden_size, y_dim])\n', (4602, 4629), True, 'import tensorflow as tf\n'), ((4720, 4745), 'tensorflow.ones', 'tf.ones', (['self.hidden_size'], {}), '(self.hidden_size)\n', (4727, 4745), True, 'import tensorflow as tf\n'), ((4935, 4969), 'tensorflow.ones', 'tf.ones', (['[self.hidden_size, y_dim]'], {}), '([self.hidden_size, y_dim])\n', (4942, 4969), True, 'import tensorflow as tf\n'), ((5060, 5085), 'tensorflow.ones', 'tf.ones', (['self.hidden_size'], {}), '(self.hidden_size)\n', (5067, 5085), True, 'import tensorflow as tf\n'), ((5185, 5219), 'tensorflow.ones', 'tf.ones', (['[self.hidden_size, y_dim]'], {}), '([self.hidden_size, y_dim])\n', (5192, 5219), True, 'import tensorflow as tf\n'), ((5310, 5335), 'tensorflow.ones', 'tf.ones', (['self.hidden_size'], {}), '(self.hidden_size)\n', (5317, 5335), True, 'import tensorflow as tf\n'), ((5555, 5579), 'numpy.sqrt', 'np.sqrt', (['self.data_noise'], {}), '(self.data_noise)\n', (5562, 5579), True, 'import numpy as np\n'), ((5582, 5592), 'tensorflow.ones', 'tf.ones', (['n'], {}), '(n)\n', (5589, 5592), True, 'import tensorflow as tf\n'), ((5662, 5686), 'numpy.sqrt', 'np.sqrt', (['self.data_noise'], {}), '(self.data_noise)\n', (5669, 5686), True, 'import numpy as np\n'), ((5689, 5699), 'tensorflow.ones', 'tf.ones', (['n'], {}), '(n)\n', 
(5696, 5699), True, 'import tensorflow as tf\n'), ((9837, 9854), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (9846, 9854), True, 'import numpy as np\n'), ((10559, 10576), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (10568, 10576), True, 'import numpy as np\n'), ((10631, 10649), 'numpy.matmul', 'np.matmul', (['h1', 'W_1'], {}), '(h1, W_1)\n', (10640, 10649), True, 'import numpy as np\n'), ((2025, 2042), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (2034, 2042), True, 'import tensorflow as tf\n'), ((2117, 2134), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (2126, 2134), True, 'import tensorflow as tf\n'), ((3070, 3087), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (3079, 3087), True, 'import tensorflow as tf\n'), ((3157, 3175), 'tensorflow.matmul', 'tf.matmul', (['h1', 'W_1'], {}), '(h1, W_1)\n', (3166, 3175), True, 'import tensorflow as tf\n'), ((3251, 3268), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (3260, 3268), True, 'import tensorflow as tf\n'), ((3291, 3309), 'tensorflow.matmul', 'tf.matmul', (['h1', 'W_1'], {}), '(h1, W_1)\n', (3300, 3309), True, 'import tensorflow as tf\n'), ((9864, 9881), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (9873, 9881), True, 'import numpy as np\n'), ((9939, 9956), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (9948, 9956), True, 'import numpy as np\n'), ((10586, 10603), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (10595, 10603), True, 'import numpy as np\n'), ((10659, 10676), 'numpy.matmul', 'np.matmul', (['h', 'W_1'], {}), '(h, W_1)\n', (10668, 10676), True, 'import numpy as np\n'), ((10735, 10752), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (10744, 10752), True, 'import numpy as np\n'), ((10772, 10790), 'numpy.matmul', 'np.matmul', (['h1', 'W_1'], {}), '(h1, W_1)\n', (10781, 10790), True, 'import numpy as np\n'), ((2196, 
2213), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (2205, 2213), True, 'import tensorflow as tf\n'), ((2360, 2377), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (2369, 2377), True, 'import tensorflow as tf\n'), ((10127, 10144), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (10136, 10144), True, 'import numpy as np\n'), ((10217, 10235), 'numpy.square', 'np.square', (['(X - W_0)'], {}), '(X - W_0)\n', (10226, 10235), True, 'import numpy as np\n'), ((2676, 2694), 'tensorflow.square', 'tf.square', (['(X - W_0)'], {}), '(X - W_0)\n', (2685, 2694), True, 'import tensorflow as tf\n'), ((10038, 10055), 'numpy.matmul', 'np.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (10047, 10055), True, 'import numpy as np\n'), ((2540, 2557), 'tensorflow.matmul', 'tf.matmul', (['X', 'W_0'], {}), '(X, W_0)\n', (2549, 2557), True, 'import tensorflow as tf\n')] |
import pickle
import xlsxwriter
import numpy as np
import os
def load(filename):
    """Load and return a pickled object from *filename*.

    Bug fix: the original assigned the result to ``loaded_dict`` but then
    returned the builtin ``dict`` type instead of the loaded object, and
    left the file handle open.  A ``with`` block now guarantees closure.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def np_2darray_converter(matrix):
    """Coerce *matrix* into a 2-D numpy array suitable for Excel output.

    A dict becomes a two-column table of (key, str(value)) rows.  Returns
    ``(rows, cols, array)``; returns ``(-1, -1, -1)`` when the result has
    object dtype, which cannot be written to a worksheet.
    """
    if type(matrix) is dict:
        # Flatten the dict into parallel key/value columns; values are
        # stringified so mixed types still fit one array.
        key_col = list(matrix.keys())
        val_col = [str(entry) for entry in matrix.values()]
        matrix = np.array([key_col, val_col]).transpose()
    table = np.array(matrix, ndmin=2)
    if table.dtype == 'O':
        # Object arrays (nested dicts, ragged data, ...) are unsupported.
        return -1, -1, -1
    n_rows, n_cols = table.shape[0], table.shape[1]
    return n_rows, n_cols, table
def create_dat(filename, keys, data=None):
    """Pickle *keys*/*data* to *filename*.

    When *data* is None, *keys* is taken to be a ready-made dictionary and
    pickled as-is; otherwise a dict pairing ``keys[i]`` with ``data[i]`` is
    built first.

    Fixes: ``data == None`` is replaced with ``data is None`` (``==`` against
    None is unidiomatic and is elementwise — hence ambiguous — for numpy
    arrays), and the file is opened with a context manager so the handle is
    always closed.
    """
    if data is None:
        payload = keys
    else:
        payload = {key: data[i] for i, key in enumerate(keys)}
    with open(filename, 'wb') as handle:
        pickle.dump(payload, handle)
def create_xlsx(filename, keys, data=None, dat=False):
    """Write tabular data to an .xlsx workbook, one titled column group per entry.

    Parameters
    ----------
    filename : path of the workbook to create.
    keys : list of column titles, or — when *data* is None — a dict mapping
        titles to their data.
    data : optional list parallel to *keys*; each entry is converted via
        np_2darray_converter before being written.
    dat : when True, also pickle the same content next to *filename* with a
        ``.dat`` extension via create_dat.

    Fixes: ``data == None`` -> ``data is None`` (elementwise/ambiguous for
    numpy arrays); arguments are validated *before* the Workbook is created
    (the original returned early without ever calling ``workbook.close()``,
    leaving a dangling, corrupt file); writing happens inside try/finally so
    the workbook is always finalized.
    """
    if dat:
        fname, fext = os.path.splitext(filename)
        create_dat(fname + '.dat', keys, data)
    # Resolve titles/data up front so an invalid argument never leaks an
    # open Workbook.
    if data is None:
        try:
            d_keys = list(keys.keys())
            d_data = list(keys.values())
        except AttributeError:
            print('A dictionary is expected as 2nd positional argument')
            return
    else:
        d_keys = keys
        d_data = data
    workbook = xlsxwriter.Workbook(filename)
    try:
        worksheet = workbook.add_worksheet()
        title_format = workbook.add_format({'font_size': 16, 'font_color': '#02172C',
                                            'bold': True, 'bg_color': '#7FF000'})  # A9E063
        separator_format = workbook.add_format({'bg_color': '#434446'})  # A72307
        current_column = 3
        current_row = 3
        for i in range(len(d_data)):
            worksheet.write(current_row, current_column, d_keys[i], title_format)
            rows, cols, current_data = np_2darray_converter(d_data[i])
            if rows == -1:
                # Unconvertible (object-dtype) entry: skip it.
                continue
            if rows == 1:
                # Single row: write it as one vertical column.
                worksheet.write_column(current_row + 2, current_column, current_data[0])
                current_column += 4
                worksheet.set_column(current_column - 2, current_column - 2,
                                     width=3, cell_format=separator_format)
            else:
                # Extend the title highlight across the full column group.
                worksheet.conditional_format(current_row, current_column + 1,
                                             current_row, current_column + cols - 1,
                                             {'type': 'blanks', 'format': title_format})
                for j in range(rows):
                    worksheet.write_row(current_row + 2 + j, current_column, current_data[j])
                current_column += cols + 3
                worksheet.set_column(current_column - 2, current_column - 2,
                                     width=3, cell_format=separator_format)
    finally:
        # Always finalize the workbook, even if a write raises.
        workbook.close()
| [
"numpy.array",
"os.path.splitext",
"xlsxwriter.Workbook"
] | [((468, 493), 'numpy.array', 'np.array', (['matrix'], {'ndmin': '(2)'}), '(matrix, ndmin=2)\n', (476, 493), True, 'import numpy as np\n'), ((1200, 1229), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['filename'], {}), '(filename)\n', (1219, 1229), False, 'import xlsxwriter\n'), ((1110, 1136), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1126, 1136), False, 'import os\n'), ((413, 437), 'numpy.array', 'np.array', (['[keys, values]'], {}), '([keys, values])\n', (421, 437), True, 'import numpy as np\n')] |
import numpy as np

# Synthetic measurement series: 30 runs of a slowly modulated quantity
# around 3050, expressed per unit wavelength and then rescaled back.
wavelength = 626.34
runnumber = np.arange(30)
constant = (3050 + 0.6 * np.cos(np.pi * runnumber / 40.0)) * (1 / wavelength)
exactdata = constant * wavelength
errorbar = 0.1 * constant
# Gaussian noise around the exact values; one sigma per point = errorbar.
realdata = np.random.normal(exactdata, errorbar)
| [
"numpy.random.normal",
"numpy.cos"
] | [((196, 233), 'numpy.random.normal', 'np.random.normal', (['exactdata', 'errorbar'], {}), '(exactdata, errorbar)\n', (212, 233), True, 'import numpy as np\n'), ((70, 94), 'numpy.cos', 'np.cos', (['(np.pi * i / 40.0)'], {}), '(np.pi * i / 40.0)\n', (76, 94), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 11:52:48 2019
This is the module for evaluation metrics
@author: Cheng
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 21:31:32 2019
@author: cheng
"""
import numpy as np
from scipy.spatial.distance import directed_hausdorff
def get_classified_errors(test_pred, indexed_predictions, scale):
    """Compute trajectory errors overall and split by road-user type.

    User type codes (column 4 of each record):
        1: pedestrian, 2: cyclist, 3: vehicle.
    Returns the error tables from get_evaluation stacked vertically in the
    order: mixed, pedestrian, cyclist, vehicle.
    """
    _, num_pred, pred_seq, _ = indexed_predictions.shape
    flat_preds = np.reshape(indexed_predictions, [-1, 5])
    flat_truth = np.reshape(test_pred, [-1, 5])
    # Errors over every user, regardless of type
    mixed_errors = get_evaluation(test_pred, indexed_predictions, num_pred, scale)
    print('\nmixed_errors by ADE and FDE\n', np.array_str(mixed_errors[0:2, 2], precision=2, suppress_small=True))
    # Per-type errors: filter rows by the type code, then restore the
    # per-user sequence shapes before evaluating.
    per_type_errors = []
    for type_code in (1, 2, 3):
        type_truth = np.reshape(flat_truth[flat_truth[:, 4] == type_code, :],
                                [-1, pred_seq, 5])
        type_preds = np.reshape(flat_preds[flat_preds[:, 4] == type_code, :],
                                [-1, num_pred, pred_seq, 5])
        per_type_errors.append(get_evaluation(type_truth, type_preds, num_pred, scale))
    errors = np.vstack([mixed_errors] + per_type_errors)
    return errors
def get_evaluation(test_pred, predictions, num_pred, scale):
    """Evaluate every predicted trajectory against its ground truth.

    Returns a 5x4 table: rows are the five metrics from get_eva_values,
    columns are (mean over all predictions, their std, mean of each user's
    best prediction, its std).  Coordinates (columns 2:4) are multiplied by
    *scale* before measuring.
    """
    metrics = np.zeros([len(predictions), num_pred, 5])
    for user_idx, ground_truth in enumerate(test_pred):
        gt_xy = ground_truth[:, 2:4] * scale
        for pred_idx, candidate in enumerate(predictions[user_idx]):
            metrics[user_idx, pred_idx, :] = get_eva_values(gt_xy,
                                                           candidate[:, 2:4] * scale)
    # Statistics over all (user, prediction) pairs ...
    flat = np.reshape(metrics, [-1, 5])
    # ... and over each user's best prediction per metric.
    best = np.amin(metrics, axis=1)
    columns = (np.reshape(np.mean(flat, axis=0), [-1, 1]),
               np.reshape(np.std(flat, axis=0), [-1, 1]),
               np.reshape(np.mean(best, axis=0), [-1, 1]),
               np.reshape(np.std(best, axis=0), [-1, 1]))
    return np.concatenate(columns, axis=1)
def get_eva_values(y_t, y_p):
    """Compute the five trajectory metrics for one (truth, prediction) pair.

    y_t, y_p: steps x 2 arrays of (x, y) positions.
    Returns [Euclidean (ADE), final displacement, Hausdorff, speed
    deviation, heading error] as a list.
    """
    return [get_euclidean(y_t, y_p),
            get_last_disp(y_t, y_p),
            get_hausdorff(y_t, y_p),
            get_speeddev(y_t, y_p),
            get_headerror(y_t, y_p)]
def get_euclidean(y_true, y_prediction):
    """Average displacement error: mean per-step Euclidean distance."""
    step_distances = np.sqrt(np.sum((y_true - y_prediction) ** 2, axis=1))
    return np.mean(step_distances)
def get_last_disp(y_true, y_prediction):
    """Final displacement error: distance between the two trajectory endpoints."""
    final_gap = y_true[-1, :] - y_prediction[-1, :]
    return np.sqrt(np.dot(final_gap, final_gap))
def get_hausdorff(y_true, y_prediction):
    """Symmetric Hausdorff distance.

    scipy's directed_hausdorff is one-sided, so both directions are
    evaluated and the larger one returned.
    """
    forward = directed_hausdorff(y_true, y_prediction)[0]
    backward = directed_hausdorff(y_prediction, y_true)[0]
    return max(forward, backward)
def get_speeddev(y_true, y_prediction):
    """Mean absolute difference between per-step speeds of truth and prediction.

    Returns 0 for single-point trajectories (no step exists).
    """
    n_steps = len(y_true)
    if n_steps == 1:
        return 0
    total = 0.0
    for t in range(n_steps - 1):
        true_speed = np.linalg.norm(y_true[t + 1] - y_true[t])
        pred_speed = np.linalg.norm(y_prediction[t + 1] - y_prediction[t])
        total += abs(true_speed - pred_speed)
    return total / (n_steps - 1)
def get_headerror(y_true, y_prediction):
    """Mean per-step heading difference in degrees.

    Each step's heading angle is taken via arctan2 of the displacement;
    the absolute angle difference is folded into [0, pi) before averaging.
    Returns 0 for single-point predictions (no heading exists).
    """
    if len(y_prediction) == 1:
        return 0
    total = 0.0
    for t in range(len(y_true) - 1):
        true_step = y_true[t + 1] - y_true[t]
        pred_step = y_prediction[t + 1] - y_prediction[t]
        true_angle = np.arctan2(true_step[1], true_step[0])
        pred_angle = np.arctan2(pred_step[1], pred_step[0])
        total += np.rad2deg(abs(true_angle - pred_angle) % np.pi)
    return total / (len(y_true) - 1)
| [
"numpy.mean",
"numpy.reshape",
"numpy.amin",
"scipy.spatial.distance.directed_hausdorff",
"numpy.array_str",
"numpy.linalg.norm",
"numpy.arctan2",
"numpy.vstack",
"numpy.std"
] | [((569, 609), 'numpy.reshape', 'np.reshape', (['indexed_predictions', '[-1, 5]'], {}), '(indexed_predictions, [-1, 5])\n', (579, 609), True, 'import numpy as np\n'), ((630, 660), 'numpy.reshape', 'np.reshape', (['test_pred', '[-1, 5]'], {}), '(test_pred, [-1, 5])\n', (640, 660), True, 'import numpy as np\n'), ((972, 1045), 'numpy.reshape', 'np.reshape', (['all_test_pred[all_test_pred[:, 4] == 1, :]', '[-1, pred_seq, 5]'], {}), '(all_test_pred[all_test_pred[:, 4] == 1, :], [-1, pred_seq, 5])\n', (982, 1045), True, 'import numpy as np\n'), ((1065, 1138), 'numpy.reshape', 'np.reshape', (['all_test_pred[all_test_pred[:, 4] == 2, :]', '[-1, pred_seq, 5]'], {}), '(all_test_pred[all_test_pred[:, 4] == 2, :], [-1, pred_seq, 5])\n', (1075, 1138), True, 'import numpy as np\n'), ((1158, 1231), 'numpy.reshape', 'np.reshape', (['all_test_pred[all_test_pred[:, 4] == 3, :]', '[-1, pred_seq, 5]'], {}), '(all_test_pred[all_test_pred[:, 4] == 3, :], [-1, pred_seq, 5])\n', (1168, 1231), True, 'import numpy as np\n'), ((1256, 1347), 'numpy.reshape', 'np.reshape', (['all_preds_prime[all_preds_prime[:, 4] == 1, :]', '[-1, num_pred, pred_seq, 5]'], {}), '(all_preds_prime[all_preds_prime[:, 4] == 1, :], [-1, num_pred,\n pred_seq, 5])\n', (1266, 1347), True, 'import numpy as np\n'), ((1364, 1455), 'numpy.reshape', 'np.reshape', (['all_preds_prime[all_preds_prime[:, 4] == 2, :]', '[-1, num_pred, pred_seq, 5]'], {}), '(all_preds_prime[all_preds_prime[:, 4] == 2, :], [-1, num_pred,\n pred_seq, 5])\n', (1374, 1455), True, 'import numpy as np\n'), ((1472, 1563), 'numpy.reshape', 'np.reshape', (['all_preds_prime[all_preds_prime[:, 4] == 3, :]', '[-1, num_pred, pred_seq, 5]'], {}), '(all_preds_prime[all_preds_prime[:, 4] == 3, :], [-1, num_pred,\n pred_seq, 5])\n', (1482, 1563), True, 'import numpy as np\n'), ((2091, 2152), 'numpy.vstack', 'np.vstack', (['(mixed_errors, ped_errors, cyc_errors, veh_errors)'], {}), '((mixed_errors, ped_errors, cyc_errors, veh_errors))\n', (2100, 2152), True, 
'import numpy as np\n'), ((2645, 2677), 'numpy.reshape', 'np.reshape', (['evaluations', '[-1, 5]'], {}), '(evaluations, [-1, 5])\n', (2655, 2677), True, 'import numpy as np\n'), ((2697, 2730), 'numpy.mean', 'np.mean', (['mean_evaluations'], {'axis': '(0)'}), '(mean_evaluations, axis=0)\n', (2704, 2730), True, 'import numpy as np\n'), ((2746, 2778), 'numpy.std', 'np.std', (['mean_evaluations'], {'axis': '(0)'}), '(mean_evaluations, axis=0)\n', (2752, 2778), True, 'import numpy as np\n'), ((2874, 2902), 'numpy.amin', 'np.amin', (['evaluations'], {'axis': '(1)'}), '(evaluations, axis=1)\n', (2881, 2902), True, 'import numpy as np\n'), ((2920, 2952), 'numpy.mean', 'np.mean', (['min_evaluations'], {'axis': '(0)'}), '(min_evaluations, axis=0)\n', (2927, 2952), True, 'import numpy as np\n'), ((2967, 2998), 'numpy.std', 'np.std', (['min_evaluations'], {'axis': '(0)'}), '(min_evaluations, axis=0)\n', (2973, 2998), True, 'import numpy as np\n'), ((3948, 3993), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_true - y_prediction)'], {'axis': '(1)'}), '(y_true - y_prediction, axis=1)\n', (3962, 3993), True, 'import numpy as np\n'), ((4012, 4030), 'numpy.mean', 'np.mean', (['Euclidean'], {}), '(Euclidean)\n', (4019, 4030), True, 'import numpy as np\n'), ((4160, 4211), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_true[-1, :] - y_prediction[-1, :])'], {}), '(y_true[-1, :] - y_prediction[-1, :])\n', (4174, 4211), True, 'import numpy as np\n'), ((831, 899), 'numpy.array_str', 'np.array_str', (['mixed_errors[0:2, 2]'], {'precision': '(2)', 'suppress_small': '(True)'}), '(mixed_errors[0:2, 2], precision=2, suppress_small=True)\n', (843, 899), True, 'import numpy as np\n'), ((3062, 3094), 'numpy.reshape', 'np.reshape', (['mean_errors', '[-1, 1]'], {}), '(mean_errors, [-1, 1])\n', (3072, 3094), True, 'import numpy as np\n'), ((3126, 3155), 'numpy.reshape', 'np.reshape', (['mean_std', '[-1, 1]'], {}), '(mean_std, [-1, 1])\n', (3136, 3155), True, 'import numpy as np\n'), ((3186, 3217), 
'numpy.reshape', 'np.reshape', (['min_errors', '[-1, 1]'], {}), '(min_errors, [-1, 1])\n', (3196, 3217), True, 'import numpy as np\n'), ((3248, 3276), 'numpy.reshape', 'np.reshape', (['min_std', '[-1, 1]'], {}), '(min_std, [-1, 1])\n', (3258, 3276), True, 'import numpy as np\n'), ((4476, 4516), 'scipy.spatial.distance.directed_hausdorff', 'directed_hausdorff', (['y_true', 'y_prediction'], {}), '(y_true, y_prediction)\n', (4494, 4516), False, 'from scipy.spatial.distance import directed_hausdorff\n'), ((4521, 4561), 'scipy.spatial.distance.directed_hausdorff', 'directed_hausdorff', (['y_prediction', 'y_true'], {}), '(y_prediction, y_true)\n', (4539, 4561), False, 'from scipy.spatial.distance import directed_hausdorff\n'), ((4826, 4867), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_true[t + 1] - y_true[t])'], {}), '(y_true[t + 1] - y_true[t])\n', (4840, 4867), True, 'import numpy as np\n'), ((4895, 4948), 'numpy.linalg.norm', 'np.linalg.norm', (['(y_prediction[t + 1] - y_prediction[t])'], {}), '(y_prediction[t + 1] - y_prediction[t])\n', (4909, 4948), True, 'import numpy as np\n'), ((5414, 5442), 'numpy.arctan2', 'np.arctan2', (['ycoor_t', 'xcoor_t'], {}), '(ycoor_t, xcoor_t)\n', (5424, 5442), True, 'import numpy as np\n'), ((5593, 5621), 'numpy.arctan2', 'np.arctan2', (['ycoor_p', 'xcoor_p'], {}), '(ycoor_p, xcoor_p)\n', (5603, 5621), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for doci_hamiltonian.py."""
import os
import unittest
import numpy
from openfermion.config import EQ_TOLERANCE
from openfermion.chem.molecular_data import MolecularData
from openfermion.config import DATA_DIRECTORY
from openfermion.transforms import jordan_wigner
from openfermion.linalg import get_sparse_operator
from openfermion.ops.representations.doci_hamiltonian import (
DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci,
get_doci_from_integrals)
class IntegralTransformsTest(unittest.TestCase):
    """Consistency checks for the DOCI integral transforms."""
    def setUp(self):
        # H2 / STO-3G singlet at 0.7414 Angstrom, loaded from packaged data.
        self.geometry = [('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))]
        self.basis = 'sto-3g'
        self.multiplicity = 1
        self.filename = os.path.join(DATA_DIRECTORY, 'H2_sto-3g_singlet_0.7414')
        self.molecule = MolecularData(self.geometry, self.basis,
                                      self.multiplicity,
                                      filename=self.filename)
        self.molecule.load()
    def test_integrals_self_inverse(self):
        """integrals -> DOCI tensors -> projected integrals -> tensors round-trips."""
        one_body = self.molecule.one_body_integrals
        two_body = self.molecule.two_body_integrals
        hc, hr1, hr2 = get_doci_from_integrals(one_body, two_body)
        projected = get_projected_integrals_from_doci(hc, hr1, hr2)
        hc_test, hr1_test, hr2_test = get_doci_from_integrals(projected[0],
                                                              projected[1])
        self.assertTrue(numpy.allclose(hc, hc_test))
        self.assertTrue(numpy.allclose(hr1, hr1_test))
        print(hr2)
        print(hr2_test)
        self.assertTrue(numpy.allclose(hr2, hr2_test))
    def test_integrals_to_doci(self):
        """The DOCI tensors match the defining combinations of the integrals."""
        one_body = self.molecule.one_body_integrals
        two_body = self.molecule.two_body_integrals
        hc, hr1, hr2 = get_doci_from_integrals(one_body, two_body)
        for tensor in (hc, hr1, hr2):
            self.assertEqual(tensor.shape[0], 2)
        for p in range(2):
            self.assertEqual(
                hc[p] + hr2[p, p],
                2 * one_body[p, p] + two_body[p, p, p, p])
            for q in range(2):
                if p == q:
                    continue
                self.assertEqual(hr1[p, q], two_body[p, p, q, q])
                self.assertEqual(
                    hr2[p, q], 2 * two_body[p, q, q, p] -
                    two_body[p, q, p, q])
class DOCIHamiltonianTest(unittest.TestCase):
    """Unit tests for DOCIHamiltonian: indexing, arithmetic, and its
    embedding into the full qubit Hamiltonian."""
    def setUp(self):
        # H2 / STO-3G singlet at 0.7414 Angstrom, loaded from packaged data.
        self.geometry = [('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))]
        self.basis = 'sto-3g'
        self.multiplicity = 1
        self.filename = os.path.join(DATA_DIRECTORY, 'H2_sto-3g_singlet_0.7414')
        self.molecule = MolecularData(self.geometry,
                                      self.basis,
                                      self.multiplicity,
                                      filename=self.filename)
        self.molecule.load()
    def test_n_body_tensor_errors(self):
        """Invalid tensor assignment and malformed index keys must raise."""
        doci_hamiltonian = DOCIHamiltonian.zero(n_qubits=2)
        # n_body_tensors is derived state; direct assignment is rejected.
        with self.assertRaises(TypeError):
            doci_hamiltonian.n_body_tensors = 0
        # Keys whose operator pattern is not a DOCI term raise IndexError.
        with self.assertRaises(IndexError):
            _ = doci_hamiltonian[((0, 0), (0, 0))]
        with self.assertRaises(IndexError):
            _ = doci_hamiltonian[((0, 0), (0, 0), (0, 0), (0, 0))]
        with self.assertRaises(IndexError):
            _ = doci_hamiltonian[((1, 1), (0, 0))]
        # Orbital index out of range (8 on a 2-qubit Hamiltonian).
        with self.assertRaises(IndexError):
            _ = doci_hamiltonian[((0, 1), (2, 1), (3, 0), (8, 0))]
    def test_errors_operations(self):
        """Arithmetic with incompatible operands must raise TypeError."""
        doci_hamiltonian = DOCIHamiltonian.zero(n_qubits=2)
        doci_hamiltonian2 = DOCIHamiltonian.zero(n_qubits=3)
        with self.assertRaises(TypeError):
            doci_hamiltonian += 'a'
        with self.assertRaises(TypeError):
            doci_hamiltonian -= 'a'
        with self.assertRaises(TypeError):
            doci_hamiltonian *= 'a'
        with self.assertRaises(TypeError):
            doci_hamiltonian /= 'a'
        # Mixing Hamiltonians of different qubit counts is also rejected.
        with self.assertRaises(TypeError):
            doci_hamiltonian += doci_hamiltonian2
        with self.assertRaises(TypeError):
            doci_hamiltonian -= doci_hamiltonian2
    def test_adding_constants(self):
        """Adding/subtracting a scalar shifts only the constant term."""
        doci_hamiltonian = DOCIHamiltonian.zero(n_qubits=2)
        doci_hamiltonian += 2
        self.assertAlmostEqual(doci_hamiltonian.constant, 2)
        doci_hamiltonian -= 3
        self.assertAlmostEqual(doci_hamiltonian.constant, -1)
    def test_basic_operations(self):
        """Addition, subtraction, and scalar multiply/divide behave algebraically."""
        doci_hamiltonian1 = DOCIHamiltonian.zero(n_qubits=2)
        doci_hamiltonian2 = DOCIHamiltonian.from_integrals(
            constant=self.molecule.nuclear_repulsion,
            one_body_integrals=self.molecule.one_body_integrals,
            two_body_integrals=self.molecule.two_body_integrals)
        # zero + H == H
        self.assertTrue(doci_hamiltonian2 == doci_hamiltonian1 +
                        doci_hamiltonian2)
        # zero - H == H / -1
        self.assertTrue(doci_hamiltonian1 -
                        doci_hamiltonian2 == doci_hamiltonian2 / -1)
        # H * 0 == zero
        self.assertTrue(doci_hamiltonian2 * 0 == doci_hamiltonian1)
    def test_error(self):
        """Item assignment and malformed index reads must raise."""
        doci_hamiltonian = DOCIHamiltonian.from_integrals(
            constant=self.molecule.nuclear_repulsion,
            one_body_integrals=self.molecule.one_body_integrals,
            two_body_integrals=self.molecule.two_body_integrals)
        # The Hamiltonian is read-only through __getitem__.
        with self.assertRaises(TypeError):
            doci_hamiltonian[((1, 0), (0, 1))] = 1
        with self.assertRaises(IndexError):
            _ = doci_hamiltonian[((1, 0),)]
        with self.assertRaises(IndexError):
            _ = doci_hamiltonian[((1, 1), (0, 0))]
        with self.assertRaises(IndexError):
            _ = doci_hamiltonian[((0, 1), (1, 1), (0, 0), (2, 0))]
    def test_getting_setting_constant(self):
        """The empty key () reads back the constant term."""
        doci_hamiltonian = DOCIHamiltonian.zero(n_qubits=2)
        doci_hamiltonian.constant = 1
        self.assertEqual(doci_hamiltonian[()], 1)
    def test_getting_setting_1body(self):
        """hc entries read back through number-operator keys.
        NOTE(review): each hc[p] appears to be split evenly across the two
        spin orbitals 2p and 2p+1 (hc[0]=2 reads as 1 on qubits 0 and 1) —
        confirm against the DOCIHamiltonian documentation."""
        doci_hamiltonian = DOCIHamiltonian.zero(n_qubits=2)
        doci_hamiltonian.hc[0] = 2
        doci_hamiltonian.hc[1] = 4
        self.assertEqual(doci_hamiltonian[((0, 1), (0, 0))], 1)
        self.assertEqual(doci_hamiltonian[((1, 1), (1, 0))], 1)
        self.assertEqual(doci_hamiltonian[((2, 1), (2, 0))], 2)
        self.assertEqual(doci_hamiltonian[((3, 1), (3, 0))], 2)
    def test_getting_setting_hr2(self):
        """hr2 entries read back through two-body number-number keys;
        diagonal entries are likewise split across spin-orbital pairs."""
        doci_hamiltonian = DOCIHamiltonian.zero(n_qubits=2)
        doci_hamiltonian.hr2[0, 0] = 2
        doci_hamiltonian.hr2[1, 1] = 4
        self.assertEqual(doci_hamiltonian[((0, 1), (1, 1), (1, 0), (0, 0))], 1)
        self.assertEqual(doci_hamiltonian[((1, 1), (0, 1), (0, 0), (1, 0))], 1)
        self.assertEqual(doci_hamiltonian[((2, 1), (3, 1), (3, 0), (2, 0))], 2)
        self.assertEqual(doci_hamiltonian[((3, 1), (2, 1), (2, 0), (3, 0))], 2)
        # Off-diagonal hr2[0, 1] couples every spin orbital of site 0 with
        # every spin orbital of site 1.
        doci_hamiltonian.hr2[0, 1] = 2
        self.assertEqual(doci_hamiltonian[((0, 1), (2, 1), (2, 0), (0, 0))], 1)
        self.assertEqual(doci_hamiltonian[((0, 1), (3, 1), (3, 0), (0, 0))], 1)
        self.assertEqual(doci_hamiltonian[((1, 1), (2, 1), (2, 0), (1, 0))], 1)
        self.assertEqual(doci_hamiltonian[((1, 1), (3, 1), (3, 0), (1, 0))], 1)
    def test_getting_setting_hr1(self):
        """hr1 entries read back through pair-hopping keys."""
        doci_hamiltonian = DOCIHamiltonian.zero(n_qubits=2)
        doci_hamiltonian.hr1[0, 1] = 2
        self.assertEqual(doci_hamiltonian[(0, 1), (1, 1), (3, 0), (2, 0)], 1)
        self.assertEqual(doci_hamiltonian[(1, 1), (0, 1), (2, 0), (3, 0)], 1)
    def test_from_integrals_to_qubit(self):
        """The DOCI qubit Hamiltonian is the restriction of the full qubit
        Hamiltonian to the doubly-occupied (seniority-zero) subspace."""
        hamiltonian = jordan_wigner(self.molecule.get_molecular_hamiltonian())
        doci_hamiltonian = DOCIHamiltonian.from_integrals(
            constant=self.molecule.nuclear_repulsion,
            one_body_integrals=self.molecule.one_body_integrals,
            two_body_integrals=self.molecule.two_body_integrals).qubit_operator
        hamiltonian_matrix = get_sparse_operator(hamiltonian).toarray()
        doci_hamiltonian_matrix = get_sparse_operator(
            doci_hamiltonian).toarray()
        diagonal = numpy.real(numpy.diag(hamiltonian_matrix))
        doci_diagonal = numpy.real(numpy.diag(doci_hamiltonian_matrix))
        # For each DOCI diagonal entry, locate the closest entry on the
        # full Hamiltonian's diagonal; it must match to within EQ_TOLERANCE.
        position_of_doci_diag_in_h = [0] * len(doci_diagonal)
        for idx, doci_eigval in enumerate(doci_diagonal):
            closest_in_diagonal = None
            for idx2, eig in enumerate(diagonal):
                if closest_in_diagonal is None or abs(eig - doci_eigval) < abs(
                        closest_in_diagonal - doci_eigval):
                    closest_in_diagonal = eig
                    position_of_doci_diag_in_h[idx] = idx2
            assert abs(closest_in_diagonal - doci_eigval) < EQ_TOLERANCE, (
                "Value " + str(doci_eigval) + " of the DOCI Hamiltonian " +
                "diagonal did not appear in the diagonal of the full " +
                "Hamiltonian. The closest value was " +
                str(closest_in_diagonal))
        # The sub-matrix of the full Hamiltonian at those positions must
        # equal the DOCI Hamiltonian matrix (same off-diagonal couplings).
        sub_matrix = hamiltonian_matrix[numpy.ix_(position_of_doci_diag_in_h,
                                                  position_of_doci_diag_in_h)]
        assert numpy.allclose(doci_hamiltonian_matrix, sub_matrix), (
            "The coupling between the DOCI states in the DOCI Hamiltonian " +
            "should be identical to that between these states in the full " +
            "Hamiltonian bur the DOCI hamiltonian matrix\n" +
            str(doci_hamiltonian_matrix) +
            "\ndoes not match the corresponding sub-matrix of the full " +
            "Hamiltonian\n" + str(sub_matrix))
| [
"openfermion.chem.molecular_data.MolecularData",
"numpy.allclose",
"openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.from_integrals",
"openfermion.linalg.get_sparse_operator",
"openfermion.ops.representations.doci_hamiltonian.get_doci_from_integrals",
"os.path.join",
"numpy.ix_",
"nump... | [((1287, 1343), 'os.path.join', 'os.path.join', (['DATA_DIRECTORY', '"""H2_sto-3g_singlet_0.7414"""'], {}), "(DATA_DIRECTORY, 'H2_sto-3g_singlet_0.7414')\n", (1299, 1343), False, 'import os\n'), ((1368, 1456), 'openfermion.chem.molecular_data.MolecularData', 'MolecularData', (['self.geometry', 'self.basis', 'self.multiplicity'], {'filename': 'self.filename'}), '(self.geometry, self.basis, self.multiplicity, filename=self.\n filename)\n', (1381, 1456), False, 'from openfermion.chem.molecular_data import MolecularData\n'), ((1662, 1758), 'openfermion.ops.representations.doci_hamiltonian.get_doci_from_integrals', 'get_doci_from_integrals', (['self.molecule.one_body_integrals', 'self.molecule.two_body_integrals'], {}), '(self.molecule.one_body_integrals, self.molecule.\n two_body_integrals)\n', (1685, 1758), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((1840, 1887), 'openfermion.ops.representations.doci_hamiltonian.get_projected_integrals_from_doci', 'get_projected_integrals_from_doci', (['hc', 'hr1', 'hr2'], {}), '(hc, hr1, hr2)\n', (1873, 1887), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((1939, 1992), 'openfermion.ops.representations.doci_hamiltonian.get_doci_from_integrals', 'get_doci_from_integrals', (['proj_one_body', 'proj_two_body'], {}), '(proj_one_body, proj_two_body)\n', (1962, 1992), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((2398, 2461), 'openfermion.ops.representations.doci_hamiltonian.get_doci_from_integrals', 'get_doci_from_integrals', (['one_body_integrals', 'two_body_integrals'], {}), '(one_body_integrals, two_body_integrals)\n', (2421, 2461), False, 'from 
openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((3335, 3391), 'os.path.join', 'os.path.join', (['DATA_DIRECTORY', '"""H2_sto-3g_singlet_0.7414"""'], {}), "(DATA_DIRECTORY, 'H2_sto-3g_singlet_0.7414')\n", (3347, 3391), False, 'import os\n'), ((3416, 3504), 'openfermion.chem.molecular_data.MolecularData', 'MolecularData', (['self.geometry', 'self.basis', 'self.multiplicity'], {'filename': 'self.filename'}), '(self.geometry, self.basis, self.multiplicity, filename=self.\n filename)\n', (3429, 3504), False, 'from openfermion.chem.molecular_data import MolecularData\n'), ((3712, 3744), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (3732, 3744), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((4314, 4346), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (4334, 4346), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((4375, 4407), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(3)'}), '(n_qubits=3)\n', (4395, 4407), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((4975, 5007), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (4995, 5007), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, 
get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((5257, 5289), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (5277, 5289), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((5318, 5505), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.from_integrals', 'DOCIHamiltonian.from_integrals', ([], {'constant': 'self.molecule.nuclear_repulsion', 'one_body_integrals': 'self.molecule.one_body_integrals', 'two_body_integrals': 'self.molecule.two_body_integrals'}), '(constant=self.molecule.nuclear_repulsion,\n one_body_integrals=self.molecule.one_body_integrals, two_body_integrals\n =self.molecule.two_body_integrals)\n', (5348, 5505), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((5877, 6064), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.from_integrals', 'DOCIHamiltonian.from_integrals', ([], {'constant': 'self.molecule.nuclear_repulsion', 'one_body_integrals': 'self.molecule.one_body_integrals', 'two_body_integrals': 'self.molecule.two_body_integrals'}), '(constant=self.molecule.nuclear_repulsion,\n one_body_integrals=self.molecule.one_body_integrals, two_body_integrals\n =self.molecule.two_body_integrals)\n', (5907, 6064), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((6554, 6586), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (6574, 6586), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, 
get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((6745, 6777), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (6765, 6777), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((7172, 7204), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (7192, 7204), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((8031, 8063), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.zero', 'DOCIHamiltonian.zero', ([], {'n_qubits': '(2)'}), '(n_qubits=2)\n', (8051, 8063), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((9893, 9944), 'numpy.allclose', 'numpy.allclose', (['doci_hamiltonian_matrix', 'sub_matrix'], {}), '(doci_hamiltonian_matrix, sub_matrix)\n', (9907, 9944), False, 'import numpy\n'), ((2030, 2057), 'numpy.allclose', 'numpy.allclose', (['hc', 'hc_test'], {}), '(hc, hc_test)\n', (2044, 2057), False, 'import numpy\n'), ((2083, 2112), 'numpy.allclose', 'numpy.allclose', (['hr1', 'hr1_test'], {}), '(hr1, hr1_test)\n', (2097, 2112), False, 'import numpy\n'), ((2181, 2210), 'numpy.allclose', 'numpy.allclose', (['hr2', 'hr2_test'], {}), '(hr2, hr2_test)\n', (2195, 2210), False, 'import numpy\n'), ((8410, 8597), 'openfermion.ops.representations.doci_hamiltonian.DOCIHamiltonian.from_integrals', 'DOCIHamiltonian.from_integrals', ([], {'constant': 'self.molecule.nuclear_repulsion', 'one_body_integrals': 'self.molecule.one_body_integrals', 'two_body_integrals': 'self.molecule.two_body_integrals'}), 
'(constant=self.molecule.nuclear_repulsion,\n one_body_integrals=self.molecule.one_body_integrals, two_body_integrals\n =self.molecule.two_body_integrals)\n', (8440, 8597), False, 'from openfermion.ops.representations.doci_hamiltonian import DOCIHamiltonian, get_tensors_from_doci, get_projected_integrals_from_doci, get_doci_from_integrals\n'), ((8839, 8869), 'numpy.diag', 'numpy.diag', (['hamiltonian_matrix'], {}), '(hamiltonian_matrix)\n', (8849, 8869), False, 'import numpy\n'), ((8906, 8941), 'numpy.diag', 'numpy.diag', (['doci_hamiltonian_matrix'], {}), '(doci_hamiltonian_matrix)\n', (8916, 8941), False, 'import numpy\n'), ((9761, 9826), 'numpy.ix_', 'numpy.ix_', (['position_of_doci_diag_in_h', 'position_of_doci_diag_in_h'], {}), '(position_of_doci_diag_in_h, position_of_doci_diag_in_h)\n', (9770, 9826), False, 'import numpy\n'), ((8671, 8703), 'openfermion.linalg.get_sparse_operator', 'get_sparse_operator', (['hamiltonian'], {}), '(hamiltonian)\n', (8690, 8703), False, 'from openfermion.linalg import get_sparse_operator\n'), ((8748, 8785), 'openfermion.linalg.get_sparse_operator', 'get_sparse_operator', (['doci_hamiltonian'], {}), '(doci_hamiltonian)\n', (8767, 8785), False, 'from openfermion.linalg import get_sparse_operator\n')] |
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from ProcessData.TrainingLoss import TrainingLoss
from ProcessData.Utils import getX_full
from typing import Tuple
from HighFrequency.HighFrequency import HighFrequency
from HighFrequency.Discriminator import Discriminator
from HighFrequency.DataBase import DataBase
from HighFrequency.LossFunction import LossFunction
from HighFrequency.Vizualise import plotState
import Format
class TrainingConfig:
    """Bundle of hyper-parameters that control a single training run."""

    def __init__(self, batch_size , epochs, learn_rate, learning_decay, warm_up):
        """Store every hyper-parameter verbatim on the instance.

        :param batch_size: mini-batch size used by the data loaders
        :param epochs: number of training epochs
        :param learn_rate: initial learning rate for the optimizers
        :param learning_decay: per-epoch multiplicative LR decay factor
        :param warm_up: number of delta-T steps excluded from the loss
        """
        vars(self).update(
            batch_size=batch_size,
            epochs=epochs,
            learn_rate=learn_rate,
            learning_decay=learning_decay,
            warm_up=warm_up,
        )
def RunDiscriminator_latent(latentRecon:torch.Tensor, discriminator:Discriminator, database:DataBase) -> torch.Tensor:
    """Decode a batch of latent reconstructions and score them.

    The latents are mapped back to pose space with the database's
    autoencoder decoder and the decoded poses are passed directly to
    the discriminator.

    :param latentRecon: latent-space reconstructions from the generator
    :param discriminator: network that rates decoded poses
    :param database: provides ``AE_network.decoder`` for the mapping
    :return: discriminator scores for the decoded poses
    """
    decoded = database.AE_network.decoder(latentRecon)
    return discriminator(decoded)
def RunDiscriminator_pose(true_data:torch.Tensor, discriminator:Discriminator, database:DataBase) -> torch.Tensor:
    """Score a batch of ground-truth pose data with the discriminator.

    ``database`` is unused here; the parameter is kept so the signature
    mirrors ``RunDiscriminator_latent`` and existing callers keep working.

    :param true_data: ground-truth pose tensor
    :param discriminator: network that rates poses
    :param database: unused, kept for interface compatibility
    :return: discriminator scores for the true poses
    """
    return discriminator(true_data)
def Train(training_config:TrainingConfig, error_config:TrainingLoss, outFile:str, runName:str, database:DataBase, database_validation:DataBase, visual:bool= False):
    """Train the HighFrequency model, log to TensorBoard, save and return it.

    Each epoch runs a weighted training pass over ``database`` followed by a
    no-grad validation pass over ``database_validation``; per-term losses are
    averaged over the data set (weighted by batch fraction) and written to
    TensorBoard under ``HF_Training/`` and ``HF_Validation/``.

    :param training_config: batch size, epochs, learning rate/decay, warm-up
    :param error_config: loss weights and names applied to each loss term
    :param outFile: path where the trained model's state dict is saved
    :param runName: TensorBoard run name (logs written to 'runs/' + runName)
    :param database: training data set
    :param database_validation: validation data set
    :param visual: if True, plot one reconstructed episode per epoch
    :return: the trained HighFrequency model
    """
    print('Starting...')
    writer = SummaryWriter(log_dir='runs/'+runName)
    if torch.cuda.is_available():
        print('Enabling CUDA...')
        device = torch.device("cuda:0")
        torch.cuda.empty_cache()
    print('Creating DB...')
    database_loader = DataLoader(database, shuffle=True, batch_size=training_config.batch_size, num_workers= (1 if torch.cuda.is_available() else 0))
    database_loader_validation = DataLoader(database_validation, shuffle=True, batch_size=training_config.batch_size, num_workers= (1 if torch.cuda.is_available() else 0))
    print('Creating NN & Trainer...')
    if torch.cuda.is_available():
        model_raw = HighFrequency(Format.latentDim, database.featureDim)
        model = model_raw.to(device)
        discriminator = Discriminator(database.poseDim)
        discriminator = discriminator.to(device)
    else:
        model = HighFrequency(Format.latentDim, database.featureDim)
        discriminator = Discriminator(database.poseDim)
    optimizer_model = torch.optim.AdamW(model.parameters(), lr = training_config.learn_rate )
    # LR schedule: exponential decay modulated by a cosine-squared bump
    # (period 20 epochs, scale between 0.1/1.1 and 1).
    scheduler_model = torch.optim.lr_scheduler.LambdaLR(optimizer_model, lr_lambda=lambda ep: training_config.learning_decay**ep * (0.1 + np.cos(np.pi * ep/10)**2)/1.1)
    optimizer_disc = torch.optim.AdamW(discriminator.parameters(), lr = training_config.learn_rate )
    scheduler_disc = torch.optim.lr_scheduler.LambdaLR(optimizer_disc, lr_lambda=lambda ep: training_config.learning_decay**ep)
    # optimizer_model = torch.optim.AdamW(model.parameters(), lr = training_config.learn_rate)
    # scheduler_model = torch.optim.lr_scheduler.LambdaLR(optimizer_model, lr_lambda=lambda ep: training_config.learn_rate*(training_config.learning_decay**ep) * (0.1 + np.cos(np.pi * ep/10)**2)/1.1 )
    # optimizer_disc = torch.optim.AdamW(discriminator.parameters(), lr = training_config.learn_rate)
    # scheduler_disc= torch.optim.lr_scheduler.LambdaLR(optimizer_disc, lr_lambda=lambda ep: training_config.learn_rate*(training_config.learning_decay**ep) * (0.1 + np.cos(np.pi * ep/10)**2)/1.1 )
    # Frames before warm_up_frames are excluded from every loss below.
    warm_up_frames = Format.deltaT * training_config.warm_up
    # Epoch index from which the discriminator LR schedule starts stepping
    # (the adversarial training itself is currently commented out).
    starter = 3
    print('Starting training...')
    for ep in range(training_config.epochs):
        # TRAIN
        losses_sum_weighted_training = 0.0
        losses_training = [0.0] * error_config.length()
        model.train()
        discriminator.train()
        for i, data in enumerate(database_loader):
            # split the data
            latentLast, latentNext, times, features, latent_goal, true_data, start_end_true = data
            if torch.cuda.is_available():
                latentLast = latentLast.cuda()
                latentNext = latentNext.cuda()
                times = times.cuda()
                features = features.cuda()
                latent_goal = latent_goal.cuda()
                true_data = true_data.cuda()
            # set the parameter gradients to zero
            # forward pass and losses
            out_latent = model.forward_full(latentLast, latentNext, times, features)
            batches = true_data.size(0)
            frames = true_data.size(1)
            # # Discriminator
            # if ep>=starter:
            #     discriminator.zero_grad()
            #     genLoss = torch.mean(torch.square(RunDiscriminator_latent(out_latent[:, warm_up_frames:].detach(), discriminator, database)))
            #     discLoss = torch.mean(torch.square(RunDiscriminator_pose(true_data[:, warm_up_frames:], discriminator, database)-1))
            #     sumLosses = (genLoss+discLoss)/2
            #     if sumLosses >= 0.15:
            #         sumLosses.backward()
            #         # torch.nn.utils.clip_grad_value_(discriminator.parameters(), 1)
            #         optimizer_disc.step()
            #     print("DiscriminatorLosses: Generated: {} \t \t True: {}".format(float(genLoss.item()), float(discLoss.item())))
            # # Generator
            # model.zero_grad()
            # if ep>=starter+2:
            #     with torch.no_grad(): genLoss = torch.mean(torch.square(RunDiscriminator_latent(out_latent[:, warm_up_frames:], discriminator, database)-1))
            # else:
            # Adversarial term disabled while the discriminator branch
            # above is commented out.
            genLoss = 0
            losses_here = LossFunction(out_latent[:, warm_up_frames:], latent_goal[:, warm_up_frames:], true_data[:, warm_up_frames:], genLoss, database)
            losses_here.applyWeights(error_config)
            # backward pass + optimisation
            totalLoss = sum(losses_here())
            totalLoss.backward()
            torch.nn.utils.clip_grad_value_(model.parameters(), 2)
            optimizer_model.step()
            # update training_loss
            # Running averages weighted by this batch's share of the data set.
            losses_sum_weighted_training = losses_sum_weighted_training + float(totalLoss) * (batches/len(database))
            for j in range(len(losses_training)): losses_training[j] = losses_training[j] + losses_here.getUnweighted().makefloat()[j] * (batches/len(database))
        # Step the scheduler
        scheduler_model.step()
        if ep>=starter: scheduler_disc.step()
        with torch.no_grad():
            # Add to tensorboard
            labels = error_config.getNames()
            writer.add_scalar("HF_Training/Total_Weighted", losses_sum_weighted_training, ep)
            writer.add_scalar("HF_Training/Total_Unweighted", sum(losses_training), ep)
            for i in range(len(labels)):
                writer.add_scalar("HF_Training/"+labels[i], losses_training[i], ep)
            writer.flush()
            # Vizualise
            # Uses the last batch's tensors to plot one mid-sequence episode.
            if visual:
                episode_start, episode_end = torch.split(start_end_true[0][(database.sequenceLengthLong-2) * Format.deltaT + int(Format.deltaT/2)],(database.poseDim,database.poseDim), dim=-1)
                frames_true = torch.split(latent_goal[0][(database.sequenceLengthLong-2) * Format.deltaT +1: (database.sequenceLengthLong-1) * Format.deltaT], 1, dim=-2)
                frames_predicted = torch.split(out_latent[0][(database.sequenceLengthLong-2) * Format.deltaT: (database.sequenceLengthLong-1) * Format.deltaT+1], 1, dim=-2)
                plotState(episode_start, episode_end, frames_true, frames_predicted, losses_training, database_validation)
        # VALIDATE
        with torch.no_grad():
            losses_sum_weighted_validation = 0.0
            losses_validation = [0.0] * error_config.length()
            model.eval()
            discriminator.eval()
            for i, data in enumerate(database_loader_validation):
                # split the data
                latentLast, latentNext, times, features, latent_goal, true_data, start_end_true = data
                if torch.cuda.is_available():
                    latentLast = latentLast.cuda()
                    latentNext = latentNext.cuda()
                    times = times.cuda()
                    features = features.cuda()
                    latent_goal = latent_goal.cuda()
                    true_data = true_data.cuda()
                # set the parameter gradients to zero
                # forward pass and losses
                out_latent = model.forward_full(latentLast, latentNext, times, features)
                batches = true_data.size(0)
                frames = true_data.size(1)
                # if ep>=starter:
                #     genLoss = torch.mean(torch.square(RunDiscriminator_latent(out_latent[:, warm_up_frames:].detach(), discriminator, database_validation)))
                #     discLoss = torch.mean(torch.square(RunDiscriminator_pose(true_data[:, warm_up_frames:], discriminator, database_validation)-1))
                #     sumLosses = (genLoss+discLoss)/2
                #     if sumLosses >= 0.15: print("DiscriminatorLosses: Generated: {} \t \t True: {}".format(float(genLoss.item()), float(discLoss.item())))
                # # Generator
                # if ep>=starter+2: genLoss = torch.mean(torch.square(RunDiscriminator_latent(out_latent[:, warm_up_frames:], discriminator, database_validation)-1))
                genLoss = 0
                losses_here = LossFunction(out_latent[:, warm_up_frames:], latent_goal[:, warm_up_frames:], true_data[:, warm_up_frames:], genLoss, database_validation)
                losses_here.applyWeights(error_config)
                totalLoss = sum(losses_here())
                # update validation_loss
                losses_sum_weighted_validation = losses_sum_weighted_validation + float(totalLoss) * (batches/len(database_validation))
                for j in range(len(losses_validation)): losses_validation[j] = losses_validation[j] + losses_here.getUnweighted().makefloat()[j] * (batches/len(database_validation))
            # Add to tensorboard
            labels = error_config.getNames()
            writer.add_scalar("HF_Validation/Total_Weighted", losses_sum_weighted_validation, ep)
            writer.add_scalar("HF_Validation/Total_Unweighted", sum(losses_validation), ep)
            for i in range(len(labels)):
                writer.add_scalar("HF_Validation/"+labels[i], losses_validation[i], ep)
            writer.flush()
            # Vizualise
            if visual:
                episode_start, episode_end = torch.split(start_end_true[0][(database_validation.sequenceLengthLong-2) * Format.deltaT + int(Format.deltaT/2)],(database_validation.poseDim,database_validation.poseDim), dim=-1)
                frames_true = torch.split(latent_goal[0][(database_validation.sequenceLengthLong-2) * Format.deltaT +1: (database_validation.sequenceLengthLong-1) * Format.deltaT], 1, dim=-2)
                frames_predicted = torch.split(out_latent[0][(database_validation.sequenceLengthLong-2) * Format.deltaT: (database_validation.sequenceLengthLong-1) * Format.deltaT+1], 1, dim=-2)
                plotState(episode_start, episode_end, frames_true, frames_predicted, losses_validation, database_validation)
        # Print
        print("\nEpoch: " + str(ep) + "\t \t ErrorTr: " + str(losses_sum_weighted_training) + "\t \t ErrorVal: " + str(losses_sum_weighted_validation))
    torch.save(model.state_dict(), outFile)
    return model
| [
"torch.utils.tensorboard.SummaryWriter",
"HighFrequency.HighFrequency.HighFrequency",
"torch.split",
"torch.optim.lr_scheduler.LambdaLR",
"HighFrequency.Vizualise.plotState",
"HighFrequency.Discriminator.Discriminator",
"HighFrequency.LossFunction.LossFunction",
"torch.cuda.is_available",
"numpy.cos... | [((2043, 2083), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': "('runs/' + runName)"}), "(log_dir='runs/' + runName)\n", (2056, 2083), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2090, 2115), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2113, 2115), False, 'import torch\n'), ((2645, 2670), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2668, 2670), False, 'import torch\n'), ((3410, 3523), 'torch.optim.lr_scheduler.LambdaLR', 'torch.optim.lr_scheduler.LambdaLR', (['optimizer_disc'], {'lr_lambda': '(lambda ep: training_config.learning_decay ** ep)'}), '(optimizer_disc, lr_lambda=lambda ep: \n training_config.learning_decay ** ep)\n', (3443, 3523), False, 'import torch\n'), ((2168, 2190), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2180, 2190), False, 'import torch\n'), ((2199, 2223), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2221, 2223), False, 'import torch\n'), ((2692, 2744), 'HighFrequency.HighFrequency.HighFrequency', 'HighFrequency', (['Format.latentDim', 'database.featureDim'], {}), '(Format.latentDim, database.featureDim)\n', (2705, 2744), False, 'from HighFrequency.HighFrequency import HighFrequency\n'), ((2806, 2837), 'HighFrequency.Discriminator.Discriminator', 'Discriminator', (['database.poseDim'], {}), '(database.poseDim)\n', (2819, 2837), False, 'from HighFrequency.Discriminator import Discriminator\n'), ((2913, 2965), 'HighFrequency.HighFrequency.HighFrequency', 'HighFrequency', (['Format.latentDim', 'database.featureDim'], {}), '(Format.latentDim, database.featureDim)\n', (2926, 2965), False, 'from HighFrequency.HighFrequency import HighFrequency\n'), ((2990, 3021), 'HighFrequency.Discriminator.Discriminator', 'Discriminator', (['database.poseDim'], {}), '(database.poseDim)\n', (3003, 3021), False, 'from HighFrequency.Discriminator import Discriminator\n'), ((4662, 4687), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4685, 4687), False, 'import torch\n'), ((6330, 6462), 'HighFrequency.LossFunction.LossFunction', 'LossFunction', (['out_latent[:, warm_up_frames:]', 'latent_goal[:, warm_up_frames:]', 'true_data[:, warm_up_frames:]', 'genLoss', 'database'], {}), '(out_latent[:, warm_up_frames:], latent_goal[:, warm_up_frames:\n ], true_data[:, warm_up_frames:], genLoss, database)\n', (6342, 6462), False, 'from HighFrequency.LossFunction import LossFunction\n'), ((7178, 7193), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7191, 7193), False, 'import torch\n'), ((8355, 8370), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8368, 8370), False, 'import torch\n'), ((2385, 2410), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2408, 2410), False, 'import torch\n'), ((2557, 2582), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2580, 2582), False, 'import torch\n'), ((7877, 8025), 'torch.split', 'torch.split', (['latent_goal[0][(database.sequenceLengthLong - 2) * Format.deltaT + 1:(\n database.sequenceLengthLong - 1) * Format.deltaT]', '(1)'], {'dim': '(-2)'}), '(latent_goal[0][(database.sequenceLengthLong - 2) * Format.\n deltaT + 1:(database.sequenceLengthLong - 1) * Format.deltaT], 1, dim=-2)\n', (7888, 8025), False, 'import torch\n'), ((8052, 8199), 'torch.split', 'torch.split', (['out_latent[0][(database.sequenceLengthLong - 2) * Format.deltaT:(database.\n sequenceLengthLong - 1) * Format.deltaT + 1]', '(1)'], {'dim': '(-2)'}), '(out_latent[0][(database.sequenceLengthLong - 2) * Format.deltaT\n :(database.sequenceLengthLong - 1) * Format.deltaT + 1], 1, dim=-2)\n', (8063, 8199), False, 'import torch\n'), ((8206, 8316), 'HighFrequency.Vizualise.plotState', 'plotState', (['episode_start', 'episode_end', 'frames_true', 'frames_predicted', 'losses_training', 'database_validation'], {}), '(episode_start, episode_end, frames_true, frames_predicted,\n 
losses_training, database_validation)\n', (8215, 8316), False, 'from HighFrequency.Vizualise import plotState\n'), ((8781, 8806), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8804, 8806), False, 'import torch\n'), ((10202, 10345), 'HighFrequency.LossFunction.LossFunction', 'LossFunction', (['out_latent[:, warm_up_frames:]', 'latent_goal[:, warm_up_frames:]', 'true_data[:, warm_up_frames:]', 'genLoss', 'database_validation'], {}), '(out_latent[:, warm_up_frames:], latent_goal[:, warm_up_frames:\n ], true_data[:, warm_up_frames:], genLoss, database_validation)\n', (10214, 10345), False, 'from HighFrequency.LossFunction import LossFunction\n'), ((11531, 11705), 'torch.split', 'torch.split', (['latent_goal[0][(database_validation.sequenceLengthLong - 2) * Format.deltaT +\n 1:(database_validation.sequenceLengthLong - 1) * Format.deltaT]', '(1)'], {'dim': '(-2)'}), '(latent_goal[0][(database_validation.sequenceLengthLong - 2) *\n Format.deltaT + 1:(database_validation.sequenceLengthLong - 1) * Format\n .deltaT], 1, dim=-2)\n', (11542, 11705), False, 'import torch\n'), ((11728, 11901), 'torch.split', 'torch.split', (['out_latent[0][(database_validation.sequenceLengthLong - 2) * Format.deltaT:\n (database_validation.sequenceLengthLong - 1) * Format.deltaT + 1]', '(1)'], {'dim': '(-2)'}), '(out_latent[0][(database_validation.sequenceLengthLong - 2) *\n Format.deltaT:(database_validation.sequenceLengthLong - 1) * Format.\n deltaT + 1], 1, dim=-2)\n', (11739, 11901), False, 'import torch\n'), ((11904, 12016), 'HighFrequency.Vizualise.plotState', 'plotState', (['episode_start', 'episode_end', 'frames_true', 'frames_predicted', 'losses_validation', 'database_validation'], {}), '(episode_start, episode_end, frames_true, frames_predicted,\n losses_validation, database_validation)\n', (11913, 12016), False, 'from HighFrequency.Vizualise import plotState\n'), ((3256, 3279), 'numpy.cos', 'np.cos', (['(np.pi * ep / 10)'], {}), '(np.pi * ep / 10)\n', (3262, 
3279), True, 'import numpy as np\n')] |
"""
Compute the entropy in bits of a list of probabilities.
"""
import numpy as np
def entropy(ps):
    """
    Compute the Shannon entropy in bits of a probability distribution.

    Zero probabilities contribute nothing, following the convention
    ``0 * log2(0) == 0``.  The probabilities must sum to one (within
    1e-8) and every element must lie in the closed interval [0, 1].

    :param ps: list of probabilities
    :type ps: list
    :returns: entropy in bits (non-negative)
    :raises ValueError: if an element is outside [0, 1] or the
        probabilities do not sum to 1
    """
    probs = np.asarray(ps, dtype=float)
    if np.any((probs < 0.0) | (probs > 1.0)):
        raise ValueError("At least one input is out of range [0...1]")
    if not np.isclose(1, np.sum(probs), atol=1e-08):
        raise ValueError("The list of input probabilities does not sum to 1")
    # Restrict to strictly positive entries so log2 never yields
    # nan/-inf (the zero terms contribute 0 by convention); this also
    # avoids the RuntimeWarning the old nan-patching loop triggered.
    nonzero = probs[probs > 0.0]
    # abs() guards against a -0.0 result when the entropy is exactly 0.
    return np.abs(-np.sum(nonzero * np.log2(nonzero)))
| [
"numpy.sum",
"numpy.log2",
"numpy.isnan"
] | [((665, 676), 'numpy.log2', 'np.log2', (['ps'], {}), '(ps)\n', (672, 676), True, 'import numpy as np\n'), ((730, 744), 'numpy.isnan', 'np.isnan', (['item'], {}), '(item)\n', (738, 744), True, 'import numpy as np\n'), ((522, 532), 'numpy.sum', 'np.sum', (['ps'], {}), '(ps)\n', (528, 532), True, 'import numpy as np\n'), ((846, 863), 'numpy.sum', 'np.sum', (['new_items'], {}), '(new_items)\n', (852, 863), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 15:29:41 2021.
@author: pielsticker
"""
import numpy as np
import h5py
from sklearn.utils import shuffle
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from .utils import ClassDistribution, SpectraPlot
#%%
class DataHandler:
"""Class for data treatment during an experiment in tensorflow."""
def __init__(self, intensity_only=True):
"""
Initialize intenstiy_only parameter.
Parameters
----------
intensity_only : boolean, optional
If True, then only the intensity scale is loaded into X.
If False, the intensity and the BE scale is loaded into X.
The default is True.
Returns
-------
None.
"""
self.intensity_only = intensity_only
def load_data_preprocess(
self,
input_filepath,
no_of_examples,
train_test_split,
train_val_split,
):
"""
Load the data from an HDF5 file and preprocess it.
Parameters
----------
input_filepath : str
Filepath of the .
no_of_examples : int
Number of samples from the input file.
train_test_split : float
Split percentage between train+val and test set.
Typically ~ 0.2.
train_val_split : float
Split percentage between train and val set.
Typically ~ 0.2.
Returns
-------
self.X_train : ndarray
Training features. Shape: 3d Numpy array.
self.X_val : ndarray
Validation features.
self.X_test : ndarray
Test features.
self.y_train : ndarray. Shape: 2d Numpy array.
Training labels.
self.y_val : ndarray
Validation labels.
self.y_test : ndarray
Test labels.
Optionally, the method can also return more information about
the data set:
self.sim_values_train,
self.sim_values_val,
self.sim_values_test : dicts
Dictionary containing information about the parameters
used during the artificical constructuon of the dataset.
Keys : 'shiftx', 'noise', 'FWHM',
'scatterer', 'distance', 'pressure'
Split in the same way as the features and labels.
self.names_train,
self.names_val,
self.names_test : ndarrays
Arrays of the spectra names associated with each X,y
pairing in the data set. Typical for measured spectra.
Split in the same way as the features and labels.
"""
self.input_filepath = input_filepath
self.train_test_split = train_test_split
self.train_val_split = train_val_split
self.no_of_examples = no_of_examples
with h5py.File(input_filepath, "r") as hf:
try:
self.energies = hf["energies"][:]
except KeyError:
self.energies = np.flip(np.arange(694, 750.05, 0.05))
print(
"The data set did have not an energy scale. "
+ "Default (Fe) was assumed."
)
try:
try:
self.labels = [
label.decode("utf-8") for label in hf["labels"][:]
]
except AttributeError:
self.labels = [str(label) for label in hf["labels"][:]]
self.num_classes = len(self.labels)
except KeyError:
print(
"The data set did not contain any labels. "
+ "The label list is empty."
)
dataset_size = hf["X"].shape[0]
# Randomly choose a subset of the whole data set.
try:
r = np.random.randint(0, dataset_size - self.no_of_examples)
except ValueError as e:
error_msg = (
"There are not enough spectra in this data set. "
+ "Please choose a value of less than {0} ".format(
dataset_size - 1
)
+ "for no_of_examples."
)
raise type(e)(error_msg)
X = hf["X"][r : r + self.no_of_examples, :, :]
X = X.astype(np.float)
y = hf["y"][r : r + self.no_of_examples, :]
if not self.intensity_only:
new_energies = np.tile(
np.reshape(np.array(self.energies), (-1, 1)),
(X.shape[0], 1, 1),
)
X = np.dstack((new_energies, X))
# Check if the data set was artificially created.
if "shiftx" in hf.keys():
shift_x = hf["shiftx"][r : r + self.no_of_examples, :]
noise = hf["noise"][r : r + self.no_of_examples, :]
fwhm = hf["FWHM"][r : r + self.no_of_examples, :]
if "scatterer" in hf.keys():
# If the data set was artificially created, check
# if scattering in a gas phase was simulated.
scatterer = hf["scatterer"][
r : r + self.no_of_examples, :
]
distance = hf["distance"][r : r + self.no_of_examples, :]
pressure = hf["pressure"][r : r + self.no_of_examples, :]
(
self.X,
self.y,
shift_x,
noise,
fwhm,
scatterer,
distance,
pressure,
) = shuffle(
X,
y,
shift_x,
noise,
fwhm,
scatterer,
distance,
pressure,
)
# Store all parameters of the simulations in a dict
sim_values = {
"shift_x": shift_x,
"noise": noise,
"fwhm": fwhm,
"scatterer": scatterer,
"distance": distance,
"pressure": pressure,
}
self.sim_values = sim_values
else:
# Shuffle all arrays together
self.X, self.y, shift_x, noise, fwhm = shuffle(
X, y, shift_x, noise, fwhm
)
sim_values = {
"shift_x": shift_x,
"noise": noise,
"fwhm": fwhm,
}
self.sim_values = sim_values
# Split into train, val and test sets
(
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.sim_values_train,
self.sim_values_val,
self.sim_values_test,
) = self._split_test_val_train(
self.X, self.y, sim_values=self.sim_values
)
# Determine the shape of the training features,
# needed for model building in Keras.
self.input_shape = self.X_train.shape[1:]
loaded_data = (
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.sim_values_train,
self.sim_values_val,
self.sim_values_test,
)
# Check if the spectra have associated names. Typical for
# measured spectra.
elif "names" in hf.keys():
names_load_list = [
name[0].decode("utf-8")
for name in hf["names"][r : r + self.no_of_examples, :]
]
names = np.reshape(np.array(names_load_list), (-1, 1))
# Shuffle all arrays together
self.X, self.y, self.names = shuffle(X, y, names)
# Split into train, val and test sets
(
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.names_train,
self.names_val,
self.names_test,
) = self._split_test_val_train(
self.X, self.y, names=self.names
)
# Determine the shape of the training features,
# needed for model building in Keras.
self.input_shape = (
self.X_train.shape[1],
self.X_train.shape[2],
)
loaded_data = (
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.names_train,
self.names_val,
self.names_test,
)
# If there are neither simulation values nor names in
# the dataset, just load the X and y arrays.
else:
# Shuffle X and y together
self.X, self.y = shuffle(X, y)
# Split into train, val and test sets
(
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
) = self._split_test_val_train(self.X, self.y)
# Determine the shape of the training features,
# needed for model building in Keras.
self.input_shape = (
self.X_train.shape[1],
self.X_train.shape[2],
)
loaded_data = (
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
)
print("Data was loaded!")
print("Total no. of samples: " + str(self.X.shape[0]))
print("No. of training samples: " + str(self.X_train.shape[0]))
print("No. of validation samples: " + str(self.X_val.shape[0]))
print("No. of test samples: " + str(self.X_test.shape[0]))
print(
"Shape of each sample : "
+ str(self.X_train.shape[1])
+ " features (X)"
+ " + "
+ str(self.y_train.shape[1])
+ " labels (y)"
)
return loaded_data
def _split_test_val_train(self, X, y, **kwargs):
"""
Split multiple numpy arrays into train, val, and test sets.
First, the whole data is split into the train+val and test sets
according to the attribute self.train_test_split. Secondly.
the train+val sets are further split into train and val sets
according to the attribute self.train_val_split.
Parameters
----------
X : ndarray
Features used as inputs for a Keras model. 3d array.
y : TYPE
Labels to be learned. 2d array.
**kwargs : str
Possible keywords:
'sim_values', 'names'.
Returns
-------
For each input array, three output arras are returned.
E.g. for input X, the returns are X_train, X_val, X_test.
"""
# First split into train+val and test sets
no_of_train_val = int((1 - self.train_test_split) * X.shape[0])
X_train_val = X[:no_of_train_val, :, :]
X_test = X[no_of_train_val:, :, :]
y_train_val = y[:no_of_train_val, :]
y_test = y[no_of_train_val:, :]
# Then create val subset from train set
no_of_train = int((1 - self.train_val_split) * X_train_val.shape[0])
X_train = X_train_val[:no_of_train, :, :]
X_val = X_train_val[no_of_train:, :, :]
y_train = y_train_val[:no_of_train, :]
y_val = y_train_val[no_of_train:, :]
if "sim_values" in kwargs.keys():
# Also split the arrays in the 'sim_values' dictionary.
sim_values = kwargs["sim_values"]
shift_x = sim_values["shift_x"]
noise = sim_values["noise"]
fwhm = sim_values["fwhm"]
shift_x_train_val = shift_x[:no_of_train_val, :]
shift_x_test = shift_x[no_of_train_val:, :]
noise_train_val = noise[:no_of_train_val, :]
noise_test = noise[no_of_train_val:, :]
fwhm_train_val = fwhm[:no_of_train_val, :]
fwhm_test = fwhm[no_of_train_val:, :]
shift_x_train = shift_x_train_val[:no_of_train, :]
shift_x_val = shift_x_train_val[no_of_train:, :]
noise_train = noise_train_val[:no_of_train, :]
noise_val = noise_train_val[no_of_train:, :]
fwhm_train = fwhm_train_val[:no_of_train, :]
fwhm_val = fwhm_train_val[no_of_train:, :]
sim_values_train = {
"shift_x": shift_x_train,
"noise": noise_train,
"fwhm": fwhm_train,
}
sim_values_val = {
"shift_x": shift_x_val,
"noise": noise_val,
"fwhm": fwhm_val,
}
sim_values_test = {
"shift_x": shift_x_test,
"noise": noise_test,
"fwhm": fwhm_test,
}
if "scatterer" in sim_values.keys():
# Also split the scatterer, distance, and pressure
# arrays if they are present in the in the 'sim_values'
# dictionary.
scatterer = sim_values["scatterer"]
distance = sim_values["distance"]
pressure = sim_values["pressure"]
scatterer_train_val = scatterer[:no_of_train_val, :]
scatterer_test = scatterer[no_of_train_val:, :]
distance_train_val = distance[:no_of_train_val, :]
distance_test = distance[no_of_train_val:, :]
pressure_train_val = pressure[:no_of_train_val, :]
pressure_test = pressure[no_of_train_val:, :]
scatterer_train = scatterer_train_val[:no_of_train, :]
scatterer_val = scatterer_train_val[no_of_train:, :]
distance_train = distance_train_val[:no_of_train, :]
distance_val = distance_train_val[no_of_train:, :]
pressure_train = pressure_train_val[:no_of_train, :]
pressure_val = pressure_train_val[no_of_train:, :]
sim_values_train["scatterer"] = scatterer_train
sim_values_train["distance"] = distance_train
sim_values_train["pressure"] = pressure_train
sim_values_val["scatterer"] = scatterer_val
sim_values_val["distance"] = distance_val
sim_values_val["pressure"] = pressure_val
sim_values_test["scatterer"] = scatterer_test
sim_values_test["distance"] = distance_test
sim_values_test["pressure"] = pressure_test
return (
X_train,
X_val,
X_test,
y_train,
y_val,
y_test,
sim_values_train,
sim_values_val,
sim_values_test,
)
if "names" in kwargs.keys():
# Also split the names array.
names = kwargs["names"]
names_train_val = names[:no_of_train_val, :]
names_test = names[no_of_train_val:, :]
names_train = names_train_val[:no_of_train, :]
names_val = names_train_val[no_of_train:, :]
return (
X_train,
X_val,
X_test,
y_train,
y_val,
y_test,
names_train,
names_val,
names_test,
)
return X_train, X_val, X_test, y_train, y_val, y_test
def _only_keep_classification_data(self):
"""
Keep only data with just one species, i.e. with one label of 1.0
and the rest of the labels = 0.0.
"""
indices = np.where(self.y == 0.0)[0]
self.X, self.y = self.X[indices], self.y[indices]
if hasattr(self, "sim_values"):
new_sim_values = {}
for key, sim_arrays in self.sim_values.items():
new_sim_values[key] = sim_arrays[indices]
self.sim_values = new_sim_values
(
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.sim_values_train,
self.sim_values_val,
self.sim_values_test,
) = self._split_test_val_train(
self.X, self.y, sim_values=self.sim_values
)
loaded_data = (
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.sim_values_train,
self.sim_values_val,
self.sim_values_test,
)
elif hasattr(self, "names"):
print("Hi")
self.names = [self.names[i] for i in indices]
(
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.names_train,
self.names_val,
self.names_test,
) = self._split_test_val_train(self.X, self.y, names=self.names)
loaded_data = (
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
self.names_train,
self.names_val,
self.names_test,
)
else:
(
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
) = self._split_test_val_train(self.X, self.y)
loaded_data = (
self.X_train,
self.X_val,
self.X_test,
self.y_train,
self.y_val,
self.y_test,
)
print(
f"Only spectra with one species were left in the data set! Test/val/train splits were kept."
)
print(f"Remaining no. of training examples: {self.y_train.shape[0]}")
print(f"Remaining no. of val examples: {self.y_val.shape[0]}")
print(f"Remaining no. of test examples: {self.y_test.shape[0]}")
return loaded_data
def check_class_distribution(self, task):
"""
Generate a Class Distribution object based on a given task.
Parameters
----------
task : str
If task == 'regression', an average distribution is
calculated.
If task == 'classification' or 'multi_class_detection',
the distribution of the labels across the different data
sets is calculated.
Returns
-------
dict
Dictionary containing the class distribution.
"""
data_list = [self.y, self.y_train, self.y_val, self.y_test]
self.class_distribution = ClassDistribution(task, data_list)
return self.class_distribution.cd
def calculate_losses(self, loss_func):
"""
Calculate losses for train and test data.
Parameters
----------
loss_func : keras.losses.Loss
A keras loss function.
Returns
-------
None.
"""
print("Calculating loss for each example...")
self.losses_train = [
loss_func(self.y_train[i], self.pred_train[i]).numpy()
for i in range(self.y_train.shape[0])
]
self.losses_test = [
loss_func(self.y_test[i], self.pred_test[i]).numpy()
for i in range(self.y_test.shape[0])
]
print("Done!")
def plot_spectra(self, no_of_spectra, dataset, indices, with_prediction):
"""
Generate spectra plot for a given data set.
Parameters
----------
no_of_spectra : int
No. of plots to create.
dataset : str
Either 'train', 'val', or 'test'.
The default is 'train'.
indices: list
List
with_prediction : bool, optional
If True, information about the predicted values are also
shown in the plot.
The default is False.
Returns
-------
None.
"""
data = []
texts = []
X, y = self._select_dataset(dataset)
for i in range(no_of_spectra):
index = indices[i]
if self.intensity_only:
new_energies = np.reshape(np.array(self.energies), (-1, 1))
data.append(np.hstack((new_energies, X[index])))
else:
data.append(X[index])
text = self.write_text_for_spectrum(
dataset=dataset, index=index, with_prediction=with_prediction
)
texts.append(text)
data = np.array(data)
graphic = SpectraPlot(data=data, annots=texts)
fig, axs = graphic.plot()
def plot_random(self, no_of_spectra, dataset, with_prediction):
"""
Plot random XPS spectra out of one of the data set.
The labels and additional information are shown as texts on the
plots.
Parameters
----------
no_of_spectra : int
No. of plots to create.
dataset : str
Either 'train', 'val', or 'test'.
The default is 'train'.
with_prediction : bool, optional
If True, information about the predicted values are also
shown in the plot.
The default is False.
Returns
-------
None.
"""
X, y = self._select_dataset(dataset)
indices = []
for i in range(no_of_spectra):
r = np.random.randint(0, X.shape[0])
indices.append(r)
self.plot_spectra(
no_of_spectra=no_of_spectra,
dataset=dataset,
indices=indices,
with_prediction=with_prediction,
)
def show_worst_predictions(
self, no_of_spectra, kind="all", threshold=0.0
):
"""
Plot the spectra with the highest losses.
Accepts a threshold parameter. If a threshold other than 0 is
given, the spectra with losses right above this threshold are
plotted.
Parameters
----------
no_of_spectra : int
No. of spectra to plot.
kind : str, optional
Choice of sub set in test data.
'all': all test data.
'single': only test data with single species.
'linear_comb': only test data with linear combination
of species.
The default is 'all'.
threshold : float
Threshold value for loss.
Returns
-------
None.
"""
X, y = self._select_dataset("test")
pred, losses = self._get_predictions("test")
if kind == "all":
indices = [
j[1]
for j in sorted(
[
(x, i)
for (i, x) in enumerate(losses)
if x >= threshold
],
reverse=True,
)
]
len_all = y.shape[0]
print_statement = ""
elif kind == "single":
indices = [
j[1]
for j in sorted(
[
(x, i)
for (i, x) in enumerate(losses)
if (
len(np.where(y[i] == 0.0)[0]) == 3
and x >= threshold
)
],
reverse=True,
)
]
len_all = len(
[
i
for (i, x) in enumerate(losses)
if (len(np.where(y[i] == 0.0)[0]) == 3)
]
)
print_statement = "with a single species "
elif kind == "linear_comb":
indices = [
j[1]
for j in sorted(
[
(x, i)
for (i, x) in enumerate(losses)
if (
len(np.where(y[i] == 0.0)[0]) != 3
and x >= threshold
)
],
reverse=True,
)
]
len_all = len(
[
i
for (i, x) in enumerate(losses)
if (len(np.where(y[i] == 0.0)[0]) != 3)
]
)
print_statement = "with multiple species "
if threshold > 0.0:
print(
"{0} of {1} test samples ({2}%) {3}have a mean ".format(
str(len(indices)),
str(len_all),
str(
100 * (np.around(len(indices) / len_all, decimals=3))
),
print_statement,
)
+ "absolute error of of at least {0}.".format(str(threshold))
)
indices = indices[-no_of_spectra:]
else:
indices = indices[:no_of_spectra]
self.plot_spectra(
no_of_spectra=no_of_spectra,
dataset="test",
indices=indices,
with_prediction=True,
)
    def show_wrong_classification(self):
        """
        Plot all spectra in the test data with a wrong prediction.
        Only works for classification.

        A prediction counts as wrong when the argmax of the predicted
        probabilities differs from the argmax of the one-hot truth.

        Returns
        -------
        None.
        """
        # NOTE(review): `data` is never appended to below, so SpectraPlot
        # receives an empty array while `texts` is populated -- presumably
        # the spectra (e.g. self.X_test[index]) should be collected as in
        # plot_spectra(); verify the intent before relying on this plot.
        data = []
        texts = []
        wrong_pred_args = []
        for i in range(self.pred_test.shape[0]):
            # Compare the predicted class with the true class.
            argmax_class_true = np.argmax(self.y_test[i, :], axis=0)
            argmax_class_pred = np.argmax(self.pred_test[i, :], axis=0)
            if argmax_class_true != argmax_class_pred:
                wrong_pred_args.append(i)
        no_of_wrong_pred = len(wrong_pred_args)
        print(
            "No. of wrong predictions on the test data: "
            + str(no_of_wrong_pred)
        )
        if no_of_wrong_pred > 0:
            for i in range(no_of_wrong_pred):
                index = wrong_pred_args[i]
                real_y = "Real: " + str(self.y_test[index]) + "\n"
                # Round prediction and sum to 1
                tmp_array = np.around(self.pred_test[index], decimals=4)
                #row_sums = tmp_array.sum()
                #tmp_array = tmp_array / row_sums
                #tmp_array = np.around(tmp_array, decimals=2)
                pred_y = "Prediction: " + str(tmp_array) + "\n"
                pred_label = (
                    "Predicted label: "
                    + str(self.pred_test_classes[index, 0])
                    + "\n"
                )
                labels = self.y_test[index]
                # Find the label whose one-hot entry is 1.
                # NOTE(review): if no entry equals 1 (e.g. a linear
                # combination), `label` stays unbound on the first pass
                # (NameError) or leaks the previous example's value.
                for j, value in enumerate(labels):
                    if value == 1:
                        label = str(self.labels[j])
                label = "Real label: " + label + "\n"
                text = real_y + pred_y + label + pred_label
                # Simulated spectra carry sim parameters; measured ones
                # carry names. Missing attributes are skipped silently.
                try:
                    sim = self._write_sim_text(dataset="test", index=index)
                    text += sim
                except AttributeError:
                    pass
                try:
                    name = self._write_measured_text(
                        dataset="test", index=index
                    )
                    text += name
                except AttributeError:
                    pass
                texts.append(text)
        data = np.array(data)
        graphic = SpectraPlot(data=data, annots=texts)
        fig, axs = graphic.plot()
def plot_prob_predictions(
self, prob_preds, dataset="test", no_of_spectra=10
):
X, y = self._select_dataset(dataset_name="test")
if no_of_spectra > y.shape[0]:
print("Provide no. of spectra was bigger than dataset size.")
no_of_spectra = y.shape[0]
fig, axs = plt.subplots(
nrows=no_of_spectra, ncols=5, figsize=(22, 5 * no_of_spectra)
)
max_y = np.max([np.float(np.max(y)), np.max(prob_preds)])
random_numbers = []
for i in range(no_of_spectra):
ax0 = axs[i, 0]
ax1 = axs[i, 1]
ax2 = axs[i, 2]
ax3 = axs[i, 3]
ax4 = axs[i, 4]
r = np.random.randint(0, X.shape[0])
while r in random_numbers:
r = np.random.randint(0, X.shape[0])
random_numbers.append(r)
if len(X.shape) == 4:
ax0.imshow(X[r, :, :, 0], cmap="gist_gray")
elif len(X.shape) == 3:
ax0.plot(self.energies, self.X[r])
ax0.invert_xaxis()
ax0.set_xlim(np.max(self.energies), np.min(self.energies))
ax0.set_xlabel("Binding energy (eV)")
ax0.set_ylabel("Intensity (arb. units)")
annot = self.write_text_for_spectrum(
dataset="test", index=r, with_prediction=False,
)
ax0.set_title("Spectrum no. {}".format(r))
ax0.text(
0.025,
0.4,
annot,
horizontalalignment="left",
verticalalignment="top",
transform=ax0.transAxes,
fontsize=12,
)
sns.barplot(x=np.arange(self.num_classes), y=y[r], ax=ax1)
# ax1.set_ylim([0, np.max(y)])
ax1.set_title("Ground Truth")
colors = iter(mcolors.CSS4_COLORS.keys())
for pred in prob_preds[r, :20]:
sns.barplot(
x=np.arange(self.num_classes),
y=pred,
color=next(colors),
alpha=0.2,
ax=ax2,
)
# ax2.set_ylim([0, max_y])
ax2.set_title("Posterior Samples")
for j, row in enumerate(prob_preds[r, :, :].transpose()):
_ = ax3.hist(
row,
bins=25,
# range=(-2.,2.),
orientation="horizontal",
fill=True,
linewidth=1,
label=self.labels[j],
)
ax3.legend()
ax3.set_xscale("log")
ax3.set_xlabel("Prediction")
ax3.set_ylabel("Counts")
ax3.set_title("Prediction Histogram")
ax4.bar(
np.arange(self.num_classes),
np.mean(prob_preds[r, :, :], axis=0),
yerr=np.std(prob_preds[r, :, :], axis=0),
align="center",
ecolor="black",
capsize=10,
)
ax4.set_title("Predictive Probabilities")
for ax in [ax1, ax2, ax4]:
ax.set_xticks(np.arange(self.num_classes))
ax.set_xticklabels(self.labels)
fig.tight_layout()
return fig
def _select_dataset(self, dataset_name):
"""
Select a data set (for plotting).
Parameters
----------
name : str
Name of the data set. Options: 'train', 'val', 'test'.
Returns
-------
TYPE
DESCRIPTION.
"""
if dataset_name == "train":
X = self.X_train
y = self.y_train
elif dataset_name == "val":
X = self.X_val
y = self.y_val
elif dataset_name == "test":
X = self.X_test
y = self.y_test
return X, y
def _get_predictions(self, dataset_name):
"""
Get the predictions and losses for one data set.
Used for writing the annotations during plotting.
Parameters
----------
name : str
Name of the data set. Options: 'train', 'val', 'test'.
Returns
-------
TYPE
DESCRIPTION.
"""
if dataset_name == "train":
pred = self.pred_train
losses = self.losses_train
elif dataset_name == "test":
pred = self.pred_test
losses = self.losses_test
return pred, losses
def write_text_for_spectrum(self, dataset, index, with_prediction=True):
"""
Create the annotation for a plot of one spectrum.
Parameters
----------
dataset : TYPE
DESCRIPTION.
index : TYPE
DESCRIPTION.
with_prediction : TYPE, optional
DESCRIPTION. The default is True.
Returns
-------
text : TYPE
DESCRIPTION.
"""
X, y = self._select_dataset(dataset)
label = str(np.around(y[index], decimals=3))
text = "Real: " + label + "\n"
if with_prediction:
pred, losses = self._get_predictions(dataset)
# Round prediction and sum to 1
tmp_array = np.around(pred[index], decimals=4)
#row_sums = tmp_array.sum()
#tmp_array = tmp_array / row_sums
#tmp_array = np.around(tmp_array, decimals=3)
pred_text = "Prediction: " + str(list(tmp_array)) + "\n"
text += pred_text
try:
text += self._write_sim_text(dataset=dataset, index=index)
except AttributeError:
pass
try:
text += self._write_measured_text(dataset=dataset, index=index)
except AttributeError:
pass
if with_prediction:
loss_text = (
"\n" + "Loss: " + str(np.around(losses[index], decimals=3))
)
text += loss_text
return text
def _write_sim_text(self, dataset, index):
"""
Generate a string containing the simulation parameters.
Parameters
----------
dataset : str
Either 'train', 'val', or 'test.
Needed for taking the correct sim_values.
index : int
Index of the example for which the text shall be created.
Returns
-------
sim_text : str
Output text in a figure.
"""
if dataset == "train":
shift_x = self.sim_values_train["shift_x"][index]
noise = self.sim_values_train["noise"][index]
fwhm = self.sim_values_train["fwhm"][index]
elif dataset == "val":
shift_x = self.sim_values_val["shift_x"][index]
noise = self.sim_values_val["noise"][index]
fwhm = self.sim_values_val["fwhm"][index]
elif dataset == "test":
shift_x = self.sim_values_test["shift_x"][index]
noise = self.sim_values_test["noise"][index]
fwhm = self.sim_values_test["fwhm"][index]
if fwhm is not None and fwhm != 0:
fwhm_text = (
"FHWM: " + str(np.round(float(fwhm), decimals=2)) + ", "
)
else:
fwhm_text = "FHWM: not changed" + ", "
if shift_x is not None and shift_x != 0:
shift_text = " Shift: " + "{:.2f}".format(float(shift_x)) + ", "
else:
shift_text = " Shift: none" + ", "
if noise is not None and noise != 0:
noise_text = "S/N: " + "{:.1f}".format(float(noise))
else:
noise_text = "S/N: not changed"
sim_text = fwhm_text + shift_text + noise_text + "\n"
if "scatterer" in self.sim_values.keys():
sim_text += self._write_scatter_text(dataset, index)
else:
sim_text += "Scattering: none."
return sim_text
def _write_scatter_text(self, dataset, index):
"""
Generate a string containing the scattering parameters.
Parameters
----------
dataset : str
Either 'train', 'val', or 'test.
Needed for taking the correct sim_values.
index : int
Index of the example for which the text shall be created.
Returns
-------
scatter_text : str
Output text in a figure.
"""
if dataset == "train":
scatterer = self.sim_values_train["scatterer"][index]
distance = self.sim_values_train["distance"][index]
pressure = self.sim_values_train["pressure"][index]
elif dataset == "val":
scatterer = self.sim_values_val["scatterer"][index]
distance = self.sim_values_val["distance"][index]
pressure = self.sim_values_val["pressure"][index]
elif dataset == "test":
scatterer = self.sim_values_test["scatterer"][index]
distance = self.sim_values_test["distance"][index]
pressure = self.sim_values_test["pressure"][index]
scatterers = {"0": "He", "1": "H2", "2": "N2", "3": "O2"}
try:
scatterer_name = scatterers[str(scatterer[0])]
except KeyError:
return "Scattering: none."
name_text = "Scatterer: " + scatterer_name + ", "
pressure_text = "{:.1f}".format(float(pressure)) + " mbar, "
distance_text = "d = " + "{:.1f}".format(float(distance)) + " mm"
scatter_text = name_text + pressure_text + distance_text + "\n"
return scatter_text
def _write_measured_text(self, dataset, index):
"""
Generate information about the measured spectra.
Parameters
----------
dataset : str
Either 'train', 'val', or 'test.
Needed for taking the correct names.
index : int
Index of the example for which the text shall be created.
Returns
-------
measured_text : str
Output text in a figure.
"""
if dataset == "train":
name = self.names_train[index][0]
elif dataset == "val":
name = self.names_val[index][0]
elif dataset == "test":
name = self.names_test[index][0]
measured_text = "Spectrum no. " + str(index) + "\n" + str(name) + "\n"
return measured_text
#%%
# Demo/driver script: load a simulated Fe data set and sanity-check it
# by plotting random training spectra.
if __name__ == "__main__":
    # Fix the RNG seed so the shuffle/split is reproducible.
    np.random.seed(502)
    # Hard-coded local path to the HDF5 simulation file.
    input_filepath = r"C:\Users\pielsticker\Simulations\20210520_Fe_linear_combination_small_gas_phase\20210520_Fe_linear_combination_small_gas_phase.h5"
    datahandler = DataHandler(intensity_only=True)
    # Hold out 20 % for testing; 20 % of the remainder for validation.
    train_test_split = 0.2
    train_val_split = 0.2
    no_of_examples = 1000
    (
        X_train,
        X_val,
        X_test,
        y_train,
        y_val,
        y_test,
        sim_values_train,
        sim_values_val,
        sim_values_test,
    ) = datahandler.load_data_preprocess(
        input_filepath=input_filepath,
        no_of_examples=no_of_examples,
        train_test_split=train_test_split,
        train_val_split=train_val_split,
    )
    print("Input shape: " + str(datahandler.input_shape))
    print("Labels: " + str(datahandler.labels))
    print("No. of classes: " + str(datahandler.num_classes))
    # Visual sanity check: 20 random training spectra, no predictions.
    datahandler.plot_random(
        no_of_spectra=20, dataset="train", with_prediction=False
    )
| [
"numpy.dstack",
"numpy.mean",
"numpy.hstack",
"numpy.where",
"sklearn.utils.shuffle",
"numpy.argmax",
"numpy.min",
"h5py.File",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"matplotlib.colors.CSS4_COLORS.keys",
"numpy.random.seed",
"numpy.around",
"numpy.std",
"matplotlib.pyplot... | [((40394, 40413), 'numpy.random.seed', 'np.random.seed', (['(502)'], {}), '(502)\n', (40408, 40413), True, 'import numpy as np\n'), ((22610, 22624), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (22618, 22624), True, 'import numpy as np\n'), ((30068, 30143), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'no_of_spectra', 'ncols': '(5)', 'figsize': '(22, 5 * no_of_spectra)'}), '(nrows=no_of_spectra, ncols=5, figsize=(22, 5 * no_of_spectra))\n', (30080, 30143), True, 'import matplotlib.pyplot as plt\n'), ((3018, 3048), 'h5py.File', 'h5py.File', (['input_filepath', '"""r"""'], {}), "(input_filepath, 'r')\n", (3027, 3048), False, 'import h5py\n'), ((17254, 17277), 'numpy.where', 'np.where', (['(self.y == 0.0)'], {}), '(self.y == 0.0)\n', (17262, 17277), True, 'import numpy as np\n'), ((23526, 23558), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X.shape[0]'], {}), '(0, X.shape[0])\n', (23543, 23558), True, 'import numpy as np\n'), ((27753, 27789), 'numpy.argmax', 'np.argmax', (['self.y_test[i, :]'], {'axis': '(0)'}), '(self.y_test[i, :], axis=0)\n', (27762, 27789), True, 'import numpy as np\n'), ((27822, 27861), 'numpy.argmax', 'np.argmax', (['self.pred_test[i, :]'], {'axis': '(0)'}), '(self.pred_test[i, :], axis=0)\n', (27831, 27861), True, 'import numpy as np\n'), ((29626, 29640), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (29634, 29640), True, 'import numpy as np\n'), ((30458, 30490), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X.shape[0]'], {}), '(0, X.shape[0])\n', (30475, 30490), True, 'import numpy as np\n'), ((34944, 34975), 'numpy.around', 'np.around', (['y[index]'], {'decimals': '(3)'}), '(y[index], decimals=3)\n', (34953, 34975), True, 'import numpy as np\n'), ((35184, 35218), 'numpy.around', 'np.around', (['pred[index]'], {'decimals': '(4)'}), '(pred[index], decimals=4)\n', (35193, 35218), True, 'import numpy as np\n'), ((4044, 4100), 'numpy.random.randint', 'np.random.randint', 
(['(0)', '(dataset_size - self.no_of_examples)'], {}), '(0, dataset_size - self.no_of_examples)\n', (4061, 4100), True, 'import numpy as np\n'), ((4851, 4879), 'numpy.dstack', 'np.dstack', (['(new_energies, X)'], {}), '((new_energies, X))\n', (4860, 4879), True, 'import numpy as np\n'), ((28386, 28430), 'numpy.around', 'np.around', (['self.pred_test[index]'], {'decimals': '(4)'}), '(self.pred_test[index], decimals=4)\n', (28395, 28430), True, 'import numpy as np\n'), ((30212, 30230), 'numpy.max', 'np.max', (['prob_preds'], {}), '(prob_preds)\n', (30218, 30230), True, 'import numpy as np\n'), ((30551, 30583), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X.shape[0]'], {}), '(0, X.shape[0])\n', (30568, 30583), True, 'import numpy as np\n'), ((31701, 31727), 'matplotlib.colors.CSS4_COLORS.keys', 'mcolors.CSS4_COLORS.keys', ([], {}), '()\n', (31725, 31727), True, 'import matplotlib.colors as mcolors\n'), ((32673, 32700), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (32682, 32700), True, 'import numpy as np\n'), ((32718, 32754), 'numpy.mean', 'np.mean', (['prob_preds[r, :, :]'], {'axis': '(0)'}), '(prob_preds[r, :, :], axis=0)\n', (32725, 32754), True, 'import numpy as np\n'), ((5958, 6024), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y', 'shift_x', 'noise', 'fwhm', 'scatterer', 'distance', 'pressure'], {}), '(X, y, shift_x, noise, fwhm, scatterer, distance, pressure)\n', (5965, 6024), False, 'from sklearn.utils import shuffle\n'), ((6813, 6848), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y', 'shift_x', 'noise', 'fwhm'], {}), '(X, y, shift_x, noise, fwhm)\n', (6820, 6848), False, 'from sklearn.utils import shuffle\n'), ((8680, 8700), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y', 'names'], {}), '(X, y, names)\n', (8687, 8700), False, 'from sklearn.utils import shuffle\n'), ((10040, 10053), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y'], {}), '(X, y)\n', (10047, 10053), False, 'from sklearn.utils import shuffle\n'), 
((22265, 22288), 'numpy.array', 'np.array', (['self.energies'], {}), '(self.energies)\n', (22273, 22288), True, 'import numpy as np\n'), ((22327, 22362), 'numpy.hstack', 'np.hstack', (['(new_energies, X[index])'], {}), '((new_energies, X[index]))\n', (22336, 22362), True, 'import numpy as np\n'), ((30200, 30209), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (30206, 30209), True, 'import numpy as np\n'), ((31544, 31571), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (31553, 31571), True, 'import numpy as np\n'), ((32777, 32812), 'numpy.std', 'np.std', (['prob_preds[r, :, :]'], {'axis': '(0)'}), '(prob_preds[r, :, :], axis=0)\n', (32783, 32812), True, 'import numpy as np\n'), ((33045, 33072), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (33054, 33072), True, 'import numpy as np\n'), ((35825, 35861), 'numpy.around', 'np.around', (['losses[index]'], {'decimals': '(3)'}), '(losses[index], decimals=3)\n', (35834, 35861), True, 'import numpy as np\n'), ((3192, 3220), 'numpy.arange', 'np.arange', (['(694)', '(750.05)', '(0.05)'], {}), '(694, 750.05, 0.05)\n', (3201, 3220), True, 'import numpy as np\n'), ((4738, 4761), 'numpy.array', 'np.array', (['self.energies'], {}), '(self.energies)\n', (4746, 4761), True, 'import numpy as np\n'), ((8552, 8577), 'numpy.array', 'np.array', (['names_load_list'], {}), '(names_load_list)\n', (8560, 8577), True, 'import numpy as np\n'), ((30867, 30888), 'numpy.max', 'np.max', (['self.energies'], {}), '(self.energies)\n', (30873, 30888), True, 'import numpy as np\n'), ((30890, 30911), 'numpy.min', 'np.min', (['self.energies'], {}), '(self.energies)\n', (30896, 30911), True, 'import numpy as np\n'), ((31824, 31851), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (31833, 31851), True, 'import numpy as np\n'), ((25767, 25788), 'numpy.where', 'np.where', (['(y[i] == 0.0)'], {}), '(y[i] == 0.0)\n', (25775, 25788), True, 'import numpy as np\n'), 
((26511, 26532), 'numpy.where', 'np.where', (['(y[i] == 0.0)'], {}), '(y[i] == 0.0)\n', (26519, 26532), True, 'import numpy as np\n'), ((25427, 25448), 'numpy.where', 'np.where', (['(y[i] == 0.0)'], {}), '(y[i] == 0.0)\n', (25435, 25448), True, 'import numpy as np\n'), ((26171, 26192), 'numpy.where', 'np.where', (['(y[i] == 0.0)'], {}), '(y[i] == 0.0)\n', (26179, 26192), True, 'import numpy as np\n')] |
import os
import cv2
import queue
import random
import threading
import face_recognition
import numpy as np
from sklearn import svm
import joblib
q = queue.Queue()
# Load the face images and save one 128-d encoding per image as .npy.
def Encode():
    print("Start Encoding")
    image_path = 'C:\\Users\\Administrator\\Desktop\\face_recognition-master\\examples\\knn_examples\\test\\'
    person_list = os.listdir(image_path)
    #print(person_list)
    for person in person_list:
        # NOTE(review): this lists `image_path` again instead of a
        # per-person sub-folder, so every image is re-encoded once per
        # entry in `person_list` -- presumably this should be
        # os.listdir(image_path + person); verify the data layout.
        image_list = os.listdir(image_path)
        for image in image_list:
            #print(person +' '+ image)
            face = face_recognition.load_image_file(image_path + image)
            face_locations = face_recognition.face_locations(face)
            # Take the encoding of the first detected face only.
            face_enc = face_recognition.face_encodings(face, face_locations)[0]
            # Saved into the current working directory, named after the
            # image file without its extension.
            np.save(image.split(".")[0], face_enc)
            #print(image.split(".")[0])
# Train the SVC face classifier.
def Train_SVC():
    print("Start Training")
    encodings = []
    names = []
    name_dict = {}
    # Load the face database and learn from it.
    data_path = "C:\\Users\\Administrator\\Desktop\\face_recognition-master\\examples\\knn_examples\\test\\"
    person_list = os.listdir(data_path)
    print(person_list)
    for i, person in enumerate(person_list):
        data_list = os.listdir(data_path)
        # NOTE(review): the loop loads `data_path + person` (not
        # `data_path + data`), so the same file is appended once per
        # directory entry, all with label i -- presumably this should
        # iterate a per-person folder of .npy encodings; verify.
        for data in data_list:
            print(i, data)
            encodings.append(np.load(data_path + person).tolist())
            names.append(int(i))
        name_dict[i] = person
    clf = svm.SVC(C=20, probability=True)
    clf.fit(encodings, names)
    joblib.dump(clf, "my_model.m")
    # Persist the label-index -> name mapping; Display() reads it back
    # with eval().
    f = open('name.txt', 'w')
    f.write(str(name_dict))
    f.close()
# Thread 1: grab frames from the camera and push them onto the queue.
def Receive():
    print("Start Reveive")
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    # cap = cv2.VideoCapture("rtsp://admin:a123456789@192.168.127.12/h264/ch1/main/av_stream")
    ret, frame = cap.read()
    q.put(frame)
    # NOTE(review): the queue is unbounded -- if Display() falls behind,
    # frames accumulate and memory grows without limit.
    while ret:
        ret, frame = cap.read()
        q.put(frame)
# Thread 2: run face detection/recognition on queued frames and show
# the annotated video.
def Display():
    print("Start DisPlaying")
    clf = joblib.load("my_model.m")
    f = open('name.txt', 'r')
    # NOTE(review): eval() on file contents executes arbitrary code if
    # the file is tampered with -- ast.literal_eval would be safer.
    name_dict = eval(f.read())
    f.close()
    face_locations = []
    face_names = []
    count = 0
    # Minimum class probability for accepting a prediction.
    threshold = 1/(0.75 * len(name_dict))
    while True:
        if not q.empty():
            count += 1
            frame = q.get()
            # Work on a quarter-size BGR->RGB copy for speed.
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            rgb_small_frame = small_frame[:, :, ::-1]
            # Run face detection only every 5th frame.
            if count % 5 == 0:
                face_locations = face_recognition.face_locations(rgb_small_frame)
            # Run face recognition only every 10th frame.
            if count % 10 == 0:
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                face_names = []
                for face_encoding in face_encodings:
                    #print(clf.predict[face_encoding])
                    print(clf.predict_proba([face_encoding])) #
                    if np.max(clf.predict_proba([face_encoding])) > threshold:
                        face_names.append(name_dict[int(clf.predict([face_encoding]))])
                    else:
                        face_names.append("Unknown")
            # Draw boxes and name labels; scale coordinates back up by 4
            # to match the full-size frame.
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                cv2.putText(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
if __name__ == '__main__':
    # Run Encode() once to (re)build the .npy encodings, then retrain.
    #Encode()
    Train_SVC()
    # Producer/consumer: Receive() captures frames, Display() shows them.
    p1 = threading.Thread(target=Receive)
    p2 = threading.Thread(target=Display)
    p1.start()
    p2.start()
| [
"cv2.rectangle",
"face_recognition.face_locations",
"os.listdir",
"cv2.resize",
"cv2.imshow",
"cv2.putText",
"cv2.waitKey",
"face_recognition.face_encodings",
"cv2.VideoCapture",
"face_recognition.load_image_file",
"joblib.load",
"threading.Thread",
"queue.Queue",
"numpy.load",
"joblib.d... | [((152, 165), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (163, 165), False, 'import queue\n'), ((352, 374), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (362, 374), False, 'import os\n'), ((1109, 1130), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1119, 1130), False, 'import os\n'), ((1444, 1475), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': '(20)', 'probability': '(True)'}), '(C=20, probability=True)\n', (1451, 1475), False, 'from sklearn import svm\n'), ((1510, 1540), 'joblib.dump', 'joblib.dump', (['clf', '"""my_model.m"""'], {}), "(clf, 'my_model.m')\n", (1521, 1540), False, 'import joblib\n'), ((1683, 1717), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)', 'cv2.CAP_DSHOW'], {}), '(0, cv2.CAP_DSHOW)\n', (1699, 1717), False, 'import cv2\n'), ((2001, 2026), 'joblib.load', 'joblib.load', (['"""my_model.m"""'], {}), "('my_model.m')\n", (2012, 2026), False, 'import joblib\n'), ((3876, 3908), 'threading.Thread', 'threading.Thread', ([], {'target': 'Receive'}), '(target=Receive)\n', (3892, 3908), False, 'import threading\n'), ((3918, 3950), 'threading.Thread', 'threading.Thread', ([], {'target': 'Display'}), '(target=Display)\n', (3934, 3950), False, 'import threading\n'), ((451, 473), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (461, 473), False, 'import os\n'), ((1219, 1240), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1229, 1240), False, 'import os\n'), ((567, 619), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['(image_path + image)'], {}), '(image_path + image)\n', (599, 619), False, 'import face_recognition\n'), ((649, 686), 'face_recognition.face_locations', 'face_recognition.face_locations', (['face'], {}), '(face)\n', (680, 686), False, 'import face_recognition\n'), ((2326, 2369), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(frame, (0, 0), fx=0.25, fy=0.25)\n', (2336, 2369), False, 
'import cv2\n'), ((3708, 3734), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (3718, 3734), False, 'import cv2\n'), ((710, 763), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['face', 'face_locations'], {}), '(face, face_locations)\n', (741, 763), False, 'import face_recognition\n'), ((2517, 2565), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb_small_frame'], {}), '(rgb_small_frame)\n', (2548, 2565), False, 'import face_recognition\n'), ((2661, 2725), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb_small_frame', 'face_locations'], {}), '(rgb_small_frame, face_locations)\n', (2692, 2725), False, 'import face_recognition\n'), ((3413, 3479), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n', (3426, 3479), False, 'import cv2\n'), ((3496, 3584), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, bottom - 35)', '(right, bottom)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2\n .FILLED)\n', (3509, 3584), False, 'import cv2\n'), ((3596, 3699), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left + 6, bottom - 6)', 'cv2.FONT_HERSHEY_DUPLEX', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(frame, name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, \n 1.0, (255, 255, 255), 1)\n', (3607, 3699), False, 'import cv2\n'), ((3751, 3765), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3762, 3765), False, 'import cv2\n'), ((1328, 1355), 'numpy.load', 'np.load', (['(data_path + person)'], {}), '(data_path + person)\n', (1335, 1355), True, 'import numpy as np\n')] |
from numpy import array, compress, zeros
import wx
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
from spacq.interface.list_columns import ListParser
"""
Embeddable, generic, virtual, tabular display.
"""
class VirtualListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
	"""
	A generic virtual list.

	Data is kept in NumPy arrays; cells are rendered lazily through
	OnGetItemText (LC_VIRTUAL), so large tables remain responsive.
	"""

	max_value_len = 10 # Characters shown per cell before truncation.

	@staticmethod
	def find_type(value):
		"""
		Determine the type of a column based on a single value.

		The type is one of: scalar, list, string.
		"""
		try:
			float(value)
		except ValueError:
			pass
		else:
			return 'scalar'

		try:
			ListParser()(value)
		except ValueError:
			pass
		else:
			return 'list'

		return 'string'

	def __init__(self, parent, *args, **kwargs):
		wx.ListCtrl.__init__(self, parent,
				style=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES,
				*args, **kwargs)
		ListCtrlAutoWidthMixin.__init__(self)

		self.reset()

	def reset(self):
		"""
		Forget all headings, data, filtered data and column types.
		"""
		self.headings = []
		self.data = array([])
		self.filtered_data = None
		self.display_data = array([])
		self.types = []

	def refresh_with_values(self, data):
		"""
		Rebuild the truncated display cache from data and repaint.
		"""
		self.ItemCount = len(data)

		if self.ItemCount > 0:
			self.display_data = zeros(data.shape, dtype='|S{0}'.format(self.max_value_len))

			for i, _ in enumerate(self.headings):
				# Truncate for display.
				self.display_data[:,i] = [str(x)[:self.max_value_len] for x in data[:,i]]

		self.Refresh()

	def apply_filter(self, f, afresh=False):
		"""
		Set the data to be the old data, along with the application of a filter.

		f is a function of two parameters: the index of the row and the row itself.
		f must return True if the row is to be kept and False otherwise.

		If afresh is True, all old filtered data is discarded.
		Otherwise, a new filter can be quickly applied on top of the previous one.
		"""
		if afresh:
			self.filtered_data = None

		if self.filtered_data is not None:
			original_set = self.filtered_data
		else:
			original_set = self.data

		self.filtered_data = compress([f(i, x) for i, x in enumerate(original_set)], original_set, axis=0)

		self.refresh_with_values(self.filtered_data)

	def GetValue(self, types=None):
		"""
		Return (headings, data, types) restricted to columns whose type
		is in types; by default all column types are included.
		"""
		# Get all types by default.
		if types is None:
			types = set(self.types)
		else:
			types = set(types)

		# Find column indices of the correct type.
		idxs = [i for i, t in enumerate(self.types) if t in types]

		if self.filtered_data is not None:
			data = self.filtered_data
		else:
			data = self.data

		return ([self.headings[i] for i in idxs], data[:,idxs], [self.types[i] for i in idxs])

	def SetValue(self, headings, data):
		"""
		headings: A list of strings.
		data: A 2D NumPy array.
		"""
		self.ClearAll()
		self.reset()

		self.headings = headings
		self.data = data
		self.refresh_with_values(self.data)

		if self.ItemCount > 0:
			width, height = self.GetSize()
			# Give some room for the scrollbar.
			# Integer division: wx column widths must be ints; under Python 3
			# true division would hand wx a float.
			col_width = (width - 50) // len(self.headings)

			for i, heading in enumerate(self.headings):
				self.InsertColumn(i, heading, width=col_width)

				# Infer the column type from its first value.
				col_type = self.find_type(data[0,i])
				self.types.append(col_type)

	def OnGetItemText(self, item, col):
		"""
		Return cell value for LC_VIRTUAL.
		"""
		return self.display_data[item,col]
class TabularDisplayPanel(wx.Panel):
	"""
	A panel to display arbitrary tabular data.
	"""
	def __init__(self, parent, *args, **kwargs):
		wx.Panel.__init__(self, parent, *args, **kwargs)

		# A single table fills the whole panel.
		sizer = wx.BoxSizer(wx.VERTICAL)

		self.table = VirtualListCtrl(self)
		sizer.Add(self.table, proportion=1, flag=wx.EXPAND)

		self.SetSizer(sizer)

	def __len__(self):
		return self.table.ItemCount

	# TODO: has headers does not function as intended, never will reach code to give header names
	def from_csv_data(self, has_header, values):
		"""
		Import the given CSV data into the table.

		If has_header is True, the first row is treated specially.
		"""
		if has_header:
			headers = values[0]
			rows = array(values[1:])
		else:
			headers = [''] * len(values[0])
			rows = array(values)

		# Ensure that all columns have a header.
		for idx, name in enumerate(headers):
			if not name:
				headers[idx] = 'Column {0}'.format(idx + 1)

		self.SetValue(headers, rows)

	def GetValue(self, *args, **kwargs):
		return self.table.GetValue(*args, **kwargs)

	def SetValue(self, headings, values):
		self.table.SetValue(headings, values)
class TabularDisplayFrame(wx.Frame):
	"""
	A top-level frame wrapping a single TabularDisplayPanel.
	"""
	def __init__(self, parent, *args, **kwargs):
		wx.Frame.__init__(self, parent, *args, **kwargs)

		# The display panel fills the whole frame.
		sizer = wx.BoxSizer(wx.VERTICAL)

		self.display_panel = TabularDisplayPanel(self)
		sizer.Add(self.display_panel, proportion=1, flag=wx.EXPAND)

		self.SetSizer(sizer)
| [
"wx.BoxSizer",
"wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin.__init__",
"spacq.interface.list_columns.ListParser",
"numpy.array",
"wx.ListCtrl.__init__",
"wx.Frame.__init__",
"wx.Panel.__init__"
] | [((738, 860), 'wx.ListCtrl.__init__', 'wx.ListCtrl.__init__', (['self', 'parent', '*args'], {'style': '(wx.LC_REPORT | wx.LC_VIRTUAL | wx.LC_HRULES | wx.LC_VRULES)'}), '(self, parent, *args, style=wx.LC_REPORT | wx.\n LC_VIRTUAL | wx.LC_HRULES | wx.LC_VRULES, **kwargs)\n', (758, 860), False, 'import wx\n'), ((861, 898), 'wx.lib.mixins.listctrl.ListCtrlAutoWidthMixin.__init__', 'ListCtrlAutoWidthMixin.__init__', (['self'], {}), '(self)\n', (892, 898), False, 'from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin\n'), ((969, 978), 'numpy.array', 'array', (['[]'], {}), '([])\n', (974, 978), False, 'from numpy import array, compress, zeros\n'), ((1029, 1038), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1034, 1038), False, 'from numpy import array, compress, zeros\n'), ((3328, 3376), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self', 'parent', '*args'], {}), '(self, parent, *args, **kwargs)\n', (3345, 3376), False, 'import wx\n'), ((3403, 3427), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (3414, 3427), False, 'import wx\n'), ((4434, 4482), 'wx.Frame.__init__', 'wx.Frame.__init__', (['self', 'parent', '*args'], {}), '(self, parent, *args, **kwargs)\n', (4451, 4482), False, 'import wx\n'), ((4509, 4533), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (4520, 4533), False, 'import wx\n'), ((596, 608), 'spacq.interface.list_columns.ListParser', 'ListParser', ([], {}), '()\n', (606, 608), False, 'from spacq.interface.list_columns import ListParser\n'), ((3923, 3940), 'numpy.array', 'array', (['values[1:]'], {}), '(values[1:])\n', (3928, 3940), False, 'from numpy import array, compress, zeros\n'), ((3991, 4004), 'numpy.array', 'array', (['values'], {}), '(values)\n', (3996, 4004), False, 'from numpy import array, compress, zeros\n')] |
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch as th
from gym import spaces
from stable_baselines3.common.buffers import BaseBuffer
from stable_baselines3.common.preprocessing import get_obs_shape
from stable_baselines3.common.type_aliases import EpisodicRolloutBufferSamples, ReplayBufferSamples, RolloutBufferSamples
from stable_baselines3.common.vec_env import VecNormalize
class EpisodicBuffer(BaseBuffer):
    """
    Episodic buffer used in on-policy PG algorithms like REINFORCE
    It corresponds to episodes collected using the current policy.
    This experience will be discarded after the policy update.
    In order to use PPO objective, we also store the current value of each state
    and the log probability of each taken action.
    The term rollout here refers to the model-free notion and should not
    be used with the concept of rollout used in model-based RL or planning.
    Hence, it is only involved in policy and value function training but not action selection.
    :param observation_space: Observation space
    :param action_space: Action space
    :param device: cpu or gpu
    :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
        Equivalent to classic advantage when set to 1.
    :param gamma: Discount factor
    :param n_envs: Number of parallel environments
    :param n_steps: N of N-step return
    :param beta: Exponentiation factor used by get_exponentiated_rewards
    :param nb_rollouts: Number of rollouts to fill the buffer
    :param max_episode_steps: Maximum length of an episode
    :param verbose: If True, print buffer dimensions at construction time
    """
    def __init__(
        self,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        device: Union[th.device, str] = "cpu",
        gae_lambda: float = 1,
        gamma: float = 0.99,
        n_envs: int = 1,
        n_steps: int = 5,
        beta: float = 1.0,
        nb_rollouts: int = 1,
        max_episode_steps: int = 1,
        verbose=False,
    ):
        if verbose:
            print("nb rollouts:", nb_rollouts)
            print("max episode length:", max_episode_steps)
        # Total capacity: every rollout may last up to max_episode_steps steps.
        buffer_size = nb_rollouts * max_episode_steps
        super(EpisodicBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
        self.gae_lambda = gae_lambda
        self.n_steps = n_steps
        self.gamma = gamma
        self.beta = beta
        # maximum steps in episode
        self.max_episode_steps = max_episode_steps
        self.current_idx = 0
        self.episode_idx = 0
        # Counter to prevent overflow
        self.episode_steps = 0
        self.nb_rollouts = nb_rollouts
        # buffer with episodes
        # number of episodes which can be stored until buffer size is reached
        # self.nb_rollouts = self.buffer_size // self.max_episode_steps
        # NOTE(review): current_idx and episode_steps are assigned twice in this
        # constructor; the repetition is harmless but redundant.
        self.current_idx = 0
        # Counter to prevent overflow
        self.episode_steps = 0
        # Get shape of observation and goal (usually the same)
        self.obs_shape = get_obs_shape(self.observation_space)
        print(self.obs_shape)
        # episode length storage, needed for episodes which has less steps than the maximum length
        self.episode_lengths = np.zeros(self.nb_rollouts, dtype=np.int64)
        assert self.n_envs == 1, "Episodic buffer only supports single env for now"
        self.reset()
    def add(
        self,
        obs: Dict[str, np.ndarray],
        action: np.ndarray,
        value: np.ndarray,
        reward: np.ndarray,
        episode_start: np.ndarray,
        done: np.ndarray,
        infos: List[Dict[str, Any]],
    ) -> None:
        """
        Store one transition in the current episode.

        The episode is closed automatically (see store_episode) when ``done``
        is true or the episode reaches ``max_episode_steps``.
        NOTE(review): ``infos`` is ignored and no log probability is stored,
        even though ``self.log_probs`` is allocated in reset() — confirm
        whether that is intentional.
        """
        self._buffer["observation"][self.episode_idx, self.current_idx] = obs
        self._buffer["action"][self.episode_idx, self.current_idx] = action
        self.values[self.episode_idx, self.current_idx] = value
        self.rewards[self.episode_idx, self.current_idx] = reward
        self.episode_starts[self.episode_idx, self.current_idx] = episode_start
        self.dones[self.episode_idx, self.current_idx] = done
        # update current pointer
        self.current_idx += 1
        self.episode_steps += 1
        if done or self.episode_steps >= self.max_episode_steps:
            self.store_episode()
            self.episode_steps = 0
    def get_all_indices(self) -> Tuple[np.ndarray, np.ndarray]:
        """
        Retrieve all samples valid indices, taking episode length
        into account.

        Returns a pair (episode_indices, transition_indices) suitable for
        NumPy fancy indexing of the 2D per-episode arrays.
        """
        all_episodes = np.concatenate([np.ones(ep_len) * ep_idx for ep_idx, ep_len in enumerate(self.episode_lengths)])
        all_transitions = np.concatenate([np.arange(ep_len) for ep_len in self.episode_lengths])
        return all_episodes.astype(np.uint64), all_transitions.astype(np.uint64)
    def get_samples(self) -> EpisodicRolloutBufferSamples:
        """
        Return every stored transition, flattened across episodes, as
        torch tensors (observations, actions, policy returns, target values).
        """
        total_steps = sum(self.episode_lengths)
        all_indices = self.get_all_indices()
        # Retrieve all transition and flatten the arrays
        return EpisodicRolloutBufferSamples(
            self.to_torch(self._buffer["observation"][all_indices].reshape(total_steps, *self.obs_shape)),
            self.to_torch(self._buffer["action"][all_indices].reshape(total_steps, self.action_dim)),
            self.to_torch(self.policy_returns[all_indices].reshape(total_steps)),
            self.to_torch(self.target_values[all_indices].reshape(total_steps)),
        )
    def _get_samples(
        self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
    ) -> Union[ReplayBufferSamples, RolloutBufferSamples]:
        """
        Minibatch sampling is not supported by this buffer; use get_samples().

        :param batch_inds: unused
        :param env: unused
        :return: never returns
        :raises NotImplementedError: always
        """
        raise NotImplementedError()
    def store_episode(self) -> None:
        """
        Increment episode counter
        and reset current episode index.
        """
        # add episode length to length storage
        self.episode_lengths[self.episode_idx] = self.current_idx
        self.episode_idx += 1
        self.current_idx = 0
    @property
    def n_episodes_stored(self) -> int:
        # Number of completed episodes currently stored.
        return self.episode_idx
    def size(self) -> int:
        """
        :return: The current number of transitions in the buffer.
        """
        return int(np.sum(self.episode_lengths))
    def reset(self) -> None:
        """
        Reset the buffer.

        Reallocates all per-episode arrays, shape (nb_rollouts, max_episode_steps),
        and zeroes the episode/transition pointers.
        """
        self.values = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
        self.log_probs = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
        self.episode_starts = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
        self.dones = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
        # input dimensions for buffer initialization
        self.input_shape = {
            "observation": (self.n_envs,) + self.obs_shape,
            "action": (self.action_dim,),
        }
        self._buffer = {
            key: np.zeros((self.nb_rollouts, self.max_episode_steps, *dim), dtype=np.float32)
            for key, dim in self.input_shape.items()
        }
        self.policy_returns = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
        self.target_values = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
        self.rewards = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
        self.episode_idx = 0
        self.current_idx = 0
        self.episode_lengths = np.zeros(self.nb_rollouts, dtype=np.int64)
    def get_discounted_sum_rewards(self) -> None:
        """
        Apply a discounted sum of rewards to all samples of all episodes
        """
        for ep in range(self.nb_rollouts):
            sum_discounted_rewards = 0
            # Walk each episode backwards so the running sum is the return-to-go.
            for i in reversed(range(self.episode_lengths[ep])):
                sum_discounted_rewards = self.rewards[ep, i] + self.gamma * sum_discounted_rewards
                self.policy_returns[ep, i] = sum_discounted_rewards
    def get_sum_rewards(self) -> None:
        """
        Apply a sum of rewards to all samples of all episodes
        """
        for ep, ep_len in enumerate(self.episode_lengths):
            # Every step of the episode receives the same (undiscounted) total.
            self.policy_returns[ep, :] = self.rewards[ep, :ep_len].sum()
    def get_normalized_rewards(self) -> None:
        """
        Normalize rewards of all samples of all episodes

        NOTE(review): this standardizes ``policy_returns`` using the mean/std
        of the raw *rewards* — confirm that mixing the two is intended.
        """
        all_rewards = self.rewards[self.get_all_indices()]
        self.policy_returns = (self.policy_returns - all_rewards.mean()) / (all_rewards.std() + 1e-8)
    def get_normalized_sum(self) -> None:
        """
        Normalize rewards of all samples of all episodes
        """
        self.get_sum_rewards()
        # Standardize across the valid (non-padded) transitions only.
        all_returns = self.policy_returns[self.get_all_indices()]
        self.policy_returns = (self.policy_returns - all_returns.mean()) / (all_returns.std() + 1e-8)
    def get_normalized_discounted_rewards(self) -> None:
        """
        Apply a normalized and discounted sum of rewards to all samples of the episode
        """
        self.get_discounted_sum_rewards()
        # Note(antonin): shall we normalize with all discounted returns
        # or with all rewards
        all_returns = self.policy_returns[self.get_all_indices()]
        self.policy_returns = (self.policy_returns - all_returns.mean()) / (all_returns.std() + 1e-8)
    def get_exponentiated_rewards(self, beta) -> None:
        """
        Apply an exponentiation factor to the rewards of all samples of all episodes
        :param beta: the exponentiation factor
        """
        # TODO(antonin): add a clip parameter to clip large values?
        self.policy_returns[:, :] = np.exp(self.rewards[:, :] / beta)
    def get_target_values_mc(self) -> None:
        """
        Monte-Carlo targets: discounted return-to-go.
        Warning: is only OK for V values
        """
        self.get_discounted_sum_rewards()
        self.target_values = self.policy_returns.copy()
    def get_target_values_td(self) -> None:
        """One-step TD(0) targets: r_t + gamma * V(s_{t+1})."""
        for ep in range(self.nb_rollouts):
            for step in reversed(range(self.episode_lengths[ep])):
                if step == self.episode_lengths[ep] - 1:
                    # Episodic setting: last step is always terminal
                    # and we are not handling timeout separately yet
                    target = self.rewards[ep, step]
                else:
                    target = self.rewards[ep, step] + self.gamma * self.values[ep, step + 1]
                self.target_values[ep, step] = target
    def get_target_values_nsteps(self) -> None:
        """
        N-step TD targets with value bootstrap at the horizon.
        Warning, assumes that values[ep] correspond to V-values
        """
        for ep in range(self.nb_rollouts):
            for step in reversed(range(self.episode_lengths[ep])):
                if step == self.episode_lengths[ep] - 1:
                    # Episodic setting: last step is always terminal
                    # and we are not handling timeout separately yet
                    summ = self.rewards[ep, step]
                else:
                    horizon = step + self.n_steps
                    summ = self.rewards[ep, step]
                    if horizon < self.episode_lengths[ep]:
                        # Bootstrap with the value estimate at the horizon.
                        bootstrap_val = self.values[ep, horizon]
                        summ += self.gamma ** self.n_steps * bootstrap_val
                    for j in range(1, self.n_steps):
                        if step + j >= self.episode_lengths[ep]:
                            break
                        summ += self.gamma ** j * self.rewards[ep, step + j]
                self.target_values[ep, step] = summ
    def get_n_step_return(self) -> None:
        """
        Apply Bellman backup n-step return to all rewards of all samples of all episodes
        Though this seems to work in practice, not sure it makes much sense
        """
        for ep in range(self.nb_rollouts):
            for i in range(self.episode_lengths[ep]):
                horizon = i + self.n_steps
                summ = self.rewards[ep, i]
                if horizon < self.episode_lengths[ep]:
                    bootstrap_val = self.values[ep, horizon]
                    summ += self.gamma ** self.n_steps * bootstrap_val
                for j in range(1, self.n_steps):
                    if i + j >= self.episode_lengths[ep]:
                        break
                    summ += self.gamma ** j * self.rewards[ep, i + j]
                self.policy_returns[ep, i] = summ
    def process_gae(self) -> None:
        """
        Post-processing step: compute the lambda-return (TD(lambda) estimate)
        and GAE(lambda) advantage.
        Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)
        to compute the advantage. To obtain vanilla advantage (A(s) = R - V(S))
        where R is the discounted reward with value bootstrap,
        set ``gae_lambda=1.0`` during initialization.
        The TD(lambda) estimator has also two special cases:
        - TD(1) is Monte-Carlo estimate (sum of discounted rewards)
        - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))
        For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.

        NOTE(review): ``last_gae_lam`` is only reset once, before the episode
        loop, so the terminal step of every episode after the first folds in
        the previous episode's advantage — looks like a bug; confirm whether
        it should be reset to 0 at the start of each episode.
        """
        last_gae_lam = 0
        for ep in range(self.nb_rollouts):
            for step in reversed(range(self.episode_lengths[ep])):
                if step == self.episode_lengths[ep] - 1:
                    # delta = self.rewards[ep, step] + self.gamma * last_values - self.values[ep, step]
                    # Episodic setting: last step is always terminal
                    # and we are not handling timeout separately yet
                    delta = self.rewards[ep, step] - self.values[ep, step]
                else:
                    delta = self.rewards[ep, step] + self.gamma * self.values[ep, step + 1] - self.values[ep, step]
                last_gae_lam = delta + self.gamma * self.gae_lambda * last_gae_lam
                self.policy_returns[ep, step] = last_gae_lam
            # TD(lambda) estimator, see Github PR #375 or "Telescoping in TD(lambda)"
            # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA
            self.target_values[ep] = self.policy_returns[ep] + self.values[ep]
| [
"numpy.ones",
"numpy.arange",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"stable_baselines3.common.preprocessing.get_obs_shape"
] | [((2994, 3031), 'stable_baselines3.common.preprocessing.get_obs_shape', 'get_obs_shape', (['self.observation_space'], {}), '(self.observation_space)\n', (3007, 3031), False, 'from stable_baselines3.common.preprocessing import get_obs_shape\n'), ((3193, 3235), 'numpy.zeros', 'np.zeros', (['self.nb_rollouts'], {'dtype': 'np.int64'}), '(self.nb_rollouts, dtype=np.int64)\n', (3201, 3235), True, 'import numpy as np\n'), ((6316, 6386), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)\n', (6324, 6386), True, 'import numpy as np\n'), ((6412, 6482), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)\n', (6420, 6482), True, 'import numpy as np\n'), ((6513, 6583), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)\n', (6521, 6583), True, 'import numpy as np\n'), ((6605, 6675), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)\n', (6613, 6675), True, 'import numpy as np\n'), ((7082, 7152), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)\n', (7090, 7152), True, 'import numpy as np\n'), ((7182, 7252), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)\n', (7190, 7252), True, 'import numpy as np\n'), ((7276, 7346), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)\n', (7284, 
7346), True, 'import numpy as np\n'), ((7436, 7478), 'numpy.zeros', 'np.zeros', (['self.nb_rollouts'], {'dtype': 'np.int64'}), '(self.nb_rollouts, dtype=np.int64)\n', (7444, 7478), True, 'import numpy as np\n'), ((9607, 9640), 'numpy.exp', 'np.exp', (['(self.rewards[:, :] / beta)'], {}), '(self.rewards[:, :] / beta)\n', (9613, 9640), True, 'import numpy as np\n'), ((6184, 6212), 'numpy.sum', 'np.sum', (['self.episode_lengths'], {}), '(self.episode_lengths)\n', (6190, 6212), True, 'import numpy as np\n'), ((6912, 6988), 'numpy.zeros', 'np.zeros', (['(self.nb_rollouts, self.max_episode_steps, *dim)'], {'dtype': 'np.float32'}), '((self.nb_rollouts, self.max_episode_steps, *dim), dtype=np.float32)\n', (6920, 6988), True, 'import numpy as np\n'), ((4598, 4615), 'numpy.arange', 'np.arange', (['ep_len'], {}), '(ep_len)\n', (4607, 4615), True, 'import numpy as np\n'), ((4475, 4490), 'numpy.ones', 'np.ones', (['ep_len'], {}), '(ep_len)\n', (4482, 4490), True, 'import numpy as np\n')] |
#%%
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn import preprocessing
import random
#%%
# Load the pre-one-hot-encoded train/validation/test splits.
df_train = pd.read_csv("data/train_ohe.csv")
df_val = pd.read_csv("data/validation_ohe.csv")
df_test = pd.read_csv("data/test_ohe.csv")
# Report the click-through rate of each split (clicks are rare => imbalanced).
print (df_train.click.values.sum()*100/len(df_train), "%")
print (df_val.click.values.sum()*100/len(df_val), "%")
#%%
# Build the feature list: every column except the label, identifiers and
# columns that would leak the outcome (bid/pay price).
features = list(df_train.columns)
features_remove = ['click', 'bidid', 'logtype', 'userid', 'urlid', 'bidprice', 'payprice', 'usertag']
features = [x for x in features if x not in features_remove]
classification_column = 'click'
rand_seed = 27
random.seed(rand_seed)
np.random.seed(rand_seed)
num_to_sample = 15000
X_train_full = df_train[features].copy()
X_val_full = df_val[features].copy()
X_test = df_test[features].copy()
y_val_full = df_val.click
# Downsample the majority (no-click) class, but keep every positive (click) row.
df_train_sample = df_train.sample(num_to_sample, random_state=rand_seed).copy()
X_train_inliers = df_train_sample[features][df_train.click == 0].copy()
y_train_inliers = df_train_sample[classification_column][df_train.click == 0].copy()
X_train_outliers = df_train[features][df_train.click == 1].copy()
y_train_outliers = df_train[classification_column][df_train.click == 1].copy()
X_train = X_train_inliers.copy()
y_train = y_train_inliers.copy()
X_val_inliers = df_val[features][df_val.click == 0].copy()
y_val_inliers = df_val[classification_column][df_val.click == 0].copy()
X_val_outliers = df_val[features][df_val.click == 1].copy()
y_val_outliers = df_val[classification_column][df_val.click == 1].copy()
# One-hot "usertag_" columns are already clean; only process the raw columns.
features_to_process = X_test.columns.values
features_to_process = [x for x in features_to_process if "usertag_" not in x]
#%%
# Impute missing values with the mode (object columns) or median (numeric),
# computed over train + validation + test so every split sees the same fill.
for col in features_to_process:
    data = X_train_full[col].append(X_val_full[col]).append(X_test[col])
    if X_test[col].dtypes == 'object':
        median_val = data.value_counts().index[0]
    else:
        median_val = data.median()
    X_train_full[col].fillna(median_val, inplace=True)
    X_train[col].fillna(median_val, inplace=True)
    X_train_inliers[col].fillna(median_val, inplace=True)
    X_train_outliers[col].fillna(median_val, inplace=True)
    X_val_full[col].fillna(median_val, inplace=True)
    X_val_inliers[col].fillna(median_val, inplace=True)
    X_val_outliers[col].fillna(median_val, inplace=True)
    X_test[col].fillna(median_val, inplace=True)
le = {}
# Label-encode every categorical column, fitting on all splits combined so the
# encoders know every category that appears anywhere.
for col in features_to_process:
    if X_test[col].dtypes == 'object':
        data = X_train_full[col].append(X_val_full[col]).append(X_test[col])
        le[col] = preprocessing.LabelEncoder() #define and store a label encoder for every column so we can inverse_transform
        le[col].fit(data.values)
        X_train_full[col] = le[col].transform(X_train_full[col])
        X_train[col] = le[col].transform(X_train[col])
        X_train_inliers[col] = le[col].transform(X_train_inliers[col])
        X_train_outliers[col] = le[col].transform(X_train_outliers[col])
        X_val_full[col] = le[col].transform(X_val_full[col])
        X_val_inliers[col] = le[col].transform(X_val_inliers[col])
        X_val_outliers[col] = le[col].transform(X_val_outliers[col])
        X_test[col] = le[col].transform(X_test[col])
ss = {}
do_scale = True
# Min-max scale the non-one-hot columns into [0, 1], again fitting on the
# combined splits so all sets share the same scaling.
if do_scale:
    data_scale = X_train_full[0:1].append(X_test[0:1]).append(X_val_full[0:1])
    features_to_process = [x for x in data_scale.columns.values if "usertag_" not in x]
    for col in features_to_process:
        data = X_train_full[col].append(X_val_full[col]).append(X_test[col])
        ss[col] = preprocessing.MinMaxScaler() #define and store a scaler for every column so we can inverse_transform
        ss[col].fit(data.values)
        if col in X_train.columns.values:
            X_train[col] = ss[col].transform(X_train[col])
            X_train_inliers[col] = ss[col].transform(X_train_inliers[col])
            X_train_outliers[col] = ss[col].transform(X_train_outliers[col])
            X_val_full[col] = ss[col].transform(X_val_full[col])
            X_val_inliers[col] = ss[col].transform(X_val_inliers[col])
            X_val_outliers[col] = ss[col].transform(X_val_outliers[col])
        if col in X_test.columns.values:
            X_test[col] = ss[col].transform(X_test[col])
#%%
# Train on the rebalanced set: sampled inliers plus ALL click rows.
t_feat = pd.concat([X_train_inliers, X_train_outliers])
t_lab = pd.concat([y_train_inliers, y_train_outliers])
from sklearn.neural_network import MLPClassifier
#clf = MLPClassifier(hidden_layer_sizes=(60, 10, 4))
clf = MLPClassifier(hidden_layer_sizes=(60,10,4))
clf.fit(t_feat, t_lab)
val_preds = clf.predict(X_val_full)
print(confusion_matrix(y_val_full, val_preds))
#%%
# Keep a reference to the model chosen after inspecting the confusion matrix.
good_clf=clf
#%%
# Score the full validation set and extract P(click) (second class column).
val_preds = good_clf.predict(X_val_full)
print(confusion_matrix(y_val_full, val_preds))
val_probas = good_clf.predict_proba(X_val_full)
val_click_prob = [x[1] for x in val_probas]
#%%
# Recorded validation confusion matrices for the architectures tried:
#20,10,4
#[[295894 3629]
# [ 125 101]]
#pd.DataFrame({"bidid":df_val.bidid,"clickprob":val_click_prob, "clickpred":val_preds}).to_csv('nn_val_preds.csv')
#100,20,10
#[[285291 14232]
# [ 117 109]]
#pd.DataFrame({"bidid":df_val.bidid,"clickprob":val_click_prob, "clickpred":val_preds}).to_csv('nn_val_preds_v2.csv')
#60,10,4
#[[292516 7007]
# [ 115 111]]
pd.DataFrame({"bidid":df_val.bidid,"clickprob":val_click_prob, "clickpred":val_preds}).to_csv('nn_val_preds_v3.csv')
#%%
# Score the test set (no labels available) and persist the predictions.
test_preds = good_clf.predict(X_test)
#print(confusion_matrix(y_val_full, val_preds))
test_probas = good_clf.predict_proba(X_test)
test_click_prob = [x[1] for x in test_probas]
pd.DataFrame({"bidid":df_test.bidid,"clickprob":test_click_prob, "clickpred":test_preds}).to_csv('nn_test_preds.csv')
| [
"sklearn.preprocessing.LabelEncoder",
"sklearn.neural_network.MLPClassifier",
"pandas.read_csv",
"random.seed",
"numpy.random.seed",
"pandas.DataFrame",
"pandas.concat",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.metrics.confusion_matrix"
] | [((166, 199), 'pandas.read_csv', 'pd.read_csv', (['"""data/train_ohe.csv"""'], {}), "('data/train_ohe.csv')\n", (177, 199), True, 'import pandas as pd\n'), ((210, 248), 'pandas.read_csv', 'pd.read_csv', (['"""data/validation_ohe.csv"""'], {}), "('data/validation_ohe.csv')\n", (221, 248), True, 'import pandas as pd\n'), ((260, 292), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_ohe.csv"""'], {}), "('data/test_ohe.csv')\n", (271, 292), True, 'import pandas as pd\n'), ((670, 692), 'random.seed', 'random.seed', (['rand_seed'], {}), '(rand_seed)\n', (681, 692), False, 'import random\n'), ((694, 719), 'numpy.random.seed', 'np.random.seed', (['rand_seed'], {}), '(rand_seed)\n', (708, 719), True, 'import numpy as np\n'), ((4527, 4573), 'pandas.concat', 'pd.concat', (['[X_train_inliers, X_train_outliers]'], {}), '([X_train_inliers, X_train_outliers])\n', (4536, 4573), True, 'import pandas as pd\n'), ((4583, 4629), 'pandas.concat', 'pd.concat', (['[y_train_inliers, y_train_outliers]'], {}), '([y_train_inliers, y_train_outliers])\n', (4592, 4629), True, 'import pandas as pd\n'), ((4753, 4798), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': '(60, 10, 4)'}), '(hidden_layer_sizes=(60, 10, 4))\n', (4766, 4798), False, 'from sklearn.neural_network import MLPClassifier\n'), ((4867, 4906), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_val_full', 'val_preds'], {}), '(y_val_full, val_preds)\n', (4883, 4906), False, 'from sklearn.metrics import confusion_matrix\n'), ((4988, 5027), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_val_full', 'val_preds'], {}), '(y_val_full, val_preds)\n', (5004, 5027), False, 'from sklearn.metrics import confusion_matrix\n'), ((2694, 2722), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (2720, 2722), False, 'from sklearn import preprocessing\n'), ((3757, 3785), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), 
'()\n', (3783, 3785), False, 'from sklearn import preprocessing\n'), ((5519, 5613), 'pandas.DataFrame', 'pd.DataFrame', (["{'bidid': df_val.bidid, 'clickprob': val_click_prob, 'clickpred': val_preds}"], {}), "({'bidid': df_val.bidid, 'clickprob': val_click_prob,\n 'clickpred': val_preds})\n", (5531, 5613), True, 'import pandas as pd\n'), ((5825, 5922), 'pandas.DataFrame', 'pd.DataFrame', (["{'bidid': df_test.bidid, 'clickprob': test_click_prob, 'clickpred': test_preds}"], {}), "({'bidid': df_test.bidid, 'clickprob': test_click_prob,\n 'clickpred': test_preds})\n", (5837, 5922), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
#
# Created on Tue Jan 16 09:32:22 2018
#
# @author: hsauro
# ---------------------------------------------------------------------
# Plotting Utilities
# ---------------------------------------------------------------------
import tellurium as _te
from mpl_toolkits.mplot3d import Axes3D as _Axes3D
import numpy as _np
import matplotlib.pyplot as _plt
import random
def plotAsciiConcentrationsBar (r, scale=5):
    '''
    Display the floating species concentrations as an ASCII bar chart.

    Each species id is left-padded to the width of the longest id, followed
    by a bar of '*' characters whose length is trunc(scale * concentration).

    Args:
    -----
    r : reference
       roadrunner instance
    scale : integer
       optional parameter to scale the ascii bar graph

    Example:
    >>> teUtils.plotting.plotAsciiConcentrationsBar (r, scale=20)
    '''
    import math

    concentrations = r.getFloatingSpeciesConcentrations()
    ids = r.getFloatingSpeciesIds()
    if len (ids) == 0:
       # Nothing to display for a model without floating species
       # (also avoids max() raising on an empty sequence).
       return
    # Pad every id to the width of the longest one so the bars line up.
    width = len (max (ids, key=len))
    for name, conc in zip (ids, concentrations):
        # Negative values produce an empty bar (negative * '*' == '').
        bar = math.trunc (scale*conc)*'*'
        print ('{:{X}.{Y}}'.format (name, X=width, Y=width), ':', bar)
def plotAsciiReactionRatesBar (r, scale=5):
    '''
    Display the reaction rates as an ASCII bar chart.

    Each reaction id is left-padded to the width of the longest id, followed
    by a bar of '*' characters whose length is trunc(scale * rate).

    Args:
    -----
    r : reference
       roadrunner instance
    scale : integer
       optional parameter to scale the ascii bar graph

    Example:
    >>> teUtils.plotting.plotAsciiReactionRatesBar (r, scale=20)
    '''
    import math

    rates = r.getReactionRates()
    ids = r.getReactionIds()
    if len (ids) == 0:
       # Nothing to display for a model without reactions
       # (also avoids max() raising on an empty sequence).
       return
    # Pad every id to the width of the longest one so the bars line up.
    width = len (max (ids, key=len))
    for name, rate in zip (ids, rates):
        # Negative rates produce an empty bar (negative * '*' == '').
        bar = math.trunc (scale*rate)*'*'
        print ('{:{X}.{Y}}'.format (name, X=width, Y=width), ':', bar)
def plotRandSimGrid (r, species=None, pdfExport=None, figsize=(11,8), maxRange=10, endTime=200, numPoints=500, ngrid=20):
    '''
    Plots a grid of simulations, each simulation is based on the same model
    but randomly drawn parameter values.

    Args:
        r : reference
           roadrunner instance
        species : list of str
           optional: species to plot; all floating species by default
        figsize : tuple of float
           optional: width and height of plot in inches
        endTime : double
           optional: time to simulate to
        numPoints: double
           optional: number of points to generate for the plot
        ngrid : integer
           optional: the size of the grid, default is 20 x 20 plots
        maxRange: double
           optional: upper range for randomly drawn parameter values
        pdfExport : string
           optional parameter, indicates the filename to export the plot as a pdf file

    Example:
    >>> teUtils.plotting.plotRandSimGrid (r)
    '''
    print ("Starting....")
    # species=None default avoids the mutable-default-argument pitfall;
    # an explicit empty list behaves the same as None.
    if not species:
        slist = sorted (r.getFloatingSpeciesIds())
        # +1 accounts for the time column prepended below, so that the
        # plotting loop (range(n-1)) covers every species, not all but one.
        n = r.getNumFloatingSpecies() + 1
    else:
        slist = list (species)
        n = len (slist) + 1
    slist = ['time'] + slist
    print ('Creating subplots (will take a while for a large grid)...')
    fig, axarr = _plt.subplots(ngrid, ngrid, figsize=figsize)
    print ("Adjust subplots...")
    fig.subplots_adjust (wspace=0.15, hspace=0.15)
    print ("Run simulations and populate grid....")
    for i in range(ngrid):
        for j in range(ngrid):
            r.reset()
            # Draw a fresh random parameter set for every grid cell.
            for pid in r.getGlobalParameterIds():
                r[pid] = random.random()*maxRange
            m = r.simulate (0, endTime, numPoints, slist)
            ax = _plt.subplot2grid ((ngrid,ngrid), (i,j))
            # Only the bottom row of the grid gets an x label
            # (was i==n-1, which compared against the species count).
            if i==ngrid-1:
               ax.set_xlabel ('Time')
               ax.set_xticklabels([])
            else:
               ax.set_xticklabels([])
               ax.set_xticks([])
            if j == 0:
               ax.set_yticklabels([])
            else:
               ax.set_yticks([])
            # Column 0 is time; columns 1..n-1 are the species.
            for k in range (n-1):
                ax.plot (m[:,0],m[:,k+1])
    if pdfExport != None:
        fig.savefig(pdfExport)
def plotPhasePortraitGrid (r, pdfExport=None, figsize=(11,8), endTime=200, numPoints=500):
    '''
    Plots an n x n grid of phase portraits of the floating species
    concentrations (species i plotted against species j in each cell).
    Args:
       r : reference
           roadrunner instance
       pdfExport : string
           optional: filename to export the plot to as a pdf file
       figsize : tuple of float
           optional: width and height of plot in inches
       endTime : double
           optional: time to simulate to
       numPoints: double
           optional: number of points to generate for the plot
    Example:
       >>> teUtils.plotting.plotPhasePortraitGrid (r)
    '''
    print ("Starting....")
    slist = sorted (r.getFloatingSpeciesIds())
    r.reset()
    print ('Run simulation...')
    # Select only the floating species columns; m[:,i] is species slist[i].
    m = r.simulate (0, endTime, numPoints, slist)
    n = r.getNumFloatingSpecies()
    print ('Creating subplots (will take a while for a large grid)...')
    fig, axarr = _plt.subplots(n, n, figsize=figsize)
    print ("Adjust subplots...")
    fig.subplots_adjust (wspace=0.15, hspace=0.15)
    for i in range(n):
        for j in range(n):
            ax = _plt.subplot2grid ((n,n), (i,j))
            # Only the bottom row shows x labels and only the first column
            # shows y labels; everything else is stripped for readability.
            if i == n-1:
                ax.set_xlabel (slist[j])
                ax.set_xticklabels([])
            else:
                ax.set_xticklabels([])
                ax.set_xticks([])
            if j == 0:
                ax.set_ylabel (slist[i])
                ax.set_yticklabels([])
            else:
                ax.set_yticklabels([])
                ax.set_yticks([])
            ax.plot (m[:,i], m[:,j])
    if pdfExport is not None:
        fig.savefig(pdfExport)
def plotConcentrationControlHeatMap (r, pdfExport=None, annotations=True, figsize=(13,7), vmin=-1, vmax=1):
    '''
    Display the concentration control coefficients as a heat map.
    Args:
       r : reference
           roadrunner instance
       pdfExport : string
           optional: filename to export the heat map image to as a pdf file
       annotations : boolean
           optional: draw the coefficient values on the heatmap cells
       figsize : tuple of double
           optional: sets the size of the plot, eg figsize=(10,5)
       vmin and vmax : double
           optional: lower and upper limits for the colour range
    Example:
       >>> teUtils.plotting.plotConcentrationControlHeatMap (r, pdfExport='heapmap.pdf')
    '''
    import seaborn as sns
    import pandas as pd
    hist = r.getScaledConcentrationControlCoefficientMatrix()
    # Rows are species ids; columns are labelled E0..E(n-1), one per reaction.
    ss = r.getFloatingSpeciesIds()
    rr = ["E" + str(x) for x in range (r.getNumReactions())]
    df = pd.DataFrame (hist, columns=rr, index=ss)
    f, ax = _plt.subplots(figsize=figsize)
    sns.heatmap(df, annot=annotations, fmt="5.2f", linewidths=.5, ax=ax, cmap='bwr', vmin=vmin, vmax=vmax)
    if pdfExport is not None:
        f.savefig(pdfExport)
def plotFluxControlHeatMap (r, pdfExport=None, annotations=True, figsize=(13,7), vmin=-1, vmax=1):
    '''
    Render the scaled flux control coefficient matrix as a heat map.
    Args:
       r : reference
           roadrunner instance
       pdfExport : string
           optional parameter, if present it should indicate the filename to export the heat map image to in the form of pdf
       annotations : boolean
           used to draw values on the heatmap cells
       figsize : tuple
           sets the size of the plot, eg figsize=(10,5)
       vmin and vmax : double
           set the lower and upper limits for the range
    Example:
       >>> teUtils.plotting.plotFluxControlHeatMap (r, pdfExport='heapmap.pdf')
    '''
    import seaborn as sns
    import pandas as pd
    # Rows are reaction ids; columns are enzyme labels E0..E(n-1).
    ccMatrix = r.getScaledFluxControlCoefficientMatrix()
    rowLabels = r.getReactionIds()
    colLabels = ["E" + str(i) for i in range (r.getNumReactions())]
    frame = pd.DataFrame (ccMatrix, columns=colLabels, index=rowLabels)
    fig, axis = _plt.subplots(figsize=figsize)
    sns.heatmap(frame, annot=annotations, fmt="5.2f", linewidths=.5,
                vmin=vmin, vmax=vmax, ax=axis, cmap='bwr')
    if pdfExport is not None:
        fig.savefig(pdfExport)
def plotArrayHeatMap (data, pdfExport=None, annotations=True, figsize=(13,7), vmin=-1, vmax=1):
    '''
    Display an arbitrary 2D array as a heat map.
    (Docstring fixed: the previous one was copy-pasted from
    plotFluxControlHeatMap and described a non-existent r argument.)
    Args:
       data : 2D array-like
           the matrix of values to display
       pdfExport : string
           optional: filename to export the heat map image to as a pdf file
       annotations : boolean
           optional: draw the values on the heatmap cells
       figsize : tuple
           optional: sets the size of the plot, eg figsize=(10,5)
       vmin and vmax : double
           optional: lower and upper limits for the colour range
    Example:
       >>> teUtils.plotting.plotArrayHeatMap (m, pdfExport='heatmap.pdf')
    '''
    import seaborn as sns
    import pandas as pd
    df = pd.DataFrame (data)
    f, ax = _plt.subplots(figsize=figsize)
    sns.heatmap(df, annot=annotations, fmt="5.2f", linewidths=.5, vmin=vmin, vmax=vmax, ax=ax, cmap='bwr')
    if pdfExport is not None:
        f.savefig(pdfExport)
def plotConcentrationControlIn3D (r, upperLimit=1, lowerLimit=-1, figsize=(10, 8)):
    '''
    Display the concentration control coefficients as a 3D bar plot.
    Args:
       r : reference
           roadrunner instance
       upperLimit : float
           optional parameter, sets the upper z axis limit
       lowerLimit : float
           optional parameter, sets the lower z axis limit
       figsize : tuple of float
           optional: width and height of plot in inches
    Example:
       >>> teUtils.plotting.plotConcentrationControlIn3D (r)
    '''
    # Renamed the matplotlib.colors alias: the original bound the local
    # result 'colors = cm.YlOrRd(...)' over the module import of the
    # same name (shadowing). Also dropped an unused pyplot import.
    import matplotlib.colors as mplcolors
    import matplotlib.cm as cm
    fig = _plt.figure(figsize=figsize)
    ax = fig.add_subplot(111, projection='3d')
    hist = r.getScaledConcentrationControlCoefficientMatrix()
    xedges = _np.arange (float (hist.shape[0]) + 1)
    yedges = _np.arange (float (hist.shape[1]) + 1)
    # Construct arrays for the anchor positions.
    # Note: _np.meshgrid gives arrays in (ny, nx) so we use 'F' to flatten
    # xpos, ypos in column-major order.
    xpos, ypos = _np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25)
    xpos = xpos.flatten('F')
    ypos = ypos.flatten('F')
    zpos = _np.zeros_like(xpos)
    # Bar footprints (dx, dy) and heights (dz = the coefficients).
    dx = 0.5 * _np.ones_like(zpos)
    dy = dx.copy()
    dz = hist.flatten()
    # Map coefficient magnitudes onto the YlOrRd colour map.
    offset = dz + _np.abs(dz.min())
    fracs = offset.astype(float)/offset.max()
    norm = mplcolors.Normalize(fracs.min(), fracs.max())
    barColors = cm.YlOrRd (norm(fracs))
    ax.set_zlim3d(lowerLimit, upperLimit)
    ax.set_zlabel('Control Coefficient')
    ax.set_xlabel('Species')
    ax.set_ylabel('Enzymes')
    # NOTE(review): ax.w_xaxis / ax.w_yaxis are deprecated in recent
    # matplotlib (use ax.xaxis / ax.yaxis) -- confirm the supported
    # matplotlib version before changing.
    ax.w_xaxis.set_ticks(_np.arange (float (hist.shape[0])))
    ax.w_xaxis.set_ticklabels(r.getFloatingSpeciesIds())
    ax.w_yaxis.set_ticks(_np.arange (float (hist.shape[1])))
    ax.w_yaxis.set_ticklabels(r.getReactionIds())
    ax.bar3d (xpos, ypos, zpos, dx, dy, dz, color=barColors, zsort='average')
def plotFluxControlIn3D (r, upperLimit=1, lowerLimit=-1, figsize=(9, 7)):
    '''
    Display the flux control coefficients as a 3D bar plot.
    Args:
       r : reference
           roadrunner instance
       upperLimit : float
           optional parameter, sets the upper z axis limit
       lowerLimit : float
           optional parameter, sets the lower z axis limit
       figsize : tuple of float
           optional: width and height of plot in inches
    Example:
       >>> teUtils.plotting.plotFluxControlIn3D (r)
    '''
    # Changes: removed a leftover debug print of hist.shape, and renamed
    # the matplotlib.colors alias so the local colour array no longer
    # shadows the module import of the same name.
    import matplotlib.cm as cm
    import matplotlib.colors as mplcolors
    fig = _plt.figure(figsize=figsize)
    ax = fig.add_subplot(111, projection='3d')
    hist = r.getScaledFluxControlCoefficientMatrix()
    xedges = _np.arange (float (hist.shape[0]) + 1)
    yedges = _np.arange (float (hist.shape[1]) + 1)
    # Construct arrays for the anchor positions.
    # Note: _np.meshgrid gives arrays in (ny, nx) so we use 'F' to flatten
    # xpos, ypos in column-major order.
    xpos, ypos = _np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25)
    xpos = xpos.flatten('F')
    ypos = ypos.flatten('F')
    zpos = _np.zeros_like(xpos)
    # Bar footprints (dx, dy) and heights (dz = the coefficients).
    dx = 0.5 * _np.ones_like(zpos)
    dy = dx.copy()
    dz = hist.flatten()
    # Map coefficient magnitudes onto the YlOrRd colour map.
    offset = dz + _np.abs(dz.min())
    fracs = offset.astype(float)/offset.max()
    norm = mplcolors.Normalize(fracs.min(), fracs.max())
    barColors = cm.YlOrRd (norm(fracs))
    ax.set_zlim3d(lowerLimit, upperLimit)
    ax.set_zlabel('Control Coefficient')
    ax.set_xlabel('Fluxes')
    ax.set_ylabel('Enzymes')
    ax.w_xaxis.set_ticks(_np.arange (float (hist.shape[0])))
    ax.w_xaxis.set_ticklabels(r.getReactionIds())
    ax.w_yaxis.set_ticks(_np.arange (float (hist.shape[1])))
    ax.w_yaxis.set_ticklabels(r.getReactionIds())
    ax.bar3d (xpos, ypos, zpos, dx, dy, dz, color=barColors, zsort='average')
def plotReactionRates (r, figsize=(12,6)):
    '''
    Plots a bar graph of the current reaction rates.
    Args:
       r : reference
           roadrunner instance
       figsize : tuple of float
           optional: width and height of plot in inches
    Example:
       >>> teUtils.plotting.plotReactionRates (r, figsize=(12,6))
    '''
    # (Removed an unused function-local pyplot import; the body uses the
    # module-level _plt alias.)
    xlabels = r.getReactionIds()
    rates = r.getReactionRates()
    _plt.figure(figsize=figsize)
    _plt.bar(xlabels, rates, label=xlabels)
    # Rotate the tick labels so long reaction ids do not overlap.
    _plt.xticks(range (len (xlabels)), xlabels, ha='right', rotation=45)
def plotFloatingSpecies (r, figsize=(12,6)):
    '''
    Plots a bar graph of the current floating species concentrations.
    Args:
       r : reference
           roadrunner instance
       figsize : tuple of float
           optional: width and height of plot in inches
    Example:
       >>> teUtils.plotting.plotFloatingSpecies (r, figsize=(12,6))
    '''
    # (Removed an unused function-local pyplot import; the body uses the
    # module-level _plt alias.)
    xlabels = r.getFloatingSpeciesIds()
    concs = r.getFloatingSpeciesConcentrations()
    _plt.figure(figsize=figsize)
    _plt.bar(xlabels, concs, label=xlabels)
    # Rotate the tick labels so long species ids do not overlap.
    _plt.xticks(range (len (xlabels)), xlabels, ha='right', rotation=45)
def plotArray(result, loc='upper right', show=True, resetColorCycle=True,
              xlabel=None, ylabel=None, title=None, xlim=None, ylim=None,
              xscale='linear', yscale="linear", grid=False, labels=None, **kwargs):
    """ Plot a 2D graph based on an array where the first column is the x-axis
    The first column of the array must be the x-axis and remaining columns the y-axis.
    Note that you can add plotting options as named key values after
    the array. To add a legend, include the label legend values:
    te.plotArray (m, labels=['Label 1', 'Label 2', etc])
    Make sure you include as many labels as there are curves to plot!
    Use show=False to add multiple curves. Use color='red' to use the same color for every curve.
    Args:
        result : 2D array
            first column is x, remaining columns are y series
    Returns:
        Returns a handle to the plotting object, or None if the array has
        no y columns to plot.
    Example:
        >>> import numpy as np
        >>> result = _np.array([[1,2,3], [7.2,6.5,8.8], [9.8, 6.5, 4.3]])
        >>> te.plotArray(result, title="My graph", xlim=((0, 5)))
    """
    # FIXME: unify r.plot & _te.plot (lots of code duplication)
    # reset color cycle (columns in repeated simulations have same color)
    if resetColorCycle:
        _plt.gca().set_prop_cycle(None)
    if 'linewidth' not in kwargs:
        kwargs['linewidth'] = 2.0
    # get the labels
    Ncol = result.shape[1]
    if labels is None:
        labels = result.dtype.names
    # p stays None when the array has a single column (nothing to plot);
    # previously the final `return p` raised NameError in that case.
    p = None
    for k in range(1, Ncol):
        if loc is None or labels is None:
            # no legend or labels
            p = _plt.plot(result[:, 0], result[:, k], **kwargs)
        else:
            p = _plt.plot(result[:, 0], result[:, k], label=labels[k-1], **kwargs)
    # labels
    if xlabel is not None:
        _plt.xlabel(xlabel)
    if ylabel is not None:
        _plt.ylabel(ylabel)
    if title is not None:
        _plt.title(title)
    if xlim is not None:
        _plt.xlim(xlim)
    if ylim is not None:
        _plt.ylim(ylim)
    # axis and grids
    _plt.xscale(xscale)
    _plt.yscale(yscale)
    _plt.grid(grid)
    # show legend
    if loc is not None and labels is not None:
        _plt.legend(loc=loc)
    # show plot
    if show:
        _plt.show()
    return p
def plotWithLegend(r, result=None, loc='upper left', show=True, **kwargs):
    """Delegate plotting to the roadrunner instance's own plot method.

    Forwards the simulation result, legend location, show flag and any
    extra keyword arguments unchanged, and returns whatever r.plot returns.
    """
    return r.plot(result=result, loc=loc, show=show, **kwargs)
def testme():
    """ Call this method to try out the methods in this module"""
    # Build a small linear reversible pathway as a demo model.
    r = _te.loada("""
      J1: $Xo -> S1; k1*Xo - k11*S1;
      J2: S1 -> S2; k2*S1 - k22*S2;
      J3: S2 -> S3; k3*S2 - k33*S3;
      J4: S3 -> S4; k3*S3 - k44*S4;
      J5: S4 -> S5; k4*S4 - k44*S5;
      J6: S5 -> S6; k5*S5 - k55*S6;
      J7: S6 -> S7; k4*S6 - k44*S7;
      J8: S7 -> S8; k3*S7 - k33*S8;
      J9: S8 -> ; k4*S8;
      k1 = 0.3; k11 = 0.26;
      k2 = 0.5; k22 = 0.41;
      k3 = 0.27; k33 = 0.12;
      k4 = 0.9; k44 = 0.56
      k5 = 0.14; k55 = 0.02
      Xo = 10;
    """)
    import teUtils
    # Control coefficients are defined at steady state, so solve for it first.
    r.steadyState()
    teUtils.plotting.plotFloatingSpecies (r)
    teUtils.plotting.plotConcentrationControlIn3D (r)
    teUtils.plotting.plotFluxControlIn3D (r, lowerLimit=0)
    teUtils.plotting.plotConcentrationControlHeatMap (r)
    teUtils.plotting.plotFluxControlHeatMap (r)
if __name__ == "__main__":
    # Ad-hoc manual test drive: build a demo model, then exercise each
    # plotting helper in this module in turn.
    import teUtils
    r = _te.loada("""
        J1: $Xo -> S1; k1*Xo - k11*S1;
        J2: S1 -> S2; k2*S1 - k22*S2;
        J3: S2 -> S3; k3*S2 - k33*S3;
        J4: S3 -> S4; k3*S3 - k44*S4;
        J5: S4 -> S5; k4*S4 - k44*S5;
        J6: S5 -> S6; k5*S5 - k55*S6;
        J7: S6 -> S7; k4*S6 - k44*S7;
        J8: S7 -> S8; k3*S7 - k33*S8;
        J9: S8 -> ; k4*S8;
        k1 = 0.3; k11 = 0.26;
        k2 = 0.5; k22 = 0.41;
        k3 = 0.27; k33 = 0.12;
        k4 = 0.9; k44 = 0.56
        k5 = 0.14; k55 = 0.02
        Xo = 10;
    """)
    m = r.simulate(0, 100,200)
    teUtils.plotting.plotArray (m)
    teUtils.plotting.plotWithLegend (r, m)
    # Control coefficients require the model to be at steady state.
    r.steadyState()
    teUtils.plotting.plotFloatingSpecies (r, figsize=(6,3))
    teUtils.plotting.plotConcentrationControlIn3D (r)
    teUtils.plotting.plotFluxControlIn3D (r, lowerLimit=0)
    teUtils.plotting.plotConcentrationControlHeatMap (r)
    teUtils.plotting.plotFluxControlHeatMap (r)
| [
"matplotlib.pyplot.grid",
"teUtils.plotting.plotFluxControlIn3D",
"matplotlib.pyplot.ylabel",
"math.trunc",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"teUtils.plotting.plotFluxControlHeatMap",
"pandas.DataFrame",
"numpy.meshgrid",
"matplotlib.pyplot.... | [((2939, 2983), 'matplotlib.pyplot.subplots', '_plt.subplots', (['ngrid', 'ngrid'], {'figsize': 'figsize'}), '(ngrid, ngrid, figsize=figsize)\n', (2952, 2983), True, 'import matplotlib.pyplot as _plt\n'), ((4915, 4951), 'matplotlib.pyplot.subplots', '_plt.subplots', (['n', 'n'], {'figsize': 'figsize'}), '(n, n, figsize=figsize)\n', (4928, 4951), True, 'import matplotlib.pyplot as _plt\n'), ((6671, 6711), 'pandas.DataFrame', 'pd.DataFrame', (['hist'], {'columns': 'rr', 'index': 'ss'}), '(hist, columns=rr, index=ss)\n', (6683, 6711), True, 'import pandas as pd\n'), ((6726, 6756), 'matplotlib.pyplot.subplots', '_plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6739, 6756), True, 'import matplotlib.pyplot as _plt\n'), ((6766, 6874), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'annot': 'annotations', 'fmt': '"""5.2f"""', 'linewidths': '(0.5)', 'ax': 'ax', 'cmap': '"""bwr"""', 'vmin': 'vmin', 'vmax': 'vmax'}), "(df, annot=annotations, fmt='5.2f', linewidths=0.5, ax=ax, cmap=\n 'bwr', vmin=vmin, vmax=vmax)\n", (6777, 6874), True, 'import seaborn as sns\n'), ((7871, 7911), 'pandas.DataFrame', 'pd.DataFrame', (['hist'], {'columns': 'rr', 'index': 'ss'}), '(hist, columns=rr, index=ss)\n', (7883, 7911), True, 'import pandas as pd\n'), ((7926, 7956), 'matplotlib.pyplot.subplots', '_plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (7939, 7956), True, 'import matplotlib.pyplot as _plt\n'), ((7966, 8073), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'annot': 'annotations', 'fmt': '"""5.2f"""', 'linewidths': '(0.5)', 'vmin': 'vmin', 'vmax': 'vmax', 'ax': 'ax', 'cmap': '"""bwr"""'}), "(df, annot=annotations, fmt='5.2f', linewidths=0.5, vmin=vmin,\n vmax=vmax, ax=ax, cmap='bwr')\n", (7977, 8073), True, 'import seaborn as sns\n'), ((9014, 9032), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (9026, 9032), True, 'import pandas as pd\n'), ((9047, 9077), 'matplotlib.pyplot.subplots', '_plt.subplots', ([], 
{'figsize': 'figsize'}), '(figsize=figsize)\n', (9060, 9077), True, 'import matplotlib.pyplot as _plt\n'), ((9087, 9194), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'annot': 'annotations', 'fmt': '"""5.2f"""', 'linewidths': '(0.5)', 'vmin': 'vmin', 'vmax': 'vmax', 'ax': 'ax', 'cmap': '"""bwr"""'}), "(df, annot=annotations, fmt='5.2f', linewidths=0.5, vmin=vmin,\n vmax=vmax, ax=ax, cmap='bwr')\n", (9098, 9194), True, 'import seaborn as sns\n'), ((9940, 9968), 'matplotlib.pyplot.figure', '_plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (9951, 9968), True, 'import matplotlib.pyplot as _plt\n'), ((10452, 10504), 'numpy.meshgrid', '_np.meshgrid', (['(xedges[:-1] + 0.25)', '(yedges[:-1] + 0.25)'], {}), '(xedges[:-1] + 0.25, yedges[:-1] + 0.25)\n', (10464, 10504), True, 'import numpy as _np\n'), ((10574, 10594), 'numpy.zeros_like', '_np.zeros_like', (['xpos'], {}), '(xpos)\n', (10588, 10594), True, 'import numpy as _np\n'), ((12044, 12072), 'matplotlib.pyplot.figure', '_plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (12055, 12072), True, 'import matplotlib.pyplot as _plt\n'), ((12547, 12599), 'numpy.meshgrid', '_np.meshgrid', (['(xedges[:-1] + 0.25)', '(yedges[:-1] + 0.25)'], {}), '(xedges[:-1] + 0.25, yedges[:-1] + 0.25)\n', (12559, 12599), True, 'import numpy as _np\n'), ((12669, 12689), 'numpy.zeros_like', '_np.zeros_like', (['xpos'], {}), '(xpos)\n', (12683, 12689), True, 'import numpy as _np\n'), ((13945, 13973), 'matplotlib.pyplot.figure', '_plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (13956, 13973), True, 'import matplotlib.pyplot as _plt\n'), ((13982, 14021), 'matplotlib.pyplot.bar', '_plt.bar', (['xlabels', 'rates'], {'label': 'xlabels'}), '(xlabels, rates, label=xlabels)\n', (13990, 14021), True, 'import matplotlib.pyplot as _plt\n'), ((14601, 14629), 'matplotlib.pyplot.figure', '_plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (14612, 14629), True, 'import matplotlib.pyplot as 
_plt\n'), ((14638, 14677), 'matplotlib.pyplot.bar', '_plt.bar', (['xlabels', 'concs'], {'label': 'xlabels'}), '(xlabels, concs, label=xlabels)\n', (14646, 14677), True, 'import matplotlib.pyplot as _plt\n'), ((16784, 16803), 'matplotlib.pyplot.xscale', '_plt.xscale', (['xscale'], {}), '(xscale)\n', (16795, 16803), True, 'import matplotlib.pyplot as _plt\n'), ((16808, 16827), 'matplotlib.pyplot.yscale', '_plt.yscale', (['yscale'], {}), '(yscale)\n', (16819, 16827), True, 'import matplotlib.pyplot as _plt\n'), ((16832, 16847), 'matplotlib.pyplot.grid', '_plt.grid', (['grid'], {}), '(grid)\n', (16841, 16847), True, 'import matplotlib.pyplot as _plt\n'), ((17235, 17820), 'tellurium.loada', '_te.loada', (['"""\n J1: $Xo -> S1; k1*Xo - k11*S1;\n J2: S1 -> S2; k2*S1 - k22*S2;\n J3: S2 -> S3; k3*S2 - k33*S3;\n J4: S3 -> S4; k3*S3 - k44*S4;\n J5: S4 -> S5; k4*S4 - k44*S5;\n J6: S5 -> S6; k5*S5 - k55*S6;\n J7: S6 -> S7; k4*S6 - k44*S7;\n J8: S7 -> S8; k3*S7 - k33*S8;\n J9: S8 -> ; k4*S8;\n \n k1 = 0.3; k11 = 0.26;\n k2 = 0.5; k22 = 0.41;\n k3 = 0.27; k33 = 0.12;\n k4 = 0.9; k44 = 0.56\n k5 = 0.14; k55 = 0.02\n Xo = 10;\n """'], {}), '(\n """\n J1: $Xo -> S1; k1*Xo - k11*S1;\n J2: S1 -> S2; k2*S1 - k22*S2;\n J3: S2 -> S3; k3*S2 - k33*S3;\n J4: S3 -> S4; k3*S3 - k44*S4;\n J5: S4 -> S5; k4*S4 - k44*S5;\n J6: S5 -> S6; k5*S5 - k55*S6;\n J7: S6 -> S7; k4*S6 - k44*S7;\n J8: S7 -> S8; k3*S7 - k33*S8;\n J9: S8 -> ; k4*S8;\n \n k1 = 0.3; k11 = 0.26;\n k2 = 0.5; k22 = 0.41;\n k3 = 0.27; k33 = 0.12;\n k4 = 0.9; k44 = 0.56\n k5 = 0.14; k55 = 0.02\n Xo = 10;\n """\n )\n', (17244, 17820), True, 'import tellurium as _te\n'), ((17860, 17899), 'teUtils.plotting.plotFloatingSpecies', 'teUtils.plotting.plotFloatingSpecies', (['r'], {}), '(r)\n', (17896, 17899), False, 'import teUtils\n'), ((17910, 17958), 'teUtils.plotting.plotConcentrationControlIn3D', 'teUtils.plotting.plotConcentrationControlIn3D', (['r'], {}), '(r)\n', (17955, 17958), False, 'import teUtils\n'), ((17964, 18017), 
'teUtils.plotting.plotFluxControlIn3D', 'teUtils.plotting.plotFluxControlIn3D', (['r'], {'lowerLimit': '(0)'}), '(r, lowerLimit=0)\n', (18000, 18017), False, 'import teUtils\n'), ((18028, 18079), 'teUtils.plotting.plotConcentrationControlHeatMap', 'teUtils.plotting.plotConcentrationControlHeatMap', (['r'], {}), '(r)\n', (18076, 18079), False, 'import teUtils\n'), ((18085, 18127), 'teUtils.plotting.plotFluxControlHeatMap', 'teUtils.plotting.plotFluxControlHeatMap', (['r'], {}), '(r)\n', (18124, 18127), False, 'import teUtils\n'), ((18186, 18771), 'tellurium.loada', '_te.loada', (['"""\n J1: $Xo -> S1; k1*Xo - k11*S1;\n J2: S1 -> S2; k2*S1 - k22*S2;\n J3: S2 -> S3; k3*S2 - k33*S3;\n J4: S3 -> S4; k3*S3 - k44*S4;\n J5: S4 -> S5; k4*S4 - k44*S5;\n J6: S5 -> S6; k5*S5 - k55*S6;\n J7: S6 -> S7; k4*S6 - k44*S7;\n J8: S7 -> S8; k3*S7 - k33*S8;\n J9: S8 -> ; k4*S8;\n \n k1 = 0.3; k11 = 0.26;\n k2 = 0.5; k22 = 0.41;\n k3 = 0.27; k33 = 0.12;\n k4 = 0.9; k44 = 0.56\n k5 = 0.14; k55 = 0.02\n Xo = 10;\n """'], {}), '(\n """\n J1: $Xo -> S1; k1*Xo - k11*S1;\n J2: S1 -> S2; k2*S1 - k22*S2;\n J3: S2 -> S3; k3*S2 - k33*S3;\n J4: S3 -> S4; k3*S3 - k44*S4;\n J5: S4 -> S5; k4*S4 - k44*S5;\n J6: S5 -> S6; k5*S5 - k55*S6;\n J7: S6 -> S7; k4*S6 - k44*S7;\n J8: S7 -> S8; k3*S7 - k33*S8;\n J9: S8 -> ; k4*S8;\n \n k1 = 0.3; k11 = 0.26;\n k2 = 0.5; k22 = 0.41;\n k3 = 0.27; k33 = 0.12;\n k4 = 0.9; k44 = 0.56\n k5 = 0.14; k55 = 0.02\n Xo = 10;\n """\n )\n', (18195, 18771), True, 'import tellurium as _te\n'), ((18802, 18831), 'teUtils.plotting.plotArray', 'teUtils.plotting.plotArray', (['m'], {}), '(m)\n', (18828, 18831), False, 'import teUtils\n'), ((18837, 18874), 'teUtils.plotting.plotWithLegend', 'teUtils.plotting.plotWithLegend', (['r', 'm'], {}), '(r, m)\n', (18868, 18874), False, 'import teUtils\n'), ((18905, 18960), 'teUtils.plotting.plotFloatingSpecies', 'teUtils.plotting.plotFloatingSpecies', (['r'], {'figsize': '(6, 3)'}), '(r, figsize=(6, 3))\n', (18941, 18960), False, 'import 
teUtils\n'), ((18970, 19018), 'teUtils.plotting.plotConcentrationControlIn3D', 'teUtils.plotting.plotConcentrationControlIn3D', (['r'], {}), '(r)\n', (19015, 19018), False, 'import teUtils\n'), ((19024, 19077), 'teUtils.plotting.plotFluxControlIn3D', 'teUtils.plotting.plotFluxControlIn3D', (['r'], {'lowerLimit': '(0)'}), '(r, lowerLimit=0)\n', (19060, 19077), False, 'import teUtils\n'), ((19088, 19139), 'teUtils.plotting.plotConcentrationControlHeatMap', 'teUtils.plotting.plotConcentrationControlHeatMap', (['r'], {}), '(r)\n', (19136, 19139), False, 'import teUtils\n'), ((19145, 19187), 'teUtils.plotting.plotFluxControlHeatMap', 'teUtils.plotting.plotFluxControlHeatMap', (['r'], {}), '(r)\n', (19184, 19187), False, 'import teUtils\n'), ((10675, 10694), 'numpy.ones_like', '_np.ones_like', (['zpos'], {}), '(zpos)\n', (10688, 10694), True, 'import numpy as _np\n'), ((12770, 12789), 'numpy.ones_like', '_np.ones_like', (['zpos'], {}), '(zpos)\n', (12783, 12789), True, 'import numpy as _np\n'), ((16534, 16553), 'matplotlib.pyplot.xlabel', '_plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (16545, 16553), True, 'import matplotlib.pyplot as _plt\n'), ((16589, 16608), 'matplotlib.pyplot.ylabel', '_plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (16600, 16608), True, 'import matplotlib.pyplot as _plt\n'), ((16643, 16660), 'matplotlib.pyplot.title', '_plt.title', (['title'], {}), '(title)\n', (16653, 16660), True, 'import matplotlib.pyplot as _plt\n'), ((16694, 16709), 'matplotlib.pyplot.xlim', '_plt.xlim', (['xlim'], {}), '(xlim)\n', (16703, 16709), True, 'import matplotlib.pyplot as _plt\n'), ((16743, 16758), 'matplotlib.pyplot.ylim', '_plt.ylim', (['ylim'], {}), '(ylim)\n', (16752, 16758), True, 'import matplotlib.pyplot as _plt\n'), ((16922, 16942), 'matplotlib.pyplot.legend', '_plt.legend', ([], {'loc': 'loc'}), '(loc=loc)\n', (16933, 16942), True, 'import matplotlib.pyplot as _plt\n'), ((16980, 16991), 'matplotlib.pyplot.show', '_plt.show', ([], {}), '()\n', (16989, 16991), 
True, 'import matplotlib.pyplot as _plt\n'), ((3423, 3464), 'matplotlib.pyplot.subplot2grid', '_plt.subplot2grid', (['(ngrid, ngrid)', '(i, j)'], {}), '((ngrid, ngrid), (i, j))\n', (3440, 3464), True, 'import matplotlib.pyplot as _plt\n'), ((5141, 5174), 'matplotlib.pyplot.subplot2grid', '_plt.subplot2grid', (['(n, n)', '(i, j)'], {}), '((n, n), (i, j))\n', (5158, 5174), True, 'import matplotlib.pyplot as _plt\n'), ((16340, 16387), 'matplotlib.pyplot.plot', '_plt.plot', (['result[:, 0]', 'result[:, k]'], {}), '(result[:, 0], result[:, k], **kwargs)\n', (16349, 16387), True, 'import matplotlib.pyplot as _plt\n'), ((16418, 16486), 'matplotlib.pyplot.plot', '_plt.plot', (['result[:, 0]', 'result[:, k]'], {'label': 'labels[k - 1]'}), '(result[:, 0], result[:, k], label=labels[k - 1], **kwargs)\n', (16427, 16486), True, 'import matplotlib.pyplot as _plt\n'), ((1033, 1061), 'math.trunc', 'math.trunc', (['(scale * c[value])'], {}), '(scale * c[value])\n', (1043, 1061), False, 'import math\n'), ((1658, 1686), 'math.trunc', 'math.trunc', (['(scale * c[value])'], {}), '(scale * c[value])\n', (1668, 1686), False, 'import math\n'), ((16004, 16014), 'matplotlib.pyplot.gca', '_plt.gca', ([], {}), '()\n', (16012, 16014), True, 'import matplotlib.pyplot as _plt\n'), ((3286, 3301), 'random.random', 'random.random', ([], {}), '()\n', (3299, 3301), False, 'import random\n')] |
import numpy as np
def solution(N):
    """Count the decompositions of N into at least two distinct positive
    parts (apparently: ways to climb an N-step staircase with strictly
    increasing jump sizes).

    Args:
        N: total number of steps (non-negative int).

    Returns:
        int: the number of valid decompositions; 0 for N < 3, since the
        smallest valid split is 1 + 2.
    """
    # Guard small N: the original code wrote to steps[3][2] and steps[4][2]
    # unconditionally, which raises IndexError on the (N+1)x(N+1) array
    # whenever N <= 3 (e.g. solution(3) crashed).
    if N < 3:
        return 0
    steps = np.zeros((N + 1, N + 1), int)
    # Base cases: 3 = 1+2 and 4 = 1+3 each have exactly one split.
    steps[3][2] = 1
    if N >= 4:
        steps[4][2] = 1
    for y in range(5, N + 1):
        # Two-part splits of y: every two-part split of y-2 shifted up,
        # plus the new split (1, y-1).
        steps[y][2] = steps[y - 2][2] + 1
        for x in range(3, y + 1):
            steps[y][x] = steps[y - x][x - 1] + steps[y - x][x]
            if steps[y][x] == 0:
                # Counts for larger part sizes are all zero from here on.
                break
    return int(np.sum(steps[N]))
# Smoke test: print the count for a small N and for N = 200.
print(solution(3))
print(solution(200))
"numpy.sum",
"numpy.zeros"
] | [((70, 90), 'numpy.zeros', 'np.zeros', (['shape', 'int'], {}), '(shape, int)\n', (78, 90), True, 'import numpy as np\n'), ((404, 420), 'numpy.sum', 'np.sum', (['steps[N]'], {}), '(steps[N])\n', (410, 420), True, 'import numpy as np\n')] |
"""
Tests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex
"""
from datetime import datetime, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
PeriodIndex,
TimedeltaIndex,
date_range,
period_range,
)
import pandas._testing as tm
class EqualsTests:
    """Shared equals() checks for datetime-like indexes.

    Mixin: concrete subclasses supply the ``index`` fixture.
    """

    def test_not_equals_numeric(self, index):
        # Integer/unsigned/float views of the underlying i8 data must not
        # compare equal to the datetime-like index itself.
        numeric_views = [
            Index(index.asi8),
            Index(index.asi8.astype("u8")),
            Index(index.asi8).astype("f8"),
        ]
        for other in numeric_views:
            assert not index.equals(other)

    def test_equals(self, index):
        # Identity, object-dtype and categorical round-trips all compare equal.
        for same in (
            index,
            index.astype(object),
            CategoricalIndex(index),
            CategoricalIndex(index.astype(object)),
        ):
            assert index.equals(same)

    def test_not_equals_non_arraylike(self, index):
        assert not index.equals(list(index))

    def test_not_equals_strings(self, index):
        stringified = Index([str(x) for x in index], dtype=object)
        assert not index.equals(stringified)
        assert not index.equals(CategoricalIndex(stringified))

    def test_not_equals_misc_strs(self, index):
        assert not index.equals(Index(list("abc")))
class TestPeriodIndexEquals(EqualsTests):
    """equals() behavior specific to PeriodIndex."""

    @pytest.fixture
    def index(self):
        return period_range("2013-01-01", periods=5, freq="D")

    # TODO: de-duplicate with other test_equals2 methods
    @pytest.mark.parametrize("freq", ["D", "M"])
    def test_equals2(self, freq):
        # GH#13107
        idx = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq)
        # Equal to itself, a copy, and its object-dtype view.
        for same in (idx, idx.copy(), idx.astype(object)):
            assert idx.equals(same)
        assert idx.astype(object).equals(idx)
        assert idx.astype(object).equals(idx.astype(object))
        # A plain list or Series of the same values is not an Index.
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))

        # Same dates but hourly frequency: never equal.
        hourly = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="H")
        for other in (hourly, hourly.copy(), hourly.astype(object)):
            assert not idx.equals(other)
        assert not idx.astype(object).equals(hourly)
        assert not idx.equals(list(hourly))
        assert not idx.equals(pd.Series(hourly))

        # Identical i8 data relabelled with a different freq: still not equal.
        relabelled = PeriodIndex._simple_new(
            idx._values._simple_new(idx._values.asi8, freq="H")
        )
        tm.assert_numpy_array_equal(idx.asi8, relabelled.asi8)
        for other in (relabelled, relabelled.copy(), relabelled.astype(object)):
            assert not idx.equals(other)
        assert not idx.astype(object).equals(relabelled)
        assert not idx.equals(list(relabelled))
        assert not idx.equals(pd.Series(relabelled))
class TestDatetimeIndexEquals(EqualsTests):
    """equals() behavior specific to DatetimeIndex."""

    @pytest.fixture
    def index(self):
        return date_range("2013-01-01", periods=5)

    def test_equals2(self):
        # GH#13107
        idx = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])
        # Equal to itself, a copy, and its object-dtype view.
        for same in (idx, idx.copy(), idx.astype(object)):
            assert idx.equals(same)
        assert idx.astype(object).equals(idx)
        assert idx.astype(object).equals(idx.astype(object))
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))

        # Same wall-clock strings but tz-aware: never equal to tz-naive.
        localized = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")
        for other in (localized, localized.copy(), localized.astype(object)):
            assert not idx.equals(other)
        assert not idx.astype(object).equals(localized)
        assert not idx.equals(list(localized))
        assert not idx.equals(pd.Series(localized))

        # Same i8 values reinterpreted in another tz: still not equal.
        relabelled = DatetimeIndex(idx.asi8, tz="US/Pacific")
        tm.assert_numpy_array_equal(idx.asi8, relabelled.asi8)
        for other in (relabelled, relabelled.copy(), relabelled.astype(object)):
            assert not idx.equals(other)
        assert not idx.astype(object).equals(relabelled)
        assert not idx.equals(list(relabelled))
        assert not idx.equals(pd.Series(relabelled))

        # Comparing against out-of-bounds datetime objects must not raise.
        oob = Index([datetime(2500, 1, 1)] * 3, dtype=object)
        for dti in (idx, localized, relabelled):
            assert not dti.equals(oob)

        # ... nor against out-of-bounds datetime64 values.
        oob2 = oob.map(np.datetime64)
        for dti in (idx, localized, relabelled):
            assert not dti.equals(oob2)

    @pytest.mark.parametrize("freq", ["B", "C"])
    def test_not_equals_bday(self, freq):
        rng = date_range("2009-01-01", "2010-01-01", freq=freq)
        assert not rng.equals(list(rng))
class TestTimedeltaIndexEquals(EqualsTests):
    """equals() behavior specific to TimedeltaIndex."""

    @pytest.fixture
    def index(self):
        return tm.makeTimedeltaIndex(10)

    def test_equals2(self):
        # GH#13107
        idx = TimedeltaIndex(["1 days", "2 days", "NaT"])
        # Equal to itself, a copy, and its object-dtype view.
        for same in (idx, idx.copy(), idx.astype(object)):
            assert idx.equals(same)
        assert idx.astype(object).equals(idx)
        assert idx.astype(object).equals(idx.astype(object))
        assert not idx.equals(list(idx))
        assert not idx.equals(pd.Series(idx))

        # Same values in a different order: never equal.
        reordered = TimedeltaIndex(["2 days", "1 days", "NaT"])
        for other in (reordered, reordered.copy(), reordered.astype(object)):
            assert not idx.equals(other)
        assert not idx.astype(object).equals(reordered)
        assert not idx.astype(object).equals(reordered.astype(object))
        assert not idx.equals(list(reordered))
        assert not idx.equals(pd.Series(reordered))

        # Comparisons outside the implementation range must not raise
        # OverflowError.
        oob = Index([timedelta(days=10 ** 6)] * 3, dtype=object)
        assert not idx.equals(oob)
        assert not reordered.equals(oob)

        # FIXME: oob.apply(np.timedelta64) incorrectly overflows
        oob2 = Index([np.timedelta64(x) for x in oob], dtype=object)
        assert not idx.equals(oob2)
        assert not reordered.equals(oob2)
| [
"pandas.Series",
"datetime.datetime",
"pandas.DatetimeIndex",
"datetime.timedelta",
"pandas.Index",
"pytest.mark.parametrize",
"pandas._testing.assert_numpy_array_equal",
"pandas.period_range",
"pandas.PeriodIndex",
"numpy.timedelta64",
"pandas.TimedeltaIndex",
"pandas.CategoricalIndex",
"pa... | [((1516, 1559), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', "['D', 'M']"], {}), "('freq', ['D', 'M'])\n", (1539, 1559), False, 'import pytest\n'), ((4712, 4755), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""freq"""', "['B', 'C']"], {}), "('freq', ['B', 'C'])\n", (4735, 4755), False, 'import pytest\n'), ((1402, 1449), 'pandas.period_range', 'period_range', (['"""2013-01-01"""'], {'periods': '(5)', 'freq': '"""D"""'}), "('2013-01-01', periods=5, freq='D')\n", (1414, 1449), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((1630, 1689), 'pandas.PeriodIndex', 'PeriodIndex', (["['2011-01-01', '2011-01-02', 'NaT']"], {'freq': 'freq'}), "(['2011-01-01', '2011-01-02', 'NaT'], freq=freq)\n", (1641, 1689), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((2024, 2082), 'pandas.PeriodIndex', 'PeriodIndex', (["['2011-01-01', '2011-01-02', 'NaT']"], {'freq': '"""H"""'}), "(['2011-01-01', '2011-01-02', 'NaT'], freq='H')\n", (2035, 2082), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((2526, 2574), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['idx.asi8', 'idx3.asi8'], {}), '(idx.asi8, idx3.asi8)\n', (2553, 2574), True, 'import pandas._testing as tm\n'), ((2959, 2994), 'pandas.date_range', 'date_range', (['"""2013-01-01"""'], {'periods': '(5)'}), "('2013-01-01', periods=5)\n", (2969, 2994), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((3061, 3111), 'pandas.DatetimeIndex', 'DatetimeIndex', (["['2011-01-01', '2011-01-02', 'NaT']"], {}), "(['2011-01-01', '2011-01-02', 'NaT'])\n", (3074, 3111), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, 
date_range, period_range\n'), ((3446, 3513), 'pandas.DatetimeIndex', 'DatetimeIndex', (["['2011-01-01', '2011-01-02', 'NaT']"], {'tz': '"""US/Pacific"""'}), "(['2011-01-01', '2011-01-02', 'NaT'], tz='US/Pacific')\n", (3459, 3513), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((3847, 3887), 'pandas.DatetimeIndex', 'DatetimeIndex', (['idx.asi8'], {'tz': '"""US/Pacific"""'}), "(idx.asi8, tz='US/Pacific')\n", (3860, 3887), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((3897, 3945), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['idx.asi8', 'idx3.asi8'], {}), '(idx.asi8, idx3.asi8)\n', (3924, 3945), True, 'import pandas._testing as tm\n'), ((4814, 4863), 'pandas.date_range', 'date_range', (['"""2009-01-01"""', '"""2010-01-01"""'], {'freq': 'freq'}), "('2009-01-01', '2010-01-01', freq=freq)\n", (4824, 4863), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((5015, 5040), 'pandas._testing.makeTimedeltaIndex', 'tm.makeTimedeltaIndex', (['(10)'], {}), '(10)\n', (5036, 5040), True, 'import pandas._testing as tm\n'), ((5107, 5150), 'pandas.TimedeltaIndex', 'TimedeltaIndex', (["['1 days', '2 days', 'NaT']"], {}), "(['1 days', '2 days', 'NaT'])\n", (5121, 5150), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((5485, 5528), 'pandas.TimedeltaIndex', 'TimedeltaIndex', (["['2 days', '1 days', 'NaT']"], {}), "(['2 days', '1 days', 'NaT'])\n", (5499, 5528), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((763, 786), 'pandas.CategoricalIndex', 'CategoricalIndex', (['index'], {}), '(index)\n', (779, 786), False, 'from pandas import CategoricalIndex, DatetimeIndex, 
Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((461, 478), 'pandas.Index', 'Index', (['index.asi8'], {}), '(index.asi8)\n', (466, 478), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((1144, 1167), 'pandas.CategoricalIndex', 'CategoricalIndex', (['other'], {}), '(other)\n', (1160, 1167), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n'), ((1990, 2004), 'pandas.Series', 'pd.Series', (['idx'], {}), '(idx)\n', (1999, 2004), True, 'import pandas as pd\n'), ((2342, 2357), 'pandas.Series', 'pd.Series', (['idx2'], {}), '(idx2)\n', (2351, 2357), True, 'import pandas as pd\n'), ((2834, 2849), 'pandas.Series', 'pd.Series', (['idx3'], {}), '(idx3)\n', (2843, 2849), True, 'import pandas as pd\n'), ((3412, 3426), 'pandas.Series', 'pd.Series', (['idx'], {}), '(idx)\n', (3421, 3426), True, 'import pandas as pd\n'), ((3773, 3788), 'pandas.Series', 'pd.Series', (['idx2'], {}), '(idx2)\n', (3782, 3788), True, 'import pandas as pd\n'), ((4205, 4220), 'pandas.Series', 'pd.Series', (['idx3'], {}), '(idx3)\n', (4214, 4220), True, 'import pandas as pd\n'), ((5451, 5465), 'pandas.Series', 'pd.Series', (['idx'], {}), '(idx)\n', (5460, 5465), True, 'import pandas as pd\n'), ((5855, 5870), 'pandas.Series', 'pd.Series', (['idx2'], {}), '(idx2)\n', (5864, 5870), True, 'import pandas as pd\n'), ((6214, 6231), 'numpy.timedelta64', 'np.timedelta64', (['x'], {}), '(x)\n', (6228, 6231), True, 'import numpy as np\n'), ((4324, 4344), 'datetime.datetime', 'datetime', (['(2500)', '(1)', '(1)'], {}), '(2500, 1, 1)\n', (4332, 4344), False, 'from datetime import datetime, timedelta\n'), ((6006, 6029), 'datetime.timedelta', 'timedelta', ([], {'days': '(10 ** 6)'}), '(days=10 ** 6)\n', (6015, 6029), False, 'from datetime import datetime, timedelta\n'), ((578, 595), 'pandas.Index', 'Index', (['index.asi8'], {}), '(index.asi8)\n', (583, 
595), False, 'from pandas import CategoricalIndex, DatetimeIndex, Index, PeriodIndex, TimedeltaIndex, date_range, period_range\n')] |
import numpy as np
from sklearn import model_selection
from sklearn.metrics import confusion_matrix, mean_squared_error
from sklearn import metrics
from sklearn import model_selection, metrics #Additional sklearn functions
from sklearn.metrics import accuracy_score,f1_score,roc_auc_score,log_loss
from sklearn.metrics import mean_squared_error,median_absolute_error,mean_absolute_error
from sklearn.metrics import classification_report, confusion_matrix,mean_squared_log_error
from sklearn.metrics import precision_recall_curve
from sklearn.model_selection import cross_val_score, StratifiedKFold, KFold
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import f1_score
from sklearn.metrics import log_loss
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
#####################################################################################
from sklearn.metrics import confusion_matrix
def balanced_accuracy_score(y_true, y_pred, sample_weight=None,
                            adjusted=False):
    """Balanced accuracy: the unweighted mean of per-class recall.

    Parameters
    ----------
    y_true : array-like
        Ground-truth labels.
    y_pred : array-like
        Predicted labels.
    sample_weight : array-like, optional
        Per-sample weights forwarded to the confusion matrix.
    adjusted : bool, default False
        If True, rescale so that chance performance scores 0 and perfect
        performance scores 1.

    Returns
    -------
    float
        The (optionally chance-adjusted) balanced accuracy.
    """
    # BUGFIX: ``warnings`` was used below but never imported, raising a
    # NameError whenever y_pred contained a class absent from y_true.
    import warnings

    C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
    with np.errstate(divide='ignore', invalid='ignore'):
        # Per-class recall: diagonal (correct) over row sums (support).
        per_class = np.diag(C) / C.sum(axis=1)
    if np.any(np.isnan(per_class)):
        # Classes that appear only in y_pred give 0/0 = NaN rows.
        warnings.warn('y_pred contains classes not in y_true')
        per_class = per_class[~np.isnan(per_class)]
    score = np.mean(per_class)
    if adjusted:
        n_classes = len(per_class)
        chance = 1 / n_classes
        score -= chance
        score /= 1 - chance
    return score
def accu(results, y_cv):
    """Fraction of entries in ``results`` that equal ``y_cv`` (column-wise)."""
    n_samples = y_cv.shape[0]
    hits = np.equal(results, y_cv).astype(int)
    return hits.sum(axis=0) / n_samples
def rmse(results, y_cv):
    """Root-mean-squared error of ``results`` against ``y_cv`` along axis 0."""
    squared_err = np.square(results - y_cv)
    return np.sqrt(squared_err.mean(axis=0))
######## Defining objective functions for HyperOpt here ######################
def gini(truth, predictions):
    """Unnormalised Gini coefficient of ``predictions`` ranked against ``truth``.

    Samples are sorted by descending prediction (ties broken by original
    order); the cumulative share of ``truth`` captured along that ranking is
    accumulated and centred so a random ranking scores ~0.
    """
    # BUGFIX: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    # dtype is the documented replacement and is behaviourally identical.
    g = np.asarray(np.c_[truth, predictions, np.arange(len(truth))], dtype=float)
    # lexsort: last key is primary -> sort by prediction descending,
    # with the original index as the tie-breaker.
    g = g[np.lexsort((g[:, 2], -1 * g[:, 1]))]
    gs = g[:, 0].cumsum().sum() / g[:, 0].sum()
    gs -= (len(truth) + 1) / 2.
    return gs / len(truth)
def gini_sklearn(truth, predictions):
    """Normalised Gini: the raw coefficient divided by that of a perfect ranking."""
    perfect_score = gini(truth, truth)
    return gini(truth, predictions) / perfect_score
# Thin wrappers around sklearn regression metrics so each can be passed
# uniformly as an objective function (all take (truth, predictions)).
def gini_meae(truth, predictions):
    """Median absolute error (lower is better)."""
    score = median_absolute_error(truth, predictions)
    return score
def gini_msle(truth, predictions):
    """Mean squared logarithmic error (lower is better)."""
    score = mean_squared_log_error(truth, predictions)
    return score
def gini_mae(truth, predictions):
    """Mean absolute error (lower is better)."""
    score = mean_absolute_error(truth, predictions)
    return score
def gini_mse(truth, predictions):
    """Mean squared error (lower is better)."""
    score = mean_squared_error(truth, predictions)
    return score
def gini_rmse(truth, predictions):
    """Root mean squared error (lower is better)."""
    score = np.sqrt(mean_squared_error(truth, predictions))
    return score
def gini_accuracy(truth, predictions):
    """Plain classification accuracy on hard-label predictions."""
    return accuracy_score(truth, predictions)
def gini_bal_accuracy(truth, predictions):
    """Balanced accuracy, falling back to plain accuracy if the balanced
    variant cannot be computed for the given inputs."""
    try:
        return balanced_accuracy_score(truth, predictions)
    # BUGFIX: a bare ``except:`` also swallows KeyboardInterrupt/SystemExit;
    # catch Exception to keep the best-effort fallback without masking exits.
    except Exception:
        return accuracy_score(truth, predictions)
def gini_roc(truth, predictions):
    """Area under the ROC curve."""
    return roc_auc_score(truth, predictions)
def gini_precision(truth, predictions,pos_label=1):
    """Precision of class ``pos_label`` from the per-class precision vector.

    NOTE(review): unlike the argmax-based wrappers below, this one passes
    ``predictions`` straight through — it appears to expect hard labels;
    confirm against the callers.
    """
    return precision_score(truth, predictions,average=None)[pos_label]
# The wrappers below assume ``predictions`` is an (n_samples, n_classes)
# score/probability matrix: argmax(axis=1) converts it to hard labels.
def gini_average_precision(truth, predictions):
    """Weighted average precision on argmax-decoded labels."""
    return average_precision_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_weighted_precision(truth, predictions):
    """Support-weighted precision on argmax-decoded labels."""
    return precision_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_macro_precision(truth, predictions):
    """Unweighted per-class mean precision on argmax-decoded labels."""
    return precision_score(truth, predictions.argmax(axis=1),average='macro')
def gini_micro_precision(truth, predictions):
    """Global (micro-averaged) precision on argmax-decoded labels."""
    return precision_score(truth, predictions.argmax(axis=1),average='micro')
def gini_samples_precision(truth, predictions):
    """Per-sample averaged precision (multilabel) on argmax-decoded labels."""
    return precision_score(truth, predictions.argmax(axis=1),average='samples')
def gini_f1(truth, predictions,pos_label=1):
    """F1 of class ``pos_label`` from the per-class F1 vector (hard labels)."""
    return f1_score(truth, predictions,average=None)[pos_label]
def gini_weighted_f1(truth, predictions):
    """Support-weighted F1 on argmax-decoded labels."""
    return f1_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_macro_f1(truth, predictions):
    """Unweighted per-class mean F1 on argmax-decoded labels."""
    return f1_score(truth, predictions.argmax(axis=1),average='macro')
def gini_micro_f1(truth, predictions):
    """Global (micro-averaged) F1 on argmax-decoded labels."""
    return f1_score(truth, predictions.argmax(axis=1),average='micro')
def gini_samples_f1(truth, predictions):
    """Per-sample averaged F1 (multilabel) on argmax-decoded labels."""
    return f1_score(truth, predictions.argmax(axis=1),average='samples')
def gini_log_loss(truth, predictions):
    """Normalised logarithmic loss on probability predictions (lower is better)."""
    return log_loss(truth, predictions,normalize=True)
def gini_recall(truth, predictions,pos_label=1):
    """Recall of class ``pos_label`` from the per-class recall vector (hard labels)."""
    return recall_score(truth, predictions,average=None)[pos_label]
# The wrappers below decode an (n_samples, n_classes) score matrix via argmax.
def gini_weighted_recall(truth, predictions):
    """Support-weighted recall on argmax-decoded labels."""
    return recall_score(truth, predictions.argmax(axis=1),average='weighted')
def gini_samples_recall(truth, predictions):
    """Per-sample averaged recall (multilabel) on argmax-decoded labels."""
    return recall_score(truth, predictions.argmax(axis=1),average='samples')
def gini_macro_recall(truth, predictions):
    """Unweighted per-class mean recall on argmax-decoded labels."""
    return recall_score(truth, predictions.argmax(axis=1),average='macro')
def gini_micro_recall(truth, predictions):
    """Global (micro-averaged) recall on argmax-decoded labels."""
    return recall_score(truth, predictions.argmax(axis=1),average='micro')
| [
"numpy.mean",
"sklearn.metrics.f1_score",
"sklearn.metrics.median_absolute_error",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.roc_auc_score",
"numpy.errstate",
"numpy.lexsort",
"sklearn.metrics.log_loss",
"numpy.isnan",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score... | [((1194, 1255), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {'sample_weight': 'sample_weight'}), '(y_true, y_pred, sample_weight=sample_weight)\n', (1210, 1255), False, 'from sklearn.metrics import confusion_matrix\n'), ((1523, 1541), 'numpy.mean', 'np.mean', (['per_class'], {}), '(per_class)\n', (1530, 1541), True, 'import numpy as np\n'), ((2355, 2396), 'sklearn.metrics.median_absolute_error', 'median_absolute_error', (['truth', 'predictions'], {}), '(truth, predictions)\n', (2376, 2396), False, 'from sklearn.metrics import mean_squared_error, median_absolute_error, mean_absolute_error\n'), ((2462, 2504), 'sklearn.metrics.mean_squared_log_error', 'mean_squared_log_error', (['truth', 'predictions'], {}), '(truth, predictions)\n', (2484, 2504), False, 'from sklearn.metrics import classification_report, confusion_matrix, mean_squared_log_error\n'), ((2569, 2608), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['truth', 'predictions'], {}), '(truth, predictions)\n', (2588, 2608), False, 'from sklearn.metrics import mean_squared_error, median_absolute_error, mean_absolute_error\n'), ((2673, 2711), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['truth', 'predictions'], {}), '(truth, predictions)\n', (2691, 2711), False, 'from sklearn.metrics import mean_squared_error, median_absolute_error, mean_absolute_error\n'), ((2893, 2927), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'predictions'], {}), '(truth, predictions)\n', (2907, 2927), False, 'from sklearn.metrics import accuracy_score\n'), ((3148, 3181), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['truth', 'predictions'], {}), '(truth, predictions)\n', (3161, 3181), False, 'from sklearn.metrics import roc_auc_score\n'), ((4569, 4613), 'sklearn.metrics.log_loss', 'log_loss', (['truth', 'predictions'], {'normalize': '(True)'}), '(truth, predictions, normalize=True)\n', (4577, 4613), False, 'from 
sklearn.metrics import log_loss\n'), ((1265, 1311), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1276, 1311), True, 'import numpy as np\n'), ((1374, 1393), 'numpy.isnan', 'np.isnan', (['per_class'], {}), '(per_class)\n', (1382, 1393), True, 'import numpy as np\n'), ((1830, 1868), 'numpy.mean', 'np.mean', (['((results - y_cv) ** 2)'], {'axis': '(0)'}), '((results - y_cv) ** 2, axis=0)\n', (1837, 1868), True, 'import numpy as np\n'), ((2073, 2108), 'numpy.lexsort', 'np.lexsort', (['(g[:, 2], -1 * g[:, 1])'], {}), '((g[:, 2], -1 * g[:, 1]))\n', (2083, 2108), True, 'import numpy as np\n'), ((2785, 2823), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['truth', 'predictions'], {}), '(truth, predictions)\n', (2803, 2823), False, 'from sklearn.metrics import mean_squared_error, median_absolute_error, mean_absolute_error\n'), ((3246, 3295), 'sklearn.metrics.precision_score', 'precision_score', (['truth', 'predictions'], {'average': 'None'}), '(truth, predictions, average=None)\n', (3261, 3295), False, 'from sklearn.metrics import precision_score\n'), ((4011, 4053), 'sklearn.metrics.f1_score', 'f1_score', (['truth', 'predictions'], {'average': 'None'}), '(truth, predictions, average=None)\n', (4019, 4053), False, 'from sklearn.metrics import f1_score\n'), ((4674, 4720), 'sklearn.metrics.recall_score', 'recall_score', (['truth', 'predictions'], {'average': 'None'}), '(truth, predictions, average=None)\n', (4686, 4720), False, 'from sklearn.metrics import recall_score\n'), ((1333, 1343), 'numpy.diag', 'np.diag', (['C'], {}), '(C)\n', (1340, 1343), True, 'import numpy as np\n'), ((3067, 3101), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['truth', 'predictions'], {}), '(truth, predictions)\n', (3081, 3101), False, 'from sklearn.metrics import accuracy_score\n'), ((1490, 1509), 'numpy.isnan', 'np.isnan', (['per_class'], {}), '(per_class)\n', (1498, 1509), True, 
'import numpy as np\n')] |
"""File containing links to data samples used (pointsource tracks).
Path to local copy of point source tracks, downloaded from /data/ana .. /current
with following README:
This directory contains an update to version-002p02 which fixes the
leap second bug for event MJDs in runs 120398 to 126377, inclusive.
These runs are only in seasons IC86, 2012-2014. See
https://drive.google.com/file/d/0B6TW2cWERhC6OFFCbWxsTHB1VzQ/view
For a full description of the leap second issue, which is present
in level 2 data for both Pass 1 & 2.
The data files contain the following columns:
data & MC fields
================
run - int64 - run id
event - int64 - event id
subevent - int64 - subevent id
NOTE: Seasons prior to IC86, 2012 have subevent = -1 for all events
because this info was difficult to find from point source sample
processing prior to 2012.
time - float64 - MJD in days
ra - float64 - right ascension in radians
NOTE: (computed with ra, dec = icecube.astro.dir_to_equa(zen, azi, time))
dec - float64 - declination in radians
NOTE: (computed with ra, dec = icecube.astro.dir_to_equa(zen, azi, time))
sinDec - float64 - sin(declination)
azi - float64 - azimuth in radians from splineMPE
zen - float64 - zenith in radians from splineMPE
angErr - float64 - angular error in radians, defined as sigma of 2D Gaussian
NOTE: angErr is the pull-corrected sigma paraboloid approximated as sigma = sqrt(s1**2 + s2**2)
logE - float64 - log10(reco energy/GeV), energy reco is MuEX
MC only fields
==============
trueE - float64 - true neutrino energy in GeV
trueRa - float64 - true right ascension in radians
trueDec - float64 - true declination in radians
ow - float64 - oneweight in GeV cm2 sr
File list:
IC40_exp.npy/IC40_exp.root Data for IC40 season
IC59_exp.npy/IC59_exp.root Data for IC59 season
IC79_exp.npy/IC79_exp.root Data for IC79 season
NOTE: IC79 data derive from the "b" version of the IC79 selection.
This file was labeled IC79b in version-002p00 and version-002p01
IC86_2011_exp.npy/IC86_2011_exp.root Data for IC86, 2011 season
IC86_2012_exp.npy/IC86_2012_exp.root Data for IC86, 2012 season
IC86_2013_exp.npy/IC86_2013_exp.root Data for IC86, 2013 season
IC86_2014_exp.npy/IC86_2014_exp.root Data for IC86, 2014 season
IC40_MC.npy/IC40_MC.root Monte Carlo for IC40 season
IC59_MC.npy/IC59_MC.root Monte Carlo for IC59 season
IC79_MC.npy/IC79_MC.root Monte Carlo for IC79 season
NOTE: IC79 Monte Carlo derive from the "b" version of the IC79 selection.
This file was labeled IC79b in version-002p00 and version-002p01
IC86_2011_MC.npy/IC86_2011_MC.root Monte Carlo for IC86, 2011 season
IC86_2012_MC.npy/IC86_2011_MC.root Monte Carlo for IC86, 2012 season
IC86_2013_MC.npy/IC86_2011_MC.root Monte Carlo for IC86, 2013 season
IC86_2014_MC.npy/IC86_2011_MC.root Monte Carlo for IC86, 2014 season
Note that the .npy and .root files have the same events and column names,
they only differ in their container format.
Where these files came from?
These files derive from those under /data/ana/analyses/version-002p01 with
subevent IDs take from:
/data/ana/PointSource/IC86_2012_PS/Merged_IC86.2012_*.hd5
/data/ana/PointSource/IC86_2012_PS/Merged_IC86.2013_*.hd5
/data/ana/PointSource/IC86_2012_PS/Merged_IC86.2014_*.hd5
/data/ana/PointSource/IC86_2012_PS/Merged_11*.hd5
"""
from flarestack.data.icecube.ic_season import (
IceCubeDataset,
IceCubeSeason,
icecube_dataset_dir,
)
from flarestack.data.icecube.ps_tracks import get_ps_binning
import numpy as np
import logging
logger = logging.getLogger(__name__)
# Local paths for the point-source tracks sample (version-002-p03) and the
# good-run lists (GRL) that accompany each season's experimental data.
ps_data_dir = icecube_dataset_dir + "ps_tracks/version-002-p03/"
grl_data_dir = ps_data_dir + "GRL/"
ps_v002_p03 = IceCubeDataset()
sample_name = "ps_tracks_v002_p03"
logger.debug(f"building {sample_name}")
logger.debug(f"adding IC40")
# One IceCubeSeason per detector configuration; sin(dec) and log10(E) binning
# come from the per-season lookup in get_ps_binning.
ic40 = IceCubeSeason(
    season_name="IC40",
    sample_name=sample_name,
    exp_path=ps_data_dir + "IC40_exp.npy",
    mc_path=ps_data_dir + "IC40_MC.npy",
    grl_path=grl_data_dir + "IC40_exp.npy",
    sin_dec_bins=get_ps_binning("IC40")[0],
    log_e_bins=get_ps_binning("IC40")[1],
)
ps_v002_p03.add_season(ic40)
logger.debug("adding IC59")
ic59 = IceCubeSeason(
    season_name="IC59",
    sample_name=sample_name,
    exp_path=ps_data_dir + "IC59_exp.npy",
    mc_path=ps_data_dir + "IC59_MC.npy",
    grl_path=grl_data_dir + "IC59_exp.npy",
    sin_dec_bins=get_ps_binning("IC59")[0],
    log_e_bins=get_ps_binning("IC59")[1],
)
ps_v002_p03.add_season(ic59)
logger.debug("adding IC79")
# NOTE: per the module docstring, IC79 data derive from the "b" variant of
# the IC79 selection.
ic79 = IceCubeSeason(
    season_name="IC79",
    sample_name=sample_name,
    exp_path=ps_data_dir + "IC79_exp.npy",
    mc_path=ps_data_dir + "IC79_MC.npy",
    grl_path=grl_data_dir + "IC79_exp.npy",
    sin_dec_bins=get_ps_binning("IC79")[0],
    log_e_bins=get_ps_binning("IC79")[1],
)
ps_v002_p03.add_season(ic79)
boundary = np.sin(np.radians(-5.0))  # North/South transition boundary
logger.debug("adding IC86 2011")
ic86_1 = IceCubeSeason(
    season_name="IC86_1",
    sample_name=sample_name,
    exp_path=ps_data_dir + "IC86_2011_exp.npy",
    mc_path=ps_data_dir + "IC86_2011_MC.npy",
    grl_path=grl_data_dir + "IC86_2011_exp.npy",
    sin_dec_bins=get_ps_binning("IC86_2011")[0],
    log_e_bins=get_ps_binning("IC86_2011")[1],
)
ps_v002_p03.add_season(ic86_1)
# Add optional subseasons for IC86 2, 3, and 4, that can be called instead of
# the combined season
# Optional subseasons for IC86 2012-2014; they share the IC86_2012 Monte
# Carlo and binning, and can be used instead of the combined IC86_234 season.
for i in range(2, 5):
    logger.debug(f"adding IC86 201{i}")
    ic86_i = IceCubeSeason(
        season_name="IC86_{0}".format(i),
        sample_name=sample_name,
        # BUGFIX: the experimental files use an underscore (IC86_2012_exp.npy,
        # see the file list in the module docstring and the GRL path below),
        # not a hyphen ("IC86-201X_exp.npy") as previously written here.
        exp_path=ps_data_dir + "IC86_201{0}_exp.npy".format(i),
        mc_path=ps_data_dir + "IC86_2012_MC.npy",
        grl_path=grl_data_dir + "IC86_201{0}_exp.npy".format(i),
        sin_dec_bins=get_ps_binning("IC86_2012")[0],
        log_e_bins=get_ps_binning("IC86_2012")[1],
    )
    ps_v002_p03.add_subseason(ic86_i)
logger.debug("adding IC86 2012-2014")
# Combined season for IC86 2012-2014: the three years are merged into one
# season with concatenated experimental data and GRL files, all using the
# IC86_2012 Monte Carlo and binning.
ic86_234 = IceCubeSeason(
    season_name="IC86_234",
    sample_name=sample_name,
    exp_path=[
        ps_data_dir + "IC86_2012_exp.npy",
        ps_data_dir + "IC86_2013_exp.npy",
        ps_data_dir + "IC86_2014_exp.npy",
    ],
    mc_path=ps_data_dir + "IC86_2012_MC.npy",
    grl_path=[
        grl_data_dir + "IC86_2012_exp.npy",
        grl_data_dir + "IC86_2013_exp.npy",
        grl_data_dir + "IC86_2014_exp.npy",
    ],
    sin_dec_bins=get_ps_binning("IC86_2012")[0],
    log_e_bins=get_ps_binning("IC86_2012")[1],
)
ps_v002_p03.add_season(ic86_234)
# ps_3_systematic_set = IceCubeDataset()
#
# for i, season_name in enumerate(["IC79", "IC86_1", "IC86_2"]):
# try:
# season = copy.copy(ps_v002_p03.seasons[season_name])
# except KeyError:
# season = copy.copy(ps_v002_p03.subseasons[season_name])
#
# season.season_name = ["IC79-2010", "IC86-2011", "IC86-2012"][i]
# season.sample_name = "all_sky_3_year_mc"
#
# ps_3_systematic_set.add_season(season)
| [
"logging.getLogger",
"flarestack.data.icecube.ic_season.IceCubeDataset",
"numpy.radians",
"flarestack.data.icecube.ps_tracks.get_ps_binning"
] | [((3990, 4017), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4007, 4017), False, 'import logging\n'), ((4134, 4150), 'flarestack.data.icecube.ic_season.IceCubeDataset', 'IceCubeDataset', ([], {}), '()\n', (4148, 4150), False, 'from flarestack.data.icecube.ic_season import IceCubeDataset, IceCubeSeason, icecube_dataset_dir\n'), ((5295, 5311), 'numpy.radians', 'np.radians', (['(-5.0)'], {}), '(-5.0)\n', (5305, 5311), True, 'import numpy as np\n'), ((4476, 4498), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC40"""'], {}), "('IC40')\n", (4490, 4498), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((4518, 4540), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC40"""'], {}), "('IC40')\n", (4532, 4540), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((4826, 4848), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC59"""'], {}), "('IC59')\n", (4840, 4848), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((4868, 4890), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC59"""'], {}), "('IC59')\n", (4882, 4890), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((5176, 5198), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC79"""'], {}), "('IC79')\n", (5190, 5198), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((5218, 5240), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC79"""'], {}), "('IC79')\n", (5232, 5240), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((5621, 5648), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC86_2011"""'], {}), "('IC86_2011')\n", (5635, 5648), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((5668, 5695), 
'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC86_2011"""'], {}), "('IC86_2011')\n", (5682, 5695), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((6818, 6845), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC86_2012"""'], {}), "('IC86_2012')\n", (6832, 6845), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((6865, 6892), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC86_2012"""'], {}), "('IC86_2012')\n", (6879, 6892), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((6201, 6228), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC86_2012"""'], {}), "('IC86_2012')\n", (6215, 6228), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n'), ((6252, 6279), 'flarestack.data.icecube.ps_tracks.get_ps_binning', 'get_ps_binning', (['"""IC86_2012"""'], {}), "('IC86_2012')\n", (6266, 6279), False, 'from flarestack.data.icecube.ps_tracks import get_ps_binning\n')] |
import collections
import openmlpimp
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter
from scipy.stats import gaussian_kde
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._search import ParameterSampler
from collections import Sized, defaultdict
from functools import partial
import math
import numpy as np
from sklearn.utils import resample
from sklearn.base import is_classifier, clone
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import _fit_and_score
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.fixes import rankdata
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.validation import indexable
from sklearn.metrics.scorer import check_scoring
class BaseSearchBandits(BaseSearchCV):
    """Shared machinery for bandit-style hyperparameter search (successive
    halving and Hyperband) built on sklearn's BaseSearchCV internals."""

    def _do_iteration(self, X, y, groups, sample_size, parameter_iterable, cv, eta):
        """Fit and score every candidate on the current subsample.

        Returns
        -------
        results : dict
            cv_results_-style arrays for this rung.
        new_parameter_iterable : list
            The top ``len(parameter_iterable) / eta`` parameter settings,
            ordered best-first, to be promoted to the next rung.
        best_index : int
            Index (within this rung) of the best candidate.
        best_parameters : dict
            Parameter setting of the best candidate.
        """
        base_estimator = clone(self.estimator)
        n_splits = cv.get_n_splits(X, y, groups)
        cv_iter = list(cv.split(X, y, groups))
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=self.pre_dispatch
        )(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                  train, test, self.verbose, parameters,
                                  fit_params=self.fit_params,
                                  return_train_score=self.return_train_score,
                                  return_n_test_samples=True,
                                  return_times=True, return_parameters=True,
                                  error_score=self.error_score)
          for parameters in parameter_iterable
          for train, test in cv_iter)

        # if one choose to see train score, "out" will contain train score info
        if self.return_train_score:
            (train_scores, test_scores, test_sample_counts,
             fit_time, score_time, parameters) = zip(*out)
        else:
            (test_scores, test_sample_counts,
             fit_time, score_time, parameters) = zip(*out)

        candidate_params = parameters[::n_splits]
        n_candidates = len(candidate_params)
        # TODO: obtain from cv_iter object
        sample_sizes = [sample_size] * n_candidates * n_splits

        results = dict()

        def _store(key_name, array, weights=None, splits=False, rank=False):
            """A small helper to store the scores/times to the cv_results_"""
            array = np.array(array, dtype=np.float64).reshape(n_candidates,
                                                            n_splits)
            if splits:
                for split_i in range(n_splits):
                    results["split%d_%s"
                            % (split_i, key_name)] = array[:, split_i]
            array_means = np.average(array, axis=1, weights=weights)
            results['mean_%s' % key_name] = array_means
            # Weighted std is not directly available in numpy
            array_stds = np.sqrt(np.average((array -
                                               array_means[:, np.newaxis]) ** 2,
                                              axis=1, weights=weights))
            results['std_%s' % key_name] = array_stds
            if rank:
                results["rank_%s" % key_name] = np.asarray(
                    rankdata(-array_means, method='min'), dtype=np.int32)

        # Computed the (weighted) mean and std for test scores alone
        # NOTE test_sample counts (weights) remain the same for all candidates
        # BUGFIX: ``np.int`` was removed in NumPy 1.24; the builtin ``int``
        # is the documented, behaviourally identical replacement.
        test_sample_counts = np.array(test_sample_counts[:n_splits],
                                      dtype=int)

        _store('test_score', test_scores, splits=True, rank=True,
               weights=test_sample_counts if self.iid else None)
        if self.return_train_score:
            _store('train_score', train_scores, splits=True)
        _store('fit_time', fit_time)
        _store('score_time', score_time)
        _store('sample_sizes', sample_sizes)

        best_index = np.flatnonzero(results["rank_test_score"] == 1)[0]
        best_parameters = candidate_params[best_index]

        # Promote the top 1/eta candidates (by mean test score) to the next rung.
        new_parameter_iterable = []
        order = np.argsort(results['mean_test_score'][-n_candidates:] * -1)
        for i in range(int(len(parameter_iterable) / eta)):
            new_parameter_iterable.append(candidate_params[order[i]])

        # Use one MaskedArray and mask all the places where the param is not
        # applicable for that candidate. Use defaultdict as each candidate may
        # not contain all the params
        param_results = defaultdict(partial(MaskedArray,
                                            np.empty(n_candidates, ),
                                            mask=True,
                                            dtype=object))
        for cand_i, params in enumerate(candidate_params):
            for name, value in params.items():
                # An all masked empty array gets created for the key
                # `"param_%s" % name` at the first occurence of `name`.
                # Setting the value at an index also unmasks that index
                param_results["param_%s" % name][cand_i] = value

        results.update(param_results)
        # Store a list of param dicts at the key 'params'
        results['params'] = candidate_params

        return results, new_parameter_iterable, best_index, best_parameters

    def _successive_halving(self, X, y, groups, cv, eta, hyperband_s, hyperband_smax=None):
        """Run one successive-halving bracket.

        Starts with ``n`` arms on a small subsample and, at each of the
        ``hyperband_s + 1`` rungs, keeps the best 1/eta arms while growing
        the subsample by a factor of eta.
        """
        results = dict()
        best_index = None
        hyperband_B = hyperband_smax + 1 if hyperband_smax is not None else hyperband_s
        print(hyperband_B, eta, hyperband_s, (hyperband_s + 1))
        hyperband_n = math.ceil(hyperband_B * eta ** hyperband_s / (hyperband_s + 1))
        print('- bracket %d; B = %d, n = %d' %(hyperband_s, hyperband_B, hyperband_n))
        parameter_iterable = ParameterSampler(self.param_distributions,
                                              hyperband_n,
                                              random_state=self.random_state + hyperband_s)
        for hyperband_i in range(0, hyperband_s + 1):
            # Subsample grows geometrically: len(X) * eta**-(s - i).
            sample_size = int(len(X) * (eta ** -(hyperband_s - hyperband_i)))
            arms_pulled = 0
            if 'mean_test_score' in results:
                arms_pulled = len(results['mean_test_score'])
            if groups is not None:
                X_resampled, y_resampled, groups_resampled = resample(X, y, groups, n_samples=sample_size, replace=False, random_state=self.random_state)
            else:
                # BUGFIX: pass random_state here as well, matching the grouped
                # branch; otherwise the subsample (and the search) was not
                # reproducible whenever groups was None.
                X_resampled, y_resampled = resample(X, y, n_samples=sample_size, replace=False, random_state=self.random_state)
                groups_resampled = None
            print('-- iteration %d sample size %d arms %d' %(hyperband_i, sample_size, len(parameter_iterable)))
            res = self._do_iteration(X_resampled, y_resampled, groups_resampled, sample_size, parameter_iterable, cv, eta)
            results_iteration, parameter_iterable, best_index_iteration, best_parameters_iteration = res
            # TODO: This assumes we always take the index from the highest bracket.
            best_index = arms_pulled + best_index_iteration
            best_parameters = best_parameters_iteration
            for key, values in results_iteration.items():
                if key not in results:
                    results[key] = values
                else:
                    results[key] = np.append(results[key], values)
        return results, best_index, best_parameters
class SuccessiveHalving(BaseSearchBandits):
    """Successive-halving hyperparameter search: start with
    eta**(num_steps - 1) randomly sampled candidates on a small subsample
    and repeatedly keep the best 1/eta while growing the data by eta."""

    def __init__(self, estimator, param_distributions, num_steps,
                 eta, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise', return_train_score=True):
        # Search-specific settings; everything else is forwarded to
        # BaseSearchCV unchanged.
        self.param_distributions = param_distributions
        self.random_state = random_state
        self.num_steps = num_steps
        self.eta = eta
        super(SuccessiveHalving, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score,
            return_train_score=return_train_score)

    def fit(self, X, y, groups=None):
        """Actual fitting, performing the search over parameters."""
        # Number of initial arms so that num_steps halvings remain possible.
        num_arms = self.eta ** (self.num_steps - 1)
        parameter_iterable = ParameterSampler(self.param_distributions,
                                              num_arms,
                                              random_state=self.random_state)

        estimator = self.estimator
        cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

        X, y, groups = indexable(X, y, groups)
        n_splits = cv.get_n_splits(X, y, groups)
        if self.verbose > 0 and isinstance(parameter_iterable, Sized):
            n_candidates = len(parameter_iterable)
            print("Fitting {0} folds for each of {1} candidates, totalling"
                  " {2} fits".format(n_splits, n_candidates,
                                     n_candidates * n_splits))

        base_estimator = clone(self.estimator)
        # Run a single bracket with num_steps - 1 halving rungs.
        results, best_index, best_parameters = self._successive_halving(X, y, groups, cv, self.eta, self.num_steps - 1, self.num_steps - 1)

        self.cv_results_ = results
        self.best_index_ = best_index
        self.n_splits_ = n_splits

        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best_parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class HyperBand(BaseSearchBandits):
    """Hyperband hyperparameter search: runs ``num_brackets`` successive-
    halving brackets with different exploration/exploitation trade-offs and
    keeps the overall best candidate across brackets."""

    def __init__(self, estimator, param_distributions, num_brackets,
                 eta, scoring=None, fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise', return_train_score=True):
        # Search-specific settings; everything else is forwarded to
        # BaseSearchCV unchanged.
        self.param_distributions = param_distributions
        self.random_state = random_state
        self.num_brackets = num_brackets
        self.eta = eta
        super(HyperBand, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score,
            return_train_score=return_train_score)

    def fit(self, X, y, groups=None):
        """Actual fitting, performing the search over parameters."""
        results = dict()
        best_index = None
        best_parameters = None
        # Brackets from most aggressive (most halving steps, smallest initial
        # subsample) down to a single plain random-search rung.
        for bracket_idx in range(self.num_brackets - 1, -1, -1):
            successive_halving_steps = bracket_idx + 1
            # TODO: num_arms should be different

            estimator = self.estimator
            cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
            self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

            X, y, groups = indexable(X, y, groups)
            n_splits = cv.get_n_splits(X, y, groups)

            base_estimator = clone(self.estimator)
            # Offset of this bracket's candidates within the accumulated
            # cv_results_ arrays.
            arms_pulled = 0
            if 'mean_test_score' in results:
                arms_pulled = len(results['mean_test_score'])
            res = self._successive_halving(X, y, groups, cv, self.eta, successive_halving_steps - 1, self.num_brackets - 1)
            bracket_results, bracket_best_index, bracket_best_parameters = res
            for key, values in bracket_results.items():
                if key not in results:
                    results[key] = values
                else:
                    results[key] = np.append(results[key], values)

            # Keep the globally best candidate across brackets.
            if best_index is None:
                best_index = bracket_best_index + arms_pulled
                best_parameters = bracket_best_parameters
            elif bracket_results['mean_test_score'][bracket_best_index] > results['mean_test_score'][best_index]:
                best_index = bracket_best_index + arms_pulled
                best_parameters = bracket_best_parameters

        self.cv_results_ = results
        self.best_index_ = best_index
        self.n_splits_ = n_splits

        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best_parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
| [
"sklearn.utils.validation.indexable",
"math.ceil",
"sklearn.externals.joblib.delayed",
"numpy.average",
"sklearn.base.clone",
"sklearn.base.is_classifier",
"numpy.flatnonzero",
"sklearn.model_selection._search.ParameterSampler",
"numpy.argsort",
"numpy.array",
"sklearn.utils.resample",
"sklear... | [((1016, 1037), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (1021, 1037), False, 'from sklearn.base import is_classifier, clone\n'), ((3683, 3736), 'numpy.array', 'np.array', (['test_sample_counts[:n_splits]'], {'dtype': 'np.int'}), '(test_sample_counts[:n_splits], dtype=np.int)\n', (3691, 3736), True, 'import numpy as np\n'), ((4308, 4367), 'numpy.argsort', 'np.argsort', (["(results['mean_test_score'][-n_candidates:] * -1)"], {}), "(results['mean_test_score'][-n_candidates:] * -1)\n", (4318, 4367), True, 'import numpy as np\n'), ((5856, 5919), 'math.ceil', 'math.ceil', (['(hyperband_B * eta ** hyperband_s / (hyperband_s + 1))'], {}), '(hyperband_B * eta ** hyperband_s / (hyperband_s + 1))\n', (5865, 5919), False, 'import math\n'), ((6037, 6143), 'sklearn.model_selection._search.ParameterSampler', 'ParameterSampler', (['self.param_distributions', 'hyperband_n'], {'random_state': '(self.random_state + hyperband_s)'}), '(self.param_distributions, hyperband_n, random_state=self.\n random_state + hyperband_s)\n', (6053, 6143), False, 'from sklearn.model_selection._search import ParameterSampler\n'), ((8670, 8759), 'sklearn.model_selection._search.ParameterSampler', 'ParameterSampler', (['self.param_distributions', 'num_arms'], {'random_state': 'self.random_state'}), '(self.param_distributions, num_arms, random_state=self.\n random_state)\n', (8686, 8759), False, 'from sklearn.model_selection._search import ParameterSampler\n'), ((8977, 9028), 'sklearn.metrics.scorer.check_scoring', 'check_scoring', (['self.estimator'], {'scoring': 'self.scoring'}), '(self.estimator, scoring=self.scoring)\n', (8990, 9028), False, 'from sklearn.metrics.scorer import check_scoring\n'), ((9053, 9076), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (9062, 9076), False, 'from sklearn.utils.validation import indexable\n'), ((9474, 9495), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), 
'(self.estimator)\n', (9479, 9495), False, 'from sklearn.base import is_classifier, clone\n'), ((1150, 1237), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose', 'pre_dispatch': 'self.pre_dispatch'}), '(n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.\n pre_dispatch)\n', (1158, 1237), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((2932, 2974), 'numpy.average', 'np.average', (['array'], {'axis': '(1)', 'weights': 'weights'}), '(array, axis=1, weights=weights)\n', (2942, 2974), True, 'import numpy as np\n'), ((4149, 4196), 'numpy.flatnonzero', 'np.flatnonzero', (["(results['rank_test_score'] == 1)"], {}), "(results['rank_test_score'] == 1)\n", (4163, 4196), True, 'import numpy as np\n'), ((11533, 11584), 'sklearn.metrics.scorer.check_scoring', 'check_scoring', (['self.estimator'], {'scoring': 'self.scoring'}), '(self.estimator, scoring=self.scoring)\n', (11546, 11584), False, 'from sklearn.metrics.scorer import check_scoring\n'), ((11613, 11636), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (11622, 11636), False, 'from sklearn.utils.validation import indexable\n'), ((11720, 11741), 'sklearn.base.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (11725, 11741), False, 'from sklearn.base import is_classifier, clone\n'), ((3126, 3204), 'numpy.average', 'np.average', (['((array - array_means[:, np.newaxis]) ** 2)'], {'axis': '(1)', 'weights': 'weights'}), '((array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights)\n', (3136, 3204), True, 'import numpy as np\n'), ((4793, 4815), 'numpy.empty', 'np.empty', (['n_candidates'], {}), '(n_candidates)\n', (4801, 4815), True, 'import numpy as np\n'), ((6597, 6694), 'sklearn.utils.resample', 'resample', (['X', 'y', 'groups'], {'n_samples': 'sample_size', 'replace': '(False)', 'random_state': 'self.random_state'}), '(X, y, groups, n_samples=sample_size, 
replace=False, random_state=\n self.random_state)\n', (6605, 6694), False, 'from sklearn.utils import resample\n'), ((6751, 6803), 'sklearn.utils.resample', 'resample', (['X', 'y'], {'n_samples': 'sample_size', 'replace': '(False)'}), '(X, y, n_samples=sample_size, replace=False)\n', (6759, 6803), False, 'from sklearn.utils import resample\n'), ((8928, 8952), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (8941, 8952), False, 'from sklearn.base import is_classifier, clone\n'), ((1267, 1290), 'sklearn.externals.joblib.delayed', 'delayed', (['_fit_and_score'], {}), '(_fit_and_score)\n', (1274, 1290), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((1291, 1312), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (1296, 1312), False, 'from sklearn.base import is_classifier, clone\n'), ((2594, 2627), 'numpy.array', 'np.array', (['array'], {'dtype': 'np.float64'}), '(array, dtype=np.float64)\n', (2602, 2627), True, 'import numpy as np\n'), ((3451, 3487), 'sklearn.utils.fixes.rankdata', 'rankdata', (['(-array_means)'], {'method': '"""min"""'}), "(-array_means, method='min')\n", (3459, 3487), False, 'from sklearn.utils.fixes import rankdata\n'), ((7584, 7615), 'numpy.append', 'np.append', (['results[key]', 'values'], {}), '(results[key], values)\n', (7593, 7615), True, 'import numpy as np\n'), ((9919, 9940), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (9924, 9940), False, 'from sklearn.base import is_classifier, clone\n'), ((11480, 11504), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (11493, 11504), False, 'from sklearn.base import is_classifier, clone\n'), ((12276, 12307), 'numpy.append', 'np.append', (['results[key]', 'values'], {}), '(results[key], values)\n', (12285, 12307), True, 'import numpy as np\n'), ((12980, 13001), 'sklearn.base.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (12985, 
13001), False, 'from sklearn.base import is_classifier, clone\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import scipy.ndimage
from config import config
class FlawDetector(nn.Module):
    """ The FC Discriminator proposed in paper:
    'Guided Collaborative Training for Pixel-wise Semi-Supervised Learning'
    """

    ndf = 64  # basic number of channels

    def __init__(self, in_channels, norm_layer=None):
        """
        :param in_channels: channels of the concatenated (input, prediction) pair
        :param norm_layer: batch-norm factory forwarded to the IBNorm layers
        """
        super(FlawDetector, self).__init__()
        ndf = self.ndf
        # Stride-2 convs shrink the map; the *_1 convs refine at the same scale.
        self.conv1 = nn.Conv2d(in_channels, ndf, kernel_size=4, stride=2, padding=1)
        self.ibn1 = IBNorm(ndf, norm_layer=norm_layer)
        self.conv2 = nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1)
        self.ibn2 = IBNorm(ndf * 2, norm_layer=norm_layer)
        self.conv2_1 = nn.Conv2d(ndf * 2, ndf * 2, kernel_size=4, stride=1, padding=1)
        self.ibn2_1 = IBNorm(ndf * 2, norm_layer=norm_layer)
        self.conv3 = nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1)
        self.ibn3 = IBNorm(ndf * 4, norm_layer=norm_layer)
        self.conv3_1 = nn.Conv2d(ndf * 4, ndf * 4, kernel_size=4, stride=1, padding=1)
        self.ibn3_1 = IBNorm(ndf * 4, norm_layer=norm_layer)
        self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1)
        self.ibn4 = IBNorm(ndf * 8, norm_layer=norm_layer)
        self.conv4_1 = nn.Conv2d(ndf * 8, ndf * 8, kernel_size=4, stride=1, padding=1)
        self.ibn4_1 = IBNorm(ndf * 8, norm_layer=norm_layer)
        self.classifier = nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, task_inp, task_pred):
        """Predict a per-pixel flaw map (logits, same spatial size as the prediction).

        The output is NOT activated here; the criterion function is
        responsible for activating it.
        """
        out = torch.cat((task_inp, task_pred), dim=1)
        stages = [(self.conv1, self.ibn1), (self.conv2, self.ibn2),
                  (self.conv2_1, self.ibn2_1), (self.conv3, self.ibn3),
                  (self.conv3_1, self.ibn3_1), (self.conv4, self.ibn4),
                  (self.conv4_1, self.ibn4_1)]
        for conv, norm in stages:
            out = self.leaky_relu(norm(conv(out)))
        out = self.classifier(out)
        # Upsample the logits back to the prediction's spatial resolution.
        out = F.interpolate(out, size=(task_pred.shape[2], task_pred.shape[3]),
                            mode='bilinear', align_corners=True)
        assert out.shape[2:] == task_pred.shape[2:]
        return out
class IBNorm(nn.Module):
    """ This layer combines BatchNorm and InstanceNorm.

    The first ``num_BN`` channels go through the supplied batch-norm layer,
    the remaining channels through (non-affine) InstanceNorm2d, and the two
    results are concatenated back together.
    """
    def __init__(self, num_features, split=0.5, norm_layer=None):
        """
        :param num_features: total number of input channels
        :param split: fraction of channels routed through BatchNorm (rounded)
        :param norm_layer: factory for the batch-norm layer, e.g. nn.BatchNorm2d
        """
        super(IBNorm, self).__init__()
        self.num_features = num_features
        # Round the split point to the nearest whole channel.
        self.num_BN = int(num_features * split + 0.5)
        self.bnorm = norm_layer(num_features=self.num_BN, affine=True)
        self.inorm = nn.InstanceNorm2d(num_features=num_features - self.num_BN,
                                       affine=False)

    def forward(self, x):
        if self.num_BN == self.num_features:
            # All channels are batch-normalized; nothing left for InstanceNorm.
            return self.bnorm(x.contiguous())
        bn_part = self.bnorm(x[:, :self.num_BN, :, :].contiguous())
        in_part = self.inorm(x[:, self.num_BN:, :, :].contiguous())
        return torch.cat((bn_part, in_part), 1)
class FlawDetectorCriterion(nn.Module):
    """ Criterion of the flaw detector.

    Pixel-wise MSE between the predicted and the ground-truth flawmap,
    optionally reduced to one scalar per sample.
    """
    def __init__(self):
        super(FlawDetectorCriterion, self).__init__()

    def forward(self, pred, gt, is_ssl=False, reduction=True):
        per_pixel = F.mse_loss(pred, gt, reduction='none')
        if not reduction:
            return per_pixel
        # Average over channel and spatial dims -> one loss value per sample.
        return torch.mean(per_pixel, dim=(1, 2, 3))
class FlawmapHandler(nn.Module):
    """ Post-processing of the predicted flawmap.
    This module processes the predicted flawmap to fix some special
    cases that may cause errors in the subsequent steps of generating
    pseudo ground truth.
    """
    def __init__(self):
        super(FlawmapHandler, self).__init__()
        # Flawmaps whose maximum is below this value are zeroed out entirely.
        self.clip_threshold = 0.1
        # Gaussian blur kernel ~1/16 of the image height; forced to be odd
        # because GaussianBlurLayer asserts an odd kernel size.
        blur_ksize = config.image_height // 16
        blur_ksize = blur_ksize + 1 if blur_ksize % 2 == 0 else blur_ksize
        self.blur = GaussianBlurLayer(1, blur_ksize)
    def forward(self, flawmap):
        # Detach from autograd; the in-place ops below mutate the raw tensor.
        flawmap = flawmap.data
        # force all values to be larger than 0
        flawmap.mul_((flawmap >= 0).float())
        # smooth the flawmap
        flawmap = self.blur(flawmap)
        # if all values in the flawmap are less than 'clip_threshold'
        # set the entire flawmap to 0, i.e., no flaw pixel
        # Per-sample max/min, kept broadcastable as (N, 1, 1, 1).
        fmax = flawmap.max(dim=3, keepdim=True)[0].max(dim=2, keepdim=True)[0].max(dim=1, keepdim=True)[0]
        fmin = flawmap.min(dim=3, keepdim=True)[0].min(dim=2, keepdim=True)[0].min(dim=1, keepdim=True)[0]
        max_matrix = fmax.repeat(1, 1, flawmap.shape[2], flawmap.shape[3])
        flawmap.mul_((max_matrix > self.clip_threshold).float())  # maximum number is lower than threshold, set the error to 0.
        # normalize the flawmap
        # (epsilon guards against division by zero when fmax == fmin)
        flawmap = flawmap.sub_(fmin).div_(fmax - fmin + 1e-9)
        return flawmap
class DCGTGenerator(nn.Module):
    """ Generate the ground truth of the dynamic consistency constraint.
    """
    def __init__(self):
        super(DCGTGenerator, self).__init__()
    def forward(self, l_pred, r_pred, l_handled_flawmap, r_handled_flawmap):
        # Keep untouched copies: the inputs are binarized in place below.
        l_tmp = l_handled_flawmap.clone()
        r_tmp = r_handled_flawmap.clone()
        l_bad = l_tmp > config.dc_threshold
        r_bad = r_tmp > config.dc_threshold
        both_bad = (l_bad & r_bad).float()  # too high error rate
        # Binarize each flawmap in place: keep values <= threshold, then set
        # every above-threshold position to exactly 1.
        l_handled_flawmap.mul_((l_tmp <= config.dc_threshold).float())
        r_handled_flawmap.mul_((r_tmp <= config.dc_threshold).float())
        l_handled_flawmap.add_((l_tmp > config.dc_threshold).float())
        r_handled_flawmap.add_((r_tmp > config.dc_threshold).float())
        # For each pixel, take the prediction of the branch whose flawmap
        # value is lower (i.e., the more trustworthy branch).
        l_mask = (r_handled_flawmap >= l_handled_flawmap).float()
        r_mask = (l_handled_flawmap >= r_handled_flawmap).float()
        l_dc_gt = l_mask * l_pred + (1 - l_mask) * r_pred
        r_dc_gt = r_mask * r_pred + (1 - r_mask) * l_pred
        # NOTE(review): the same `both_bad` mask is returned twice, presumably
        # one per branch for a symmetric caller API — confirm with the caller.
        return l_dc_gt, r_dc_gt, both_bad, both_bad
class FDGTGenerator(nn.Module):
    """ Generate the ground truth of the flaw detector,
    i.e., pipeline 'C' in the paper.
    """
    def __init__(self):
        super(FDGTGenerator, self).__init__()
        # Both blur kernels are derived from the image height and forced to
        # be odd, as required by GaussianBlurLayer.
        blur_ksize = int(config.image_height / 8)
        blur_ksize = blur_ksize + 1 if blur_ksize % 2 == 0 else blur_ksize
        self.blur = GaussianBlurLayer(1, blur_ksize)
        reblur_ksize = int(config.image_height / 4)
        reblur_ksize = reblur_ksize + 1 if reblur_ksize % 2 == 0 else reblur_ksize
        self.reblur = GaussianBlurLayer(1, reblur_ksize)
        # 3x3 max-pool with reflection padding: spatial dilation of the map.
        self.dilate = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.MaxPool2d(kernel_size=3, stride=1, padding=0)
        )
    def forward(self, pred, gt):
        # Per-pixel absolute error, summed over channels and scaled by mu.
        # pred is detached: no gradients flow back through the task network.
        diff = torch.abs_(gt - pred.detach())
        diff = torch.sum(diff, dim=1, keepdim=True).mul_(config.mu)
        diff = self.blur(diff)
        # nu rounds of dilate-then-blur spread the error regions outward.
        for _ in range(0, config.nu):
            diff = self.reblur(self.dilate(diff))
        # normlize each sample to [0, 1]
        dmax = diff.max(dim=3, keepdim=True)[0].max(dim=2, keepdim=True)[0].max(dim=1, keepdim=True)[0]
        dmin = diff.min(dim=3, keepdim=True)[0].min(dim=2, keepdim=True)[0].min(dim=1, keepdim=True)[0]
        # (epsilon guards against division by zero when dmax == dmin)
        diff.sub_(dmin).div_(dmax - dmin + 1e-9)
        flawmap_gt = diff
        return flawmap_gt
class GaussianBlurLayer(nn.Module):
    """ Add Gaussian Blur to a 4D tensor
    This layer takes a 4D tensor of {N, C, H, W} as input.
    The Gaussian blur will be performed in given channel number (C) splitly.
    """

    def __init__(self, channels, kernel_size):
        """
        Arguments:
            channels (int): Channel for input tensor
            kernel_size (int): Size of the kernel used in blurring (must be odd)
        """
        super(GaussianBlurLayer, self).__init__()
        self.channels = channels
        self.kernel_size = kernel_size
        # An even kernel has no single center pixel, so the blur could not
        # be centered on each location.
        assert self.kernel_size % 2 != 0

        self.op = nn.Sequential(
            # Reflection padding keeps the output the same spatial size.
            nn.ReflectionPad2d(math.floor(self.kernel_size / 2)),
            # bias=False (was bias=None, which only worked because None is
            # falsy): a fixed Gaussian kernel needs no bias term.
            # groups=channels blurs every channel independently.
            nn.Conv2d(channels, channels, self.kernel_size,
                      stride=1, padding=0, bias=False, groups=channels)
        )

        self._init_kernel()

    def forward(self, x):
        """
        Arguments:
            x (torch.Tensor): input 4D tensor
        Returns:
            torch.Tensor: Blurred version of the input
        """
        assert len(list(x.shape)) == 4
        assert x.shape[1] == self.channels
        return self.op(x)

    def _init_kernel(self):
        """Fill the depthwise conv weights with a normalized Gaussian kernel."""
        # OpenCV's heuristic for deriving sigma from the kernel size.
        sigma = 0.3 * ((self.kernel_size - 1) * 0.5 - 1) + 0.8
        n = np.zeros((self.kernel_size, self.kernel_size))
        i = math.floor(self.kernel_size / 2)
        n[i, i] = 1  # unit impulse at the kernel center ...
        # ... blurred into a Gaussian; 'reflect' boundary preserves the sum,
        # so the resulting kernel sums to 1.
        kernel = scipy.ndimage.gaussian_filter(n, sigma)
        for name, param in self.named_parameters():
            param.data.copy_(torch.from_numpy(kernel))
def sigmoid_rampup(current, rampup_length):
    """ Exponential rampup from https://arxiv.org/abs/1610.02242 .

    Returns a weight in (0, 1] that ramps up from exp(-5) to 1.0 as
    `current` goes from 0 to `rampup_length`; a zero length means "always 1".
    """
    if rampup_length == 0:
        return 1.0
    clipped = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - clipped / rampup_length
    return float(np.exp(-5.0 * phase * phase))
def sslgct_prepare_task_gt_for_fdgt(task_gt):
    """Convert an integer segmentation map into a one-hot tensor for FDGTGenerator.

    :param task_gt: (B, H, W) tensor of class indices; pixels labelled 255
        are treated as segment boundaries and zeroed in every class plane.
    :return: (B, num_classes, H, W) float tensor on the GPU.
    """
    task_gt = task_gt.unsqueeze(1)  # -> (B, 1, H, W)
    gt_np = task_gt.data.cpu().numpy()
    shape = list(gt_np.shape)
    assert len(shape) == 4
    shape[1] = config.num_classes
    one_hot = torch.zeros(shape).cuda()
    for i in range(config.num_classes):
        one_hot[:, i:i+1, ...].add_((task_gt == i).float())
        # ignore segment boundary
        one_hot[:, i:i+1, ...].mul_((task_gt != 255).float())
    # NOTE: the original final line was garbled by stray residue
    # ("return one_hot | ["); restored to a plain return.
    return one_hot
"numpy.clip",
"torch.nn.functional.mse_loss",
"torch.nn.LeakyReLU",
"math.floor",
"torch.mean",
"torch.nn.ReflectionPad2d",
"torch.from_numpy",
"torch.nn.InstanceNorm2d",
"torch.nn.Conv2d",
"numpy.exp",
"numpy.zeros",
"torch.nn.MaxPool2d",
"torch.sum",
"torch.nn.functional.interpolate",
... | [((478, 546), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'self.ndf'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(in_channels, self.ndf, kernel_size=4, stride=2, padding=1)\n', (487, 546), True, 'import torch.nn as nn\n'), ((628, 697), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.ndf', '(self.ndf * 2)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(self.ndf, self.ndf * 2, kernel_size=4, stride=2, padding=1)\n', (637, 697), True, 'import torch.nn as nn\n'), ((785, 858), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.ndf * 2)', '(self.ndf * 2)'], {'kernel_size': '(4)', 'stride': '(1)', 'padding': '(1)'}), '(self.ndf * 2, self.ndf * 2, kernel_size=4, stride=1, padding=1)\n', (794, 858), True, 'import torch.nn as nn\n'), ((946, 1019), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.ndf * 2)', '(self.ndf * 4)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(self.ndf * 2, self.ndf * 4, kernel_size=4, stride=2, padding=1)\n', (955, 1019), True, 'import torch.nn as nn\n'), ((1107, 1180), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.ndf * 4)', '(self.ndf * 4)'], {'kernel_size': '(4)', 'stride': '(1)', 'padding': '(1)'}), '(self.ndf * 4, self.ndf * 4, kernel_size=4, stride=1, padding=1)\n', (1116, 1180), True, 'import torch.nn as nn\n'), ((1268, 1341), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.ndf * 4)', '(self.ndf * 8)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(self.ndf * 4, self.ndf * 8, kernel_size=4, stride=2, padding=1)\n', (1277, 1341), True, 'import torch.nn as nn\n'), ((1429, 1502), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.ndf * 8)', '(self.ndf * 8)'], {'kernel_size': '(4)', 'stride': '(1)', 'padding': '(1)'}), '(self.ndf * 8, self.ndf * 8, kernel_size=4, stride=1, padding=1)\n', (1438, 1502), True, 'import torch.nn as nn\n'), ((1595, 1657), 'torch.nn.Conv2d', 'nn.Conv2d', (['(self.ndf * 8)', '(1)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)'}), '(self.ndf * 8, 1, kernel_size=4, stride=2, 
padding=1)\n', (1604, 1657), True, 'import torch.nn as nn\n'), ((1685, 1731), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)', 'inplace': '(True)'}), '(negative_slope=0.2, inplace=True)\n', (1697, 1731), True, 'import torch.nn as nn\n'), ((1874, 1913), 'torch.cat', 'torch.cat', (['(task_inp, task_pred)'], {'dim': '(1)'}), '((task_inp, task_pred), dim=1)\n', (1883, 1913), False, 'import torch\n'), ((2347, 2452), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'size': '(task_pred.shape[2], task_pred.shape[3])', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x, size=(task_pred.shape[2], task_pred.shape[3]), mode=\n 'bilinear', align_corners=True)\n", (2360, 2452), True, 'import torch.nn.functional as F\n'), ((3020, 3092), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': '(num_features - self.num_BN)', 'affine': '(False)'}), '(num_features=num_features - self.num_BN, affine=False)\n', (3037, 3092), True, 'import torch.nn as nn\n'), ((3651, 3689), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['pred', 'gt'], {'reduction': '"""none"""'}), "(pred, gt, reduction='none')\n", (3661, 3689), True, 'import torch.nn.functional as F\n'), ((8926, 8972), 'numpy.zeros', 'np.zeros', (['(self.kernel_size, self.kernel_size)'], {}), '((self.kernel_size, self.kernel_size))\n', (8934, 8972), True, 'import numpy as np\n'), ((8985, 9017), 'math.floor', 'math.floor', (['(self.kernel_size / 2)'], {}), '(self.kernel_size / 2)\n', (8995, 9017), False, 'import math\n'), ((9397, 9433), 'numpy.clip', 'np.clip', (['current', '(0.0)', 'rampup_length'], {}), '(current, 0.0, rampup_length)\n', (9404, 9433), True, 'import numpy as np\n'), ((3380, 3402), 'torch.cat', 'torch.cat', (['(xb, xi)', '(1)'], {}), '((xb, xi), 1)\n', (3389, 3402), False, 'import torch\n'), ((3731, 3762), 'torch.mean', 'torch.mean', (['loss'], {'dim': '(1, 2, 3)'}), '(loss, dim=(1, 2, 3))\n', (3741, 3762), False, 'import torch\n'), ((6938, 6959), 
'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['(1)'], {}), '(1)\n', (6956, 6959), True, 'import torch.nn as nn\n'), ((6973, 7021), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(0)'}), '(kernel_size=3, stride=1, padding=0)\n', (6985, 7021), True, 'import torch.nn as nn\n'), ((8363, 8464), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels', 'self.kernel_size'], {'stride': '(1)', 'padding': '(0)', 'bias': 'None', 'groups': 'channels'}), '(channels, channels, self.kernel_size, stride=1, padding=0, bias=\n None, groups=channels)\n', (8372, 8464), True, 'import torch.nn as nn\n'), ((9501, 9529), 'numpy.exp', 'np.exp', (['(-5.0 * phase * phase)'], {}), '(-5.0 * phase * phase)\n', (9507, 9529), True, 'import numpy as np\n'), ((9758, 9776), 'torch.zeros', 'torch.zeros', (['shape'], {}), '(shape)\n', (9769, 9776), False, 'import torch\n'), ((7127, 7163), 'torch.sum', 'torch.sum', (['diff'], {'dim': '(1)', 'keepdim': '(True)'}), '(diff, dim=1, keepdim=True)\n', (7136, 7163), False, 'import torch\n'), ((8316, 8348), 'math.floor', 'math.floor', (['(self.kernel_size / 2)'], {}), '(self.kernel_size / 2)\n', (8326, 8348), False, 'import math\n'), ((9177, 9201), 'torch.from_numpy', 'torch.from_numpy', (['kernel'], {}), '(kernel)\n', (9193, 9201), False, 'import torch\n')] |
"""
Decoding module for a neural speaker (with attention capabilities).
The MIT License (MIT)
Originally created at 06/15/19, for Python 3.x
Copyright (c) 2021 <NAME> (ai.stanford.edu/~optas) & Stanford Geometric Computing Lab
"""
import torch
import random
import time
import warnings
import tqdm
import math
import numpy as np
import torch.nn.functional as F
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils import clip_grad_norm_
from .attention import AdditiveVisioLinguistic
from ..utils.stats import AverageMeter
class AttentiveDecoder(nn.Module):
    """
    Note: code adapted from: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Image-Captioning
    implementing a solid version of Show, Attend, and Tell. Many thanks Sagar and the team.
    Special (optional) features:
        - use stochastic teacher forcer
        - add auxiliary input data at each decoding step (besides each 'previous' token).
        - tie the weights of the encoder/decoder weight matrices
    """
    def __init__(self, word_embedding, rnn_hidden_dim, encoder_dim, attention_dim,
                 vocab, dropout_rate=0, tie_weights=False, teacher_forcing_ratio=1,
                 auxiliary_net=None, auxiliary_dim=0):
        """
        :param word_embedding: nn.Embedding
        :param rnn_hidden_dim: hidden (and thus output) dimension of the decoding rnn
        :param encoder_dim: feature dimension of encoded stimulus
        :param attention_dim: feature dimension over which attention is computed
        :param vocab: artemis.utils.vocabulary instance
        :param dropout_rate: dropout rate
        :param tie_weights: (opt, boolean) if True, the hidden-to-word weights are equal (tied) to the word-embeddings,
            see https://arxiv.org/abs/1611.01462 for explanation of why this might be a good idea.
        :param teacher_forcing_ratio: probability of feeding the ground-truth token
            (instead of the model's own prediction) at each decoding step
        :param auxiliary_net: (optional) nn.Module that will be feeding the decoder at each time step
            with some "auxiliary" information (say an emotion label). Obviously, this information is separate than the
            output of the typically used image-encoder.
        :param auxiliary_dim: (int, optional) the output feature-dimension of the auxiliary net.
        """
        super(AttentiveDecoder, self).__init__()
        self.vocab = vocab
        self.vocab_size = len(vocab)
        self.word_embedding = word_embedding
        self.auxiliary_net = auxiliary_net
        self.uses_aux_data = False
        if auxiliary_dim > 0:
            self.uses_aux_data = True
        self.decode_step = nn.LSTMCell(word_embedding.embedding_dim + encoder_dim + auxiliary_dim, rnn_hidden_dim)
        self.attention = AdditiveVisioLinguistic(encoder_dim, rnn_hidden_dim, attention_dim)
        if dropout_rate > 0:
            self.dropout = nn.Dropout(p=dropout_rate, inplace=True)
        else:
            self.dropout = nn.Identity()
        self.init_h = nn.Linear(encoder_dim, rnn_hidden_dim)  # linear layer to find initial hidden state of LSTMCell
        self.init_c = nn.Linear(encoder_dim, rnn_hidden_dim)  # linear layer to find initial cell state of LSTMCell
        self.f_beta = nn.Linear(rnn_hidden_dim, encoder_dim)  # linear layer to create a sigmoid-activated gate
        self.sigmoid = nn.Sigmoid()
        self.next_word = nn.Linear(rnn_hidden_dim, self.vocab_size)  # linear layer to find scores over vocabulary
        self.init_weights()
        self.teacher_forcing_ratio = teacher_forcing_ratio
        if tie_weights:
            if self.word_embedding.embedding_dim != rnn_hidden_dim:
                # Tying re-uses the embedding matrix as the output projection,
                # so the two dimensionalities must be equal.
                # (Fixed: the original message was truncated/uninformative.)
                raise ValueError('When tying weights, rnn_hidden_dim must equal '
                                 'the word-embedding dimension.')
            print('tying weights of encoder/decoder')
            self.next_word.weight = self.word_embedding.weight
    def init_hidden_state(self, encoder_out):
        """
        Creates the initial hidden and cell states for the decoder's LSTM based on the encoded images.
        :param encoder_out: encoded images, a tensor of dimension (batch_size, num_pixels, encoder_dim)
        :return: hidden state, cell state
        """
        mean_encoder_out = encoder_out.mean(dim=1)
        h = self.init_h(mean_encoder_out)  # (batch_size, decoder_dim)
        c = self.init_c(mean_encoder_out)
        return h, c
    def init_weights(self, init_range=0.1):
        """ Better initialization """
        self.word_embedding.weight.data.uniform_(-init_range, init_range)  # remove if pre-trained model comes up
        self.next_word.bias.data.zero_()
        self.next_word.weight.data.uniform_(-init_range, init_range)
    def __call__(self, encoder_out, captions, auxiliary_data=None):
        """ Forward propagation.
        :param encoder_out: encoded images, a tensor of dimension (batch_size, enc_image_size, enc_image_size, encoder_dim)
        :param captions: encoded captions, a tensor of dimension (batch_size, max_caption_length)
        :param auxiliary_data: extra information associated with the images (batch_size, some_dim)
        :return: scores for vocabulary, sorted encoded captions, decode lengths, weights, sort indices
        """
        return self.sort_captions_and_forward(encoder_out, captions, auxiliary_data=auxiliary_data)
    def sort_captions_and_forward(self, encoder_out, captions, auxiliary_data=None):
        """ Sort the batch by decreasing caption length, then decode step by step.
        :param encoder_out: encoded images (batch_size, ..., encoder_dim)
        :param captions: tokenized captions (batch_size, max_caption_length)
        :param auxiliary_data: optional per-image extra input
        :return: (predictions, sorted captions, decode lengths, alphas, sort indices)
        """
        batch_size = encoder_out.size(0)
        encoder_dim = encoder_out.size(-1)
        # Flatten image
        encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (batch_size, num_pixels, encoder_dim)
        num_pixels = encoder_out.size(1)
        # NOTE(review): assumes exactly one <eos> per caption — verify upstream.
        decode_lengths = torch.where(captions == self.vocab.eos)[1]  # "<sos> I am <eos>" => decode_length = 3
                                                                     # we do not feed <eos> as input to generate
                                                                     # something after it
        # Sort input data by decreasing lengths to reduce compute below
        decode_lengths, sort_ind = decode_lengths.sort(dim=0, descending=True)
        encoder_out = encoder_out[sort_ind]
        captions = captions[sort_ind]
        if auxiliary_data is not None:
            auxiliary_data = auxiliary_data[sort_ind]
            auxiliary_data = self.auxiliary_net(auxiliary_data)
        # prepare for unravelling
        embeddings = self.word_embedding(captions)  # (batch_size, max_caption_length, embed_dim)
        h, c = self.init_hidden_state(encoder_out)  # (batch_size, decoder_dim)
        decode_lengths = decode_lengths.tolist()
        device = embeddings.device
        # Create tensors to hold word prediction logits and attention maps (alphas)
        predictions = torch.zeros(batch_size, max(decode_lengths), self.vocab_size).to(device)
        alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(device)
        # At each time-step, decode by
        # attention-weighing the encoder's output based on the decoder's previous hidden state output
        # then generate a new word in the decoder with the previous word and the attention weighted encoding
        for t in range(max(decode_lengths)):
            # Captions are sorted by length, so only the first
            # batch_size_t samples are still being decoded at step t.
            batch_size_t = sum([l > t for l in decode_lengths])
            h = h[:batch_size_t]  # effective h
            attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t], h)
            gate = self.sigmoid(self.f_beta(h))  # gating scalar, (batch_size_t, encoder_dim)
            attention_weighted_encoding = gate * attention_weighted_encoding
            use_teacher_forcing = True if random.random() < self.teacher_forcing_ratio else False
            if use_teacher_forcing or t == 0:
                decoder_lang_input = embeddings[:batch_size_t, t]
            else:
                # Feed the model's own previous top prediction instead.
                _, top_pred = preds[:batch_size_t].topk(1)
                top_pred = top_pred.squeeze(-1).detach()  # detach from history as input
                decoder_lang_input = self.word_embedding(top_pred)
            if auxiliary_data is not None:
                auxiliary_data_t = auxiliary_data[:batch_size_t]
                decoder_in = torch.cat([decoder_lang_input, attention_weighted_encoding, auxiliary_data_t], dim=1)
            else:
                decoder_in = torch.cat([decoder_lang_input, attention_weighted_encoding], dim=1)
            h, c = self.decode_step(decoder_in, (h, c[:batch_size_t]))  # (batch_size_t, decoder_dim)
            preds = self.next_word(self.dropout(h))  # (batch_size_t, vocab_size)
            predictions[:batch_size_t, t] = preds
            alphas[:batch_size_t, t] = alpha
        return predictions, captions, decode_lengths, alphas, sort_ind
    def attend_and_predict_next_word(self, encoder_out, h, c, tokens, aux_data=None):
        """Given current hidden/memory state of the decoder and the input tokens, guess the next tokens
        and update the hidden/memory states.
        :param encoder_out: the grounding
        :param h: current hidden state
        :param c: current memory state
        :param tokens: current token input to the decoder
        :param aux_data: optional extra input, passed through the auxiliary net
        :return: updated h, updated c, logits over vocabulary distribution, attention weights
        """
        attention_weighted_encoding, alpha = self.attention(encoder_out, h)
        gate = self.sigmoid(self.f_beta(h))  # gating scalar, (batch_size_t, encoder_dim)
        attention_weighted_encoding = gate * attention_weighted_encoding
        embeddings = self.word_embedding(tokens)  # (batch_size, embed_dim)
        decoder_input = torch.cat([embeddings, attention_weighted_encoding], dim=1)
        if aux_data is not None:
            aux_feat = self.auxiliary_net(aux_data)
            decoder_input = torch.cat([decoder_input, aux_feat], dim=1)
        h, c = self.decode_step(decoder_input, (h, c))  # (batch_size_t, decoder_dim)
        logits = self.next_word(h)  # (batch_size_t, vocab_size)
        return h, c, logits, alpha
def single_epoch_train(train_loader, model, criterion, optimizer, epoch, device, tb_writer=None, **kwargs):
    """Perform training for one epoch and return the average total loss per token.

    :param train_loader: DataLoader for training data; each batch is a dict with
        'image', 'tokens' (and 'emotion' when use_emotion is set)
    :param model: nn.ModuleDict with 'encoder', 'decoder' keys
    :param criterion: loss layer applied to packed logits/targets
    :param optimizer: optimizer
    :param epoch: epoch number (1-based; used to derive the running step counter)
    :param device: torch device to run on
    :param tb_writer: optional TensorBoard writer for per-batch loss logging
    :param kwargs: alpha_c (attention-regularization weight), grad_clip (norm bound),
        print_freq (batches between console reports), use_emotion (feed 'emotion' to decoder)
    :return: average total loss per decoded token over the epoch
    """
    alpha_c = kwargs.get('alpha_c', 1.0)  # Weight of doubly stochastic (attention) regularization.
    grad_clip = kwargs.get('grad_clip', 5.0)  # Gradient clipping (norm magnitude)
    print_freq = kwargs.get('print_freq', 100)
    use_emotion = kwargs.get('use_emotion', False)
    batch_time = AverageMeter()  # forward prop. + back prop. time
    data_time = AverageMeter()  # data loading time
    entropy_loss_meter = AverageMeter()  # entropy loss (per word decoded)
    total_loss_meter = AverageMeter()  # entropy + attention-regularization loss
    start = time.time()
    # Global step counter, continued from previous epochs (counts samples, not batches).
    steps_taken = (epoch-1) * len(train_loader.dataset)
    model.train()
    for i, batch in enumerate(train_loader):
        imgs = batch['image'].to(device)
        caps = batch['tokens'].to(device)
        b_size = len(imgs)
        data_time.update(time.time() - start)
        if use_emotion:
            emotion = batch['emotion'].to(device)
            res = model.decoder(model.encoder(imgs), caps, emotion)
        else:
            res = model.decoder(model.encoder(imgs), caps)
        logits, caps_sorted, decode_lengths, alphas, sort_ind = res
        # Since we decoded starting with <sos>, the targets are all words after <sos>, up to <eos>
        targets = caps_sorted[:, 1:]
        # Remove time-steps that we didn't decode at, or are pads
        # pack_padded_sequence is an easy trick to do this
        logits = pack_padded_sequence(logits, decode_lengths, batch_first=True)
        targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)
        # Calculate loss
        ent_loss = criterion(logits.data, targets.data)
        total_loss = ent_loss
        # Add doubly stochastic attention regularization
        # Note. some implementation simply do this like: d_atn_loss = alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
        # here we take care of the fact that some samples in the same batch have more/less tokens than others.
        if alpha_c > 0:
            total_energy = torch.from_numpy(np.array(decode_lengths)) / alphas.shape[-1]  # n_tokens / num_pixels
            total_energy.unsqueeze_(-1)  # B x 1
            total_energy = total_energy.to(device)
            d_atn_loss = alpha_c * ((total_energy - alphas.sum(dim=1)) ** 2).mean()
            total_loss += d_atn_loss
        # Back prop.
        optimizer.zero_grad()
        total_loss.backward()
        if grad_clip is not None:
            clip_grad_norm_(model.parameters(), grad_clip)
        # Update weights
        optimizer.step()
        # Keep track of metrics (weighted by number of decoded tokens in the batch)
        entropy_loss_meter.update(ent_loss.item(), sum(decode_lengths))
        total_loss_meter.update(total_loss.item(), sum(decode_lengths))
        batch_time.update(time.time() - start)
        start = time.time()
        steps_taken += b_size
        # Print status
        if print_freq is not None and i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(epoch, i, len(train_loader),
                                                                batch_time=batch_time,
                                                                data_time=data_time,
                                                                loss=total_loss_meter))
        # Per-batch TensorBoard logging keyed on the sample-level step counter.
        if tb_writer is not None:
            tb_writer.add_scalar('training-entropy-loss-with-batch-granularity', entropy_loss_meter.avg, steps_taken)
    return total_loss_meter.avg
@torch.no_grad()
def negative_log_likelihood(model, data_loader, device):
    """Average per-token cross-entropy (NLL) of the captioning model on a dataset.

    :param model: module with 'encoder' and 'decoder' members
    :param data_loader: yields dicts with 'image', 'tokens' (and 'emotion'
        when the decoder consumes auxiliary data)
    :param device: torch device to run on
    :return: per-token average negative log-likelihood over the whole loader
    """
    model.eval()
    meter = AverageMeter()
    extra = None
    for batch in data_loader:
        images = batch['image'].to(device)
        tokens = batch['tokens'].to(device)
        # TODO Refactor
        if model.decoder.uses_aux_data:
            extra = batch['emotion'].to(device)
        out = model.decoder(model.encoder(images), tokens, extra)
        logits, sorted_caps, decode_lengths, alphas, sort_ind = out
        # Since decoding started with <sos>, the targets are every word after it, up to <eos>.
        shifted_targets = sorted_caps[:, 1:]
        # pack_padded_sequence drops padded / never-decoded time-steps in one shot.
        packed_logits = pack_padded_sequence(logits, decode_lengths, batch_first=True)
        packed_targets = pack_padded_sequence(shifted_targets, decode_lengths, batch_first=True)
        batch_nll = F.cross_entropy(packed_logits.data, packed_targets.data)
        meter.update(batch_nll.item(), sum(decode_lengths))
    return meter.avg
@torch.no_grad()
def log_prob_of_caption(model, img, tokens, temperature=1):
    """Given a captioning model, return the log-probability of a caption given an image.
    This version expects a batch of images, each assotiated with a single caption.

    :param model: encoder/decoder speaker
    :param img: Tensor B x channels x spatial-dims
    :param tokens: Tensor B x max-n-tokens; every row must start with <sos>
        and contain an <eos> (see the masking below)
    :param temperature: softmax temperature applied to the logits
    :return log_probs: Tensor of size B x max-n-tokens holding the log-probs of each token of each caption
    :return lens: per-row index of <eos> (i.e. number of true tokens + 1 for <eos>)
    """
    encoder = model.encoder
    decoder = model.decoder
    assert all(tokens[:, 0] == decoder.vocab.sos)
    max_steps = tokens.shape[1]
    encoder_out = encoder(img)
    batch_size = encoder_out.size(0)
    encoder_dim = encoder_out.size(-1)
    # Flatten the spatial grid into a pixel axis: (B, num_pixels, encoder_dim).
    encoder_out = encoder_out.view(batch_size, -1, encoder_dim)
    # Create tensors to hold log-probs
    log_probs = torch.zeros(batch_size, max_steps).to(tokens.device)
    h, c = decoder.init_hidden_state(encoder_out)
    for t in range(max_steps - 1):
        h, c, pred_t, _ = decoder.attend_and_predict_next_word(encoder_out, h, c, tokens[:, t])
        if temperature != 1:
            pred_t /= temperature
        pred_t = F.log_softmax(pred_t, dim=1)
        # log_probs[:, t] = probability the model assigns to the (ground-truth) next token.
        log_probs[:, t] = pred_t[torch.arange(batch_size), tokens[:, t+1]]  # prob. of guessing next token
    # NOTE(review): torch.where returns ALL <eos> matches; this assumes exactly
    # one <eos> per row -- with multiple, lens would have more entries than rows.
    lens = torch.where(tokens == decoder.vocab.eos)[1]  # true tokens + 1 for <eos>
    # Build a mask that is True from the <eos> position onward ...
    mask = torch.zeros_like(log_probs)
    mask[torch.arange(mask.shape[0]), lens] = 1
    mask = mask.cumsum(dim=1).to(torch.bool)
    # ... and zero those entries: positions after the true caption do not count.
    log_probs.masked_fill_(mask, 0)  # set to zero all positions after the true size of the caption
    return log_probs, lens
@torch.no_grad()
def sample_captions(model, loader, max_utterance_len, sampling_rule, device, temperature=1,
                    topk=None, drop_unk=True, drop_bigrams=False):
    """Sample one caption per image with greedy or stochastic decoding.

    :param model: encoder/decoder speaker
    :param loader: yields dicts with 'image' (and 'emotion' when the decoder uses aux data)
    :param max_utterance_len: maximum allowed length of captions
    :param sampling_rule: (str) 'argmax' or 'multinomial', or 'topk'
    :param temperature: softmax temperature applied to the logits before sampling
    :param topk: number of top logits kept when sampling_rule == 'topk'
    :param drop_unk: never emit the <unk> token
    :param drop_bigrams: suppress repeating a bigram already present in the sample
    :return:
        all_predictions: (torch cpu LongTensor) N-images x (max_utterance_len + 1)
        attention_weights: (torch cpu Tensor) N-images x encoded_image_size (e.g., 7 x 7) x max_utterance_len
        attention_weights[:,0] corresponds to the attention map over the <SOS> symbol
    """
    if sampling_rule not in ['argmax', 'multinomial', 'topk']:
        raise ValueError('Unknown sampling rule.')
    model.eval()
    all_predictions = []
    attention_weights = []
    unk = model.decoder.vocab.unk
    use_aux_data = model.decoder.uses_aux_data
    aux_data = None
    for batch in loader:
        imgs = batch['image'].to(device)
        if use_aux_data:
            aux_data = batch['emotion'].to(device)
        encoder_out = model.encoder(imgs)
        enc_image_size = encoder_out.size(1)
        batch_size = encoder_out.size(0)
        encoder_dim = encoder_out.size(-1)
        # Flatten image
        encoder_out = encoder_out.view(batch_size, -1, encoder_dim)  # (batch_size, num_pixels, encoder_dim)
        # Create tensors to hold word predictions
        max_steps = max_utterance_len + 1  # one extra step for EOS marker
        predictions = torch.zeros(batch_size, max_steps).to(device)
        # Initialize decoder state
        decoder = model.decoder
        h, c = decoder.init_hidden_state(encoder_out)  # (batch_size, decoder_dim)
        # Tensor to store previous words at each step; now they're just <sos>
        prev_words = torch.LongTensor([decoder.vocab.sos] * batch_size).to(device)
        for t in range(max_steps):
            h, c, pred_t, alpha = decoder.attend_and_predict_next_word(encoder_out, h, c, prev_words, aux_data=aux_data)
            if t > 0:  # at t=1 it sees <sos> as the previous word
                alpha = alpha.view(-1, enc_image_size, enc_image_size)  # (bsize, enc_image_size, enc_image_size)
                attention_weights.append(alpha.cpu())
            pred_t /= temperature
            if drop_unk:
                pred_t[:, unk] = -math.inf
            if t > 0:
                # NOTE(review): advanced indexing pred_t[:, prev_words] blocks every
                # previous word for ALL rows of the batch, not per-row -- confirm
                # this cross-sample blocking is intended.
                pred_t[:, prev_words] = -math.inf  # avoid repeating the same word twice
            if t > 1:
                pred_t[:, predictions[:,t-2].long()] = -math.inf  # avoid repeating the prev-prev word
            if drop_bigrams and t > 1:
                prev_usage = predictions[:, :t-1]  # of the previous word (e.g, xx yy xx) (first xx)
                x, y = torch.where(prev_usage == torch.unsqueeze(prev_words, -1))
                y += 1  # word-after-last-in-prev-usage (e.g., yy in above)
                # NOTE(review): if the match were at the last column, y would index
                # one past prev_usage's width; the repeat-blocks above appear to
                # prevent that case -- confirm.
                y = prev_usage[x, y].long()
                pred_t[x, y] = -math.inf
            if sampling_rule == 'argmax':
                prev_words = torch.argmax(pred_t, 1)
            elif sampling_rule == 'multinomial':
                probability = torch.softmax(pred_t, 1)
                prev_words = torch.multinomial(probability, 1).squeeze_(-1)
            elif sampling_rule == 'topk':
                row_idx = torch.arange(batch_size)
                row_idx = row_idx.view([1, -1]).repeat(topk, 1).t()
                # do soft-max after you zero-out non topk (you could also do this before, ask me/Panos if need be:) )
                val, ind = pred_t.topk(topk, dim=1)
                val = torch.softmax(val, 1)
                probability = torch.zeros_like(pred_t)  # only the top-k logits will have non-zero prob.
                probability[row_idx, ind] = val
                prev_words = torch.multinomial(probability, 1).squeeze_(-1)
            predictions[:, t] = prev_words
        all_predictions.append(predictions.cpu().long())
    all_predictions = torch.cat(all_predictions)
    # NOTE(review): stacking assumes every loader batch has the same batch size
    # (a short final batch would break torch.stack) -- confirm loader drops the last batch.
    attention_weights = torch.stack(attention_weights, 1)
    return all_predictions, attention_weights
@torch.no_grad()
def sample_captions_beam_search(model, data_loader, beam_size, device, temperature=1, max_iter=500,
                                drop_unk=True, drop_bigrams=False):
    """Beam-search decoding of captions, one image at a time.

    :param model: (encoder, decoder) speaker
    :param data_loader: yields dicts with 'image' (batch size MUST be 1)
    :param beam_size: number of hypotheses kept alive per image
    :param temperature: softmax temperature applied to the logits
    :param max_iter: hard cap on decoding steps per image
    :param drop_unk: never emit the <unk> token
    :param drop_bigrams: suppress bigrams already present in a hypothesis
    :return: (captions, hypotheses_alphas, caption_log_prob), each a per-image list
        sorted by descending hypothesis score;
        hypotheses_alphas: list carrying the attention maps over the encoded-pixel space for each produced token.
    Note: batch size must be one.
    """
    if data_loader.batch_size != 1:
        raise ValueError('not implemented for bigger batch-sizes')
    model.eval()
    decoder = model.decoder
    vocab = model.decoder.vocab
    captions = list()
    hypotheses_alphas = list()
    caption_log_prob = list()
    aux_feat = None
    for batch in tqdm.tqdm(data_loader):  # For each image (batch-size = 1)
        image = batch['image'].to(device)  # (1, 3, H, W)
        if model.decoder.uses_aux_data:
            aux_data = batch['emotion'].to(device)
            aux_feat = model.decoder.auxiliary_net(aux_data)
        k = beam_size
        encoder_out = model.encoder(image)  # (1, enc_image_size, enc_image_size, encoder_dim)
        enc_image_size = encoder_out.size(1)
        encoder_dim = encoder_out.size(3)
        # Flatten encoding
        encoder_out = encoder_out.view(1, -1, encoder_dim)  # (1, num_pixels, encoder_dim)
        num_pixels = encoder_out.size(1)
        # We'll treat the problem as having a batch size of k
        encoder_out = encoder_out.expand(k, num_pixels, encoder_dim)  # (k, num_pixels, encoder_dim)
        # Tensor to store top k previous words at each step; now they're just <sos>
        k_prev_words = torch.LongTensor([[vocab.sos]] * k).to(device)  # (k, 1)
        # Tensor to store top k sequences; now they're just <sos>
        seqs = k_prev_words  # (k, 1)
        # Tensor to store top k sequences' scores; now they're just 0
        top_k_scores = torch.zeros(k, 1).to(device)  # (k, 1)
        # Tensor to store top k sequences' alphas; now they're just 1s
        seqs_alpha = torch.ones(k, 1, enc_image_size, enc_image_size).to(device)  # (k, 1, enc_image_size, enc_image_size)
        # Lists to store completed sequences and scores
        complete_seqs = list()
        complete_seqs_alpha = list()
        complete_seqs_scores = list()
        # Start decoding
        step = 1
        h, c = decoder.init_hidden_state(encoder_out)
        # s (below) is a number less than or equal to k, because sequences are removed
        # from this process once they hit <eos>
        while True:
            embeddings = decoder.word_embedding(k_prev_words).squeeze(1)  # (s, embed_dim)
            awe, alpha = decoder.attention(encoder_out, h)  # (s, encoder_dim), (s, num_pixels)
            alpha = alpha.view(-1, enc_image_size, enc_image_size)  # (s, enc_image_size, enc_image_size)
            gate = decoder.sigmoid(decoder.f_beta(h))  # gating scalar, (s, encoder_dim)
            awe = gate * awe
            decoder_input = torch.cat([embeddings, awe], dim=1)
            if aux_feat is not None:
                # Broadcast the (single image's) aux features to all live beams.
                af = torch.repeat_interleave(aux_feat, decoder_input.shape[0], dim=0)
                decoder_input = torch.cat([decoder_input, af], dim=1)
            h, c = decoder.decode_step(decoder_input, (h, c))  # (s, decoder_dim)
            scores = decoder.next_word(h)  # (s, vocab_size)
            if temperature != 1:
                scores /= temperature
            scores = F.log_softmax(scores, dim=1)
            if drop_unk:
                scores[:, vocab.unk] = -math.inf
            if drop_bigrams and step > 2:
                # drop bi-grams with frequency higher than 1.
                prev_usage = seqs[:, :step-1]
                x, y = torch.where(prev_usage == k_prev_words)
                y += 1  # word-after-last-in-prev-usage
                y = seqs[x, y]
                scores[x,y] = -math.inf
            if step > 2:
                ## drop "x and x" patterns: block re-emitting the word that preceded 'and'
                and_token = decoder.vocab('and')
                x, y = torch.where(k_prev_words == and_token)
                pre_and_word = seqs[x, step-2]
                scores[x, pre_and_word] = -math.inf
            # Add log-probabilities (cumulative hypothesis score)
            scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)
            # For the first step, all k points will have the same scores (since same k previous words, h, c)
            if step == 1:
                top_k_scores, top_k_words = scores[0].topk(k, 0, True, True)  # (s)
            else:
                # Unroll and find top scores, and their unrolled indices
                top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)
            # Convert unrolled indices to actual indices of scores (integer division / modulo)
            prev_word_inds = top_k_words // len(vocab)  # (s)
            next_word_inds = top_k_words % len(vocab)  # (s)
            # Add new words to sequences
            seqs = torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)
            seqs_alpha = torch.cat([seqs_alpha[prev_word_inds], alpha[prev_word_inds].unsqueeze(1)],
                                   dim=1)  # (s, step+1, enc_image_size, enc_image_size)
            # Which sequences are incomplete (didn't reach <eos>)?
            incomplete_inds = [ind for ind, word in enumerate(next_word_inds) if word != vocab.eos]
            complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
            # Set aside complete sequences
            if len(complete_inds) > 0:
                complete_seqs.extend(seqs[complete_inds].tolist())
                complete_seqs_alpha.extend(seqs_alpha[complete_inds].tolist())
                complete_seqs_scores.extend(top_k_scores[complete_inds].tolist())
            k -= len(complete_inds)  # reduce beam length accordingly
            # Proceed with incomplete sequences
            if k == 0:
                break
            seqs = seqs[incomplete_inds]
            seqs_alpha = seqs_alpha[incomplete_inds]
            h = h[prev_word_inds[incomplete_inds]]
            c = c[prev_word_inds[incomplete_inds]]
            encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
            top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
            k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)
            # Break if things have been going on too long
            if step > max_iter:
                break
            step += 1
        # Sort hypotheses by descending score. NOTE(review): if max_iter fired
        # before any beam completed, these lists are empty for this image.
        s_idx = np.argsort(complete_seqs_scores)[::-1]
        complete_seqs_scores = [complete_seqs_scores[i] for i in s_idx]
        complete_seqs = [complete_seqs[i] for i in s_idx]
        alphas = [complete_seqs_alpha[i] for i in s_idx]
        captions.append(complete_seqs)
        caption_log_prob.append(complete_seqs_scores)
        hypotheses_alphas.append(alphas)
    return captions, hypotheses_alphas, caption_log_prob
@torch.no_grad()
def properize_captions(captions, vocab, add_sos=True):
    """Normalize sampled captions so that every row terminates with <eos>.

    Everything after the first <eos> of a row is overwritten with <pad>; rows
    that never produced an <eos> get one forced onto their last position (and
    are counted, triggering a warning). Optionally an <sos> column is prepended.

    :param captions: torch Tensor holding M x max_len integers
    :param vocab: vocabulary object exposing sos / eos / pad ids
    :param add_sos: prepend an <sos> column when True
    :return: cleaned LongTensor of shape M x max_len (+1 when add_sos)
    """
    cleaned = []
    n_without_eos = 0
    for row in captions.cpu():
        eos_hits = torch.where(row == vocab.eos)[0]
        if len(eos_hits) == 0:
            # No <eos> was generated: force-terminate the caption.
            n_without_eos += 1
            row[-1] = vocab.eos
        elif eos_hits[0] < len(row):
            # Blank out everything past the first <eos>.
            row[eos_hits[0] + 1:] = vocab.pad
        cleaned.append(row)
    cleaned = torch.stack(cleaned)
    # Sanity check: every row now contains an <eos>.
    rows_with_eos = torch.unique(torch.where(cleaned == vocab.eos)[0])
    assert len(rows_with_eos) == len(cleaned)
    if add_sos:
        sos_column = torch.LongTensor([vocab.sos] * len(cleaned)).view(-1, 1)
        cleaned = torch.cat([sos_column, cleaned], dim=1)
    if n_without_eos > 0:
        warnings.warn('{} sentences without <eos> were generated.'.format(n_without_eos))
    return cleaned
def log_prob_of_dataset(model, data_loader, device, temperature=1):
    """Collect per-token log-probabilities and caption lengths over a whole dataset.

    :param model: encoder/decoder speaker
    :param data_loader: yields dicts with 'image' and 'tokens'
    :param device: torch device to run on
    :param temperature: softmax temperature forwarded to log_prob_of_caption
    :return: (all_log_probs, all_lens) -- cpu tensors concatenated over all batches
    """
    model.eval()
    prob_chunks, len_chunks = [], []
    for batch in data_loader:
        images = batch['image'].to(device)
        tokens = batch['tokens'].to(device)
        chunk_probs, chunk_lens = log_prob_of_caption(model, images, tokens, temperature=temperature)
        prob_chunks.append(chunk_probs.cpu())
        len_chunks.append(chunk_lens.cpu())
    return torch.cat(prob_chunks, dim=0), torch.cat(len_chunks, dim=0)
def perplexity_of_dataset(model, data_loader, device):
    """Corpus perplexity of the model on a dataset.

    For a test corpus, perplexity is 2 ^ {-l}, where l is the average
    log2-probability per token: log2(prob_of_sentences) / M, with M the total
    number of tokens in the dataset.

    :param model: encoder/decoder speaker
    :param data_loader: yields dicts with 'image' and 'tokens'
    :param device: torch device to run on
    :return: (perplexity, per-sentence probabilities, per-sentence lengths)
    """
    all_log_probs, all_lens = log_prob_of_dataset(model, data_loader, device)
    # Sum over token positions -> log-probability of each whole utterance.
    sentence_log_prob = torch.sum(all_log_probs, 1).double()
    sentence_prob = torch.exp(sentence_log_prob)
    token_count = torch.sum(all_lens).double()  # number of words in dataset
    # log_2 average, as perplexity is defined base 2.
    mean_log2_prob = torch.sum(torch.log2(sentence_prob)) / token_count
    perplexity = 2.0 ** (-mean_log2_prob)
    return perplexity, sentence_prob, all_lens
| [
"torch.nn.Dropout",
"torch.LongTensor",
"torch.exp",
"numpy.argsort",
"torch.log2",
"numpy.array",
"torch.softmax",
"torch.sum",
"torch.repeat_interleave",
"torch.arange",
"torch.nn.Sigmoid",
"torch.unsqueeze",
"torch.nn.Identity",
"torch.zeros_like",
"torch.argmax",
"torch.nn.utils.rn... | [((14065, 14080), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14078, 14080), False, 'import torch\n'), ((15238, 15253), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15251, 15253), False, 'import torch\n'), ((16902, 16917), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16915, 16917), False, 'import torch\n'), ((20970, 20985), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20983, 20985), False, 'import torch\n'), ((27927, 27942), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (27940, 27942), False, 'import torch\n'), ((10946, 10957), 'time.time', 'time.time', ([], {}), '()\n', (10955, 10957), False, 'import time\n'), ((16652, 16679), 'torch.zeros_like', 'torch.zeros_like', (['log_probs'], {}), '(log_probs)\n', (16668, 16679), False, 'import torch\n'), ((20836, 20862), 'torch.cat', 'torch.cat', (['all_predictions'], {}), '(all_predictions)\n', (20845, 20862), False, 'import torch\n'), ((20887, 20920), 'torch.stack', 'torch.stack', (['attention_weights', '(1)'], {}), '(attention_weights, 1)\n', (20898, 20920), False, 'import torch\n'), ((21739, 21761), 'tqdm.tqdm', 'tqdm.tqdm', (['data_loader'], {}), '(data_loader)\n', (21748, 21761), False, 'import tqdm\n'), ((28617, 28642), 'torch.stack', 'torch.stack', (['new_captions'], {}), '(new_captions)\n', (28628, 28642), False, 'import torch\n'), ((29515, 29546), 'torch.cat', 'torch.cat', (['all_log_probs'], {'dim': '(0)'}), '(all_log_probs, dim=0)\n', (29524, 29546), False, 'import torch\n'), ((29562, 29588), 'torch.cat', 'torch.cat', (['all_lens'], {'dim': '(0)'}), '(all_lens, dim=0)\n', (29571, 29588), False, 'import torch\n'), ((30117, 30145), 'torch.exp', 'torch.exp', (['log_prob_per_sent'], {}), '(log_prob_per_sent)\n', (30126, 30145), False, 'import torch\n'), ((2616, 2707), 'torch.nn.LSTMCell', 'nn.LSTMCell', (['(word_embedding.embedding_dim + encoder_dim + auxiliary_dim)', 'rnn_hidden_dim'], {}), '(word_embedding.embedding_dim + encoder_dim + auxiliary_dim,\n 
rnn_hidden_dim)\n', (2627, 2707), False, 'from torch import nn\n'), ((2973, 3011), 'torch.nn.Linear', 'nn.Linear', (['encoder_dim', 'rnn_hidden_dim'], {}), '(encoder_dim, rnn_hidden_dim)\n', (2982, 3011), False, 'from torch import nn\n'), ((3091, 3129), 'torch.nn.Linear', 'nn.Linear', (['encoder_dim', 'rnn_hidden_dim'], {}), '(encoder_dim, rnn_hidden_dim)\n', (3100, 3129), False, 'from torch import nn\n'), ((3207, 3245), 'torch.nn.Linear', 'nn.Linear', (['rnn_hidden_dim', 'encoder_dim'], {}), '(rnn_hidden_dim, encoder_dim)\n', (3216, 3245), False, 'from torch import nn\n'), ((3320, 3332), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (3330, 3332), False, 'from torch import nn\n'), ((3358, 3400), 'torch.nn.Linear', 'nn.Linear', (['rnn_hidden_dim', 'self.vocab_size'], {}), '(rnn_hidden_dim, self.vocab_size)\n', (3367, 3400), False, 'from torch import nn\n'), ((9626, 9685), 'torch.cat', 'torch.cat', (['[embeddings, attention_weighted_encoding]'], {'dim': '(1)'}), '([embeddings, attention_weighted_encoding], dim=1)\n', (9635, 9685), False, 'import torch\n'), ((11798, 11860), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['logits', 'decode_lengths'], {'batch_first': '(True)'}), '(logits, decode_lengths, batch_first=True)\n', (11818, 11860), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((11879, 11942), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['targets', 'decode_lengths'], {'batch_first': '(True)'}), '(targets, decode_lengths, batch_first=True)\n', (11899, 11942), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((13169, 13180), 'time.time', 'time.time', ([], {}), '()\n', (13178, 13180), False, 'import time\n'), ((14934, 14996), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['logits', 'decode_lengths'], {'batch_first': '(True)'}), '(logits, decode_lengths, batch_first=True)\n', (14954, 14996), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), 
((15015, 15078), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['targets', 'decode_lengths'], {'batch_first': '(True)'}), '(targets, decode_lengths, batch_first=True)\n', (15035, 15078), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((15120, 15162), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits.data', 'targets.data'], {}), '(logits.data, targets.data)\n', (15135, 15162), True, 'import torch.nn.functional as F\n'), ((16422, 16450), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['pred_t'], {'dim': '(1)'}), '(pred_t, dim=1)\n', (16435, 16450), True, 'import torch.nn.functional as F\n'), ((16569, 16609), 'torch.where', 'torch.where', (['(tokens == decoder.vocab.eos)'], {}), '(tokens == decoder.vocab.eos)\n', (16580, 16609), False, 'import torch\n'), ((28897, 28934), 'torch.cat', 'torch.cat', (['[sos, new_captions]'], {'dim': '(1)'}), '([sos, new_captions], dim=1)\n', (28906, 28934), False, 'import torch\n'), ((2854, 2894), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_rate', 'inplace': '(True)'}), '(p=dropout_rate, inplace=True)\n', (2864, 2894), False, 'from torch import nn\n'), ((2936, 2949), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (2947, 2949), False, 'from torch import nn\n'), ((5745, 5784), 'torch.where', 'torch.where', (['(captions == self.vocab.eos)'], {}), '(captions == self.vocab.eos)\n', (5756, 5784), False, 'import torch\n'), ((9800, 9843), 'torch.cat', 'torch.cat', (['[decoder_input, aux_feat]'], {'dim': '(1)'}), '([decoder_input, aux_feat], dim=1)\n', (9809, 9843), False, 'import torch\n'), ((16105, 16139), 'torch.zeros', 'torch.zeros', (['batch_size', 'max_steps'], {}), '(batch_size, max_steps)\n', (16116, 16139), False, 'import torch\n'), ((16689, 16716), 'torch.arange', 'torch.arange', (['mask.shape[0]'], {}), '(mask.shape[0])\n', (16701, 16716), False, 'import torch\n'), ((23991, 24026), 'torch.cat', 'torch.cat', (['[embeddings, awe]'], {'dim': '(1)'}), 
'([embeddings, awe], dim=1)\n', (24000, 24026), False, 'import torch\n'), ((24459, 24487), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (24472, 24487), True, 'import torch.nn.functional as F\n'), ((27506, 27538), 'numpy.argsort', 'np.argsort', (['complete_seqs_scores'], {}), '(complete_seqs_scores)\n', (27516, 27538), True, 'import numpy as np\n'), ((28254, 28287), 'torch.where', 'torch.where', (['(caption == vocab.eos)'], {}), '(caption == vocab.eos)\n', (28265, 28287), False, 'import torch\n'), ((28669, 28707), 'torch.where', 'torch.where', (['(new_captions == vocab.eos)'], {}), '(new_captions == vocab.eos)\n', (28680, 28707), False, 'import torch\n'), ((30007, 30034), 'torch.sum', 'torch.sum', (['all_log_probs', '(1)'], {}), '(all_log_probs, 1)\n', (30016, 30034), False, 'import torch\n'), ((30161, 30180), 'torch.sum', 'torch.sum', (['all_lens'], {}), '(all_lens)\n', (30170, 30180), False, 'import torch\n'), ((30253, 30278), 'torch.log2', 'torch.log2', (['prob_per_sent'], {}), '(prob_per_sent)\n', (30263, 30278), False, 'import torch\n'), ((8241, 8330), 'torch.cat', 'torch.cat', (['[decoder_lang_input, attention_weighted_encoding, auxiliary_data_t]'], {'dim': '(1)'}), '([decoder_lang_input, attention_weighted_encoding,\n auxiliary_data_t], dim=1)\n', (8250, 8330), False, 'import torch\n'), ((8374, 8441), 'torch.cat', 'torch.cat', (['[decoder_lang_input, attention_weighted_encoding]'], {'dim': '(1)'}), '([decoder_lang_input, attention_weighted_encoding], dim=1)\n', (8383, 8441), False, 'import torch\n'), ((11213, 11224), 'time.time', 'time.time', ([], {}), '()\n', (11222, 11224), False, 'import time\n'), ((13132, 13143), 'time.time', 'time.time', ([], {}), '()\n', (13141, 13143), False, 'import time\n'), ((16484, 16508), 'torch.arange', 'torch.arange', (['batch_size'], {}), '(batch_size)\n', (16496, 16508), False, 'import torch\n'), ((18362, 18396), 'torch.zeros', 'torch.zeros', (['batch_size', 
'max_steps'], {}), '(batch_size, max_steps)\n', (18373, 18396), False, 'import torch\n'), ((18658, 18708), 'torch.LongTensor', 'torch.LongTensor', (['([decoder.vocab.sos] * batch_size)'], {}), '([decoder.vocab.sos] * batch_size)\n', (18674, 18708), False, 'import torch\n'), ((19906, 19929), 'torch.argmax', 'torch.argmax', (['pred_t', '(1)'], {}), '(pred_t, 1)\n', (19918, 19929), False, 'import torch\n'), ((22646, 22681), 'torch.LongTensor', 'torch.LongTensor', (['([[vocab.sos]] * k)'], {}), '([[vocab.sos]] * k)\n', (22662, 22681), False, 'import torch\n'), ((22901, 22918), 'torch.zeros', 'torch.zeros', (['k', '(1)'], {}), '(k, 1)\n', (22912, 22918), False, 'import torch\n'), ((23033, 23081), 'torch.ones', 'torch.ones', (['k', '(1)', 'enc_image_size', 'enc_image_size'], {}), '(k, 1, enc_image_size, enc_image_size)\n', (23043, 23081), False, 'import torch\n'), ((24086, 24150), 'torch.repeat_interleave', 'torch.repeat_interleave', (['aux_feat', 'decoder_input.shape[0]'], {'dim': '(0)'}), '(aux_feat, decoder_input.shape[0], dim=0)\n', (24109, 24150), False, 'import torch\n'), ((24183, 24220), 'torch.cat', 'torch.cat', (['[decoder_input, af]'], {'dim': '(1)'}), '([decoder_input, af], dim=1)\n', (24192, 24220), False, 'import torch\n'), ((24737, 24776), 'torch.where', 'torch.where', (['(prev_usage == k_prev_words)'], {}), '(prev_usage == k_prev_words)\n', (24748, 24776), False, 'import torch\n'), ((25033, 25071), 'torch.where', 'torch.where', (['(k_prev_words == and_token)'], {}), '(k_prev_words == and_token)\n', (25044, 25071), False, 'import torch\n'), ((7701, 7716), 'random.random', 'random.random', ([], {}), '()\n', (7714, 7716), False, 'import random\n'), ((12411, 12435), 'numpy.array', 'np.array', (['decode_lengths'], {}), '(decode_lengths)\n', (12419, 12435), True, 'import numpy as np\n'), ((20009, 20033), 'torch.softmax', 'torch.softmax', (['pred_t', '(1)'], {}), '(pred_t, 1)\n', (20022, 20033), False, 'import torch\n'), ((19640, 19671), 'torch.unsqueeze', 
'torch.unsqueeze', (['prev_words', '(-1)'], {}), '(prev_words, -1)\n', (19655, 19671), False, 'import torch\n'), ((20178, 20202), 'torch.arange', 'torch.arange', (['batch_size'], {}), '(batch_size)\n', (20190, 20202), False, 'import torch\n'), ((20463, 20484), 'torch.softmax', 'torch.softmax', (['val', '(1)'], {}), '(val, 1)\n', (20476, 20484), False, 'import torch\n'), ((20515, 20539), 'torch.zeros_like', 'torch.zeros_like', (['pred_t'], {}), '(pred_t)\n', (20531, 20539), False, 'import torch\n'), ((20063, 20096), 'torch.multinomial', 'torch.multinomial', (['probability', '(1)'], {}), '(probability, 1)\n', (20080, 20096), False, 'import torch\n'), ((20666, 20699), 'torch.multinomial', 'torch.multinomial', (['probability', '(1)'], {}), '(probability, 1)\n', (20683, 20699), False, 'import torch\n')] |
import numpy as np
from PIL import Image
from typing import Tuple
SQUARE_COLOR = (255, 0, 0, 255) # Let's make a red square
ICON_SIZE = (512, 512) # The recommended minimum size from WordPress
def generate_pixels(resolution: Tuple[int, int], color=None) -> np.ndarray:
    """Generate the RGBA pixels of a solid-color image with the provided resolution.

    :param resolution: (width, height) of the image in pixels.
    :param color: RGBA 4-tuple used for every pixel; defaults to SQUARE_COLOR.
        (New optional parameter -- existing callers are unaffected.)
    :return: uint8 array of shape (height, width, 4), ready for Image.fromarray.
    """
    if color is None:
        color = SQUARE_COLOR
    pixels = []
    # Deliberately built one pixel at a time: this loop is the hook point for
    # eventually generating each pixel from an input song.
    for _row in range(resolution[1]):
        cur_row = []
        for _col in range(resolution[0]):
            cur_row.append(color)
        pixels.append(cur_row)
    return np.array(pixels, dtype=np.uint8)
def main():
    """Entry point: render a solid-color icon and write it out as a PNG."""
    # Build the pixel grid at the WordPress-recommended minimum icon size,
    # one pixel at a time.
    icon_pixels = generate_pixels(ICON_SIZE)
    # Wrap the ndarray in a PIL image and persist it to disk.
    favicon = Image.fromarray(icon_pixels)
    favicon.save('favicon.png', sizes=ICON_SIZE)


if __name__ == "__main__":
    main()
"numpy.array",
"PIL.Image.fromarray"
] | [((657, 689), 'numpy.array', 'np.array', (['pixels'], {'dtype': 'np.uint8'}), '(pixels, dtype=np.uint8)\n', (665, 689), True, 'import numpy as np\n'), ((969, 996), 'PIL.Image.fromarray', 'Image.fromarray', (['img_pixels'], {}), '(img_pixels)\n', (984, 996), False, 'from PIL import Image\n')] |
"""Backtest trading agents (a committee and saved single models) on BTC-EUR closes.

Loads adjusted-close data for one ticket, plots the benchmark buy-and-hold
return, then runs each agent over the series and plots its rate-of-return
history for comparison.
"""
import os
import warnings

from reinforcement_learning.crypto_market.comitee_trader_agent import ComiteeTraderAgent
from reinforcement_learning.crypto_market.crypto_trader_agent import CryptoTraderAgent

import sys

# etf_data_loader lives outside the package tree; make it importable first.
sys.path.insert(0, '../../../etf_data')
from etf_data_loader import load_all_data_from_file2

import numpy as np
import matplotlib.pyplot as plt

# Backtest window. Earlier ranges kept for reference:
# start_date = '2010-01-01'
# start_date = '2017-10-01'
start_date = '2017-01-01'
end_date = '2018-06-15'
prefix = 'btc_'
ticket = 'BTC-EUR'

df_adj_close = load_all_data_from_file2(prefix + 'etf_data_adj_close.csv', start_date, end_date)
# np.warnings was an accidental alias removed in NumPy 1.24; use stdlib warnings.
warnings.filterwarnings('ignore')

try:
    df_ticket_data = df_adj_close[['date', ticket]]
except KeyError:  # narrowed from a bare except: only a missing column means "not found"
    print('failed to find ticket: ' + ticket)
    sys.exit(1)  # sys.exit instead of the site-provided exit() builtin

# Keep only rows with a positive close price.
df_ticket_data = df_ticket_data[df_ticket_data[ticket] > 0.]
# NOTE(review): reindex() without a new index is a no-op, so method='bfill'
# has no effect here -- was fillna(method='bfill') intended? Left unchanged.
df_ticket_data = df_ticket_data.reindex(method='bfill')
print(df_ticket_data.head())
print(df_ticket_data.tail())

# Raw price series.
plt.plot(df_ticket_data[[ticket]])
plt.show()

data = df_ticket_data
legends = ['benchmark']
# Buy-and-hold cumulative return as the benchmark.
# .as_matrix() was removed in pandas 1.0; .values is the compatible replacement.
plt.plot(data[[ticket]].pct_change().cumsum().values)

# Committee of traders.
agent = ComiteeTraderAgent(ticket)
agent.invest(data[[ticket]], window=30)
plt.plot(agent.ror_history)
legends.append('ror comitee')
counts = [agent.actions.count('S'),
          agent.actions.count('B'),
          agent.actions.count('H'), ]
print('\n[S, B, H, ]\n', counts)
print('-' * 80)
print(start_date, ' <-> ', end_date)
print('ror:', agent.ror_history[-1])
print('cash:', agent.cash)
print('shares:', agent.shares)
print('value:', agent.history[-1])

# Individual saved models.
# models = os.listdir('models')
models = [
    'btc_eur_adaboost_15.pkl']
for model in models:
    agent = CryptoTraderAgent(ticket, model='models_eu/' + str(model))
    agent.invest(data[[ticket]], window=30)
    plt.plot(agent.ror_history)
    legends.append('ror ' + str(model))
    chaos_counts = [agent.actions.count('S'),
                    agent.actions.count('B'),
                    agent.actions.count('H'), ]
    print('\n[S, B, H, ]\n', chaos_counts)
    print('-' * 80)
    print(start_date, ' <-> ', end_date)
    print('ror:', agent.ror_history[-1])
    print('cash:', agent.cash)
    print('shares:', agent.shares)
    print('value:', agent.history[-1])

plt.legend(legends)
plt.show()
| [
"sys.path.insert",
"matplotlib.pyplot.plot",
"numpy.warnings.filterwarnings",
"reinforcement_learning.crypto_market.comitee_trader_agent.ComiteeTraderAgent",
"etf_data_loader.load_all_data_from_file2",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((200, 239), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../../etf_data"""'], {}), "(0, '../../../etf_data')\n", (215, 239), False, 'import sys\n'), ((503, 588), 'etf_data_loader.load_all_data_from_file2', 'load_all_data_from_file2', (["(prefix + 'etf_data_adj_close.csv')", 'start_date', 'end_date'], {}), "(prefix + 'etf_data_adj_close.csv', start_date,\n end_date)\n", (527, 588), False, 'from etf_data_loader import load_all_data_from_file2\n'), ((586, 622), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (612, 622), True, 'import numpy as np\n'), ((924, 958), 'matplotlib.pyplot.plot', 'plt.plot', (['df_ticket_data[[ticket]]'], {}), '(df_ticket_data[[ticket]])\n', (932, 958), True, 'import matplotlib.pyplot as plt\n'), ((959, 969), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (967, 969), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1113), 'reinforcement_learning.crypto_market.comitee_trader_agent.ComiteeTraderAgent', 'ComiteeTraderAgent', (['ticket'], {}), '(ticket)\n', (1105, 1113), False, 'from reinforcement_learning.crypto_market.comitee_trader_agent import ComiteeTraderAgent\n'), ((1155, 1182), 'matplotlib.pyplot.plot', 'plt.plot', (['agent.ror_history'], {}), '(agent.ror_history)\n', (1163, 1182), True, 'import matplotlib.pyplot as plt\n'), ((2227, 2246), 'matplotlib.pyplot.legend', 'plt.legend', (['legends'], {}), '(legends)\n', (2237, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2257), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2255, 2257), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1791), 'matplotlib.pyplot.plot', 'plt.plot', (['agent.ror_history'], {}), '(agent.ror_history)\n', (1772, 1791), True, 'import matplotlib.pyplot as plt\n')] |
import threading
import numpy as np
import SimpleITK as sitk
class NiftiGenerator2D_ExtraInput(object):
    """Thread-safe, Keras-style batch generator for 2-D NIfTI images plus an
    extra per-sample input vector.

    Each iteration yields ``([image_tensor, extra_inputs], labels)`` where
    ``image_tensor`` has shape (batch_size, image_size[0], image_size[1], 1).
    """
    def __init__(self, batch_size, image_locations,
                 labels, image_size, extra_inputs, random_shuffle=True):
        # Total number of available samples.
        self.n = len(image_locations)
        self.batch_size = batch_size
        self.batch_index = 0
        self.total_batches_seen = 0
        # Serialises index generation when next() is called from several
        # worker threads.
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
        self.image_locations = image_locations
        # labels / extra_inputs are indexed as 2-D arrays below --
        # presumably shape (n, n_labels) / (n, n_extra); confirm at call site.
        self.labels = labels
        self.image_size = image_size
        self.random_shuffle = random_shuffle
        self.extra_inputs = extra_inputs
    def _set_index_array(self):
        # Draw a fresh sample order (shuffled or sequential) for an epoch.
        if self.random_shuffle:
            self.index_array = np.random.permutation(self.n)
        else:
            self.index_array = np.arange(self.n)
    def __iter__(self):
        return self
    def __next__(self, *args, **kwargs):
        return self.next(*args, **kwargs)
    def reset(self):
        # Restart epoch bookkeeping and re-draw the sample order.
        self.batch_index = 0
        self.total_batches_seen = 0
        self._set_index_array()
    def _flow_index(self):
        # Infinite generator of per-batch index arrays.  When fewer than
        # batch_size samples remain, the epoch is reset and the batch is
        # topped up with the first samples of the fresh order.
        self.reset()
        while 1:
            if self.batch_index == 0:
                self._set_index_array()
            current_index = (self.batch_index * self.batch_size) % self.n
            if self.n > current_index + self.batch_size:
                self.batch_index += 1
            else:
                self.batch_index = 0
            self.total_batches_seen += 1
            if current_index + self.batch_size > self.n:
                # Wrap-around: NOTE(review) reset() re-shuffles mid-batch, so
                # filler indices may repeat samples already seen this epoch.
                N_missing_samples = (current_index + self.batch_size) - self.n
                batch_indices_leftover = self.index_array[current_index:]
                self.reset()
                batch_indices_filler = self.index_array[0:N_missing_samples]
                batch_indices = np.concatenate((batch_indices_leftover, batch_indices_filler))
            else:
                batch_indices = self.index_array[current_index:
                                                 current_index + self.batch_size]
            yield batch_indices
    def on_epoch_end(self):
        self.reset()
    def _get_batch_of_samples(self, index_array):
        # Load and stack the images/labels/extra inputs for one batch.
        image_tensor = np.zeros((self.batch_size,
                                 self.image_size[0],
                                 self.image_size[1],
                                 1))
        out_labels = self.labels[index_array, :]
        out_extra_inputs = self.extra_inputs[index_array, :]
        image_locations = self.image_locations[index_array]
        for i_sample, i_image_location in enumerate(image_locations):
            # NOTE(review): assumes each file holds a single 2-D slice whose
            # shape matches image_size exactly -- confirm preprocessing.
            i_image = sitk.ReadImage(i_image_location, sitk.sitkFloat32)
            i_image_array = sitk.GetArrayFromImage(i_image)
            image_tensor[i_sample, :, :, 0] = i_image_array[:, :]
        return [image_tensor, out_extra_inputs], out_labels
    def next(self):
        # Only index generation needs the lock; file I/O can overlap.
        with self.lock:
            index_array = next(self.index_generator)
        return self._get_batch_of_samples(index_array)
    def get_single_image(self, image_path):
        # Load one image as a (1, H, W, 1) tensor, e.g. for inference.
        full_sample_tensor = np.zeros((1,
                                       self.image_size[0],
                                       self.image_size[1],
                                       1))
        i_image = sitk.ReadImage(image_path, sitk.sitkFloat32)
        i_image_array = sitk.GetArrayFromImage(i_image)
        full_sample_tensor[0, :, :, 0] = i_image_array
        return full_sample_tensor
| [
"threading.Lock",
"SimpleITK.GetArrayFromImage",
"numpy.zeros",
"numpy.concatenate",
"SimpleITK.ReadImage",
"numpy.arange",
"numpy.random.permutation"
] | [((392, 408), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (406, 408), False, 'import threading\n'), ((2264, 2334), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.image_size[0], self.image_size[1], 1)'], {}), '((self.batch_size, self.image_size[0], self.image_size[1], 1))\n', (2272, 2334), True, 'import numpy as np\n'), ((3165, 3221), 'numpy.zeros', 'np.zeros', (['(1, self.image_size[0], self.image_size[1], 1)'], {}), '((1, self.image_size[0], self.image_size[1], 1))\n', (3173, 3221), True, 'import numpy as np\n'), ((3357, 3401), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_path', 'sitk.sitkFloat32'], {}), '(image_path, sitk.sitkFloat32)\n', (3371, 3401), True, 'import SimpleITK as sitk\n'), ((3426, 3457), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['i_image'], {}), '(i_image)\n', (3448, 3457), True, 'import SimpleITK as sitk\n'), ((786, 815), 'numpy.random.permutation', 'np.random.permutation', (['self.n'], {}), '(self.n)\n', (807, 815), True, 'import numpy as np\n'), ((861, 878), 'numpy.arange', 'np.arange', (['self.n'], {}), '(self.n)\n', (870, 878), True, 'import numpy as np\n'), ((2699, 2749), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['i_image_location', 'sitk.sitkFloat32'], {}), '(i_image_location, sitk.sitkFloat32)\n', (2713, 2749), True, 'import SimpleITK as sitk\n'), ((2778, 2809), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['i_image'], {}), '(i_image)\n', (2800, 2809), True, 'import SimpleITK as sitk\n'), ((1886, 1948), 'numpy.concatenate', 'np.concatenate', (['(batch_indices_leftover, batch_indices_filler)'], {}), '((batch_indices_leftover, batch_indices_filler))\n', (1900, 1948), True, 'import numpy as np\n')] |
import control as c
from control.xferfcn import clean_tf
from control.statesp import clean_ss
from control.timeresp import fival
import numpy as np
ci = 2 / np.sqrt(13)
w = np.sqrt(13)
Kq = -24
T02 = 1.4
V = 160
s = c.tf([1, 0], [1])
Hq = Kq * (1 + T02 * s) / (s ** 2 + 2 * ci * w * s + w ** 2)
Htheta = Hq / s
Hgamma = Kq / s / (s ** 2 + 2 * ci * w * s + w ** 2)
Hh = Hgamma * V / s
H = c.tf([[Hq.num[0][0], Htheta.num[0][0]], [Hgamma.num[0][0], Hh.num[0][0]]],
[[Hq.den[0][0], Htheta.den[0][0]], [Hgamma.den[0][0], Hh.den[0][0]]])
sys1 = c.ss(H)
sys1.D = np.array([[1, 2], [3e-13, 4]]) # Changes it to a non-zero input matrix D
H = c.tf(sys1) # Gives a tf with nice unrounded residual components
H2 = c.tf([1, -3, 0, 0], [1, 1e-13, 7, 0, 0, 0]) # to test minreal things
print(H.clean())
#print(sys1)
#print(H.clean(input = 1, output = 2, precision = 9))
#print(sys1.clean(precision = 12))
#print(clean_tf(sys1, input = 1, output = 2, precision = 1))
#print(clean_tf(H, input = 2, output = 2, precision = 8))
#print(clean_ss(sys1, precision = 2))
#print(clean_ss(H, precision = 12))
print(H.fival(forcing="step", input=1, output=1, stabilityCheck=True))
print(sys1.fival(forcing="step", input=2, stabilityCheck=True, precision=17))
print(fival(H, forcing="step", output=2, stabilityCheck=True, precision=3))
print(fival(sys1, forcing="step", input=2, output=2, precision=10)) | [
"control.timeresp.fival",
"numpy.sqrt",
"control.ss",
"numpy.array",
"control.tf"
] | [((174, 185), 'numpy.sqrt', 'np.sqrt', (['(13)'], {}), '(13)\n', (181, 185), True, 'import numpy as np\n'), ((217, 234), 'control.tf', 'c.tf', (['[1, 0]', '[1]'], {}), '([1, 0], [1])\n', (221, 234), True, 'import control as c\n'), ((389, 537), 'control.tf', 'c.tf', (['[[Hq.num[0][0], Htheta.num[0][0]], [Hgamma.num[0][0], Hh.num[0][0]]]', '[[Hq.den[0][0], Htheta.den[0][0]], [Hgamma.den[0][0], Hh.den[0][0]]]'], {}), '([[Hq.num[0][0], Htheta.num[0][0]], [Hgamma.num[0][0], Hh.num[0][0]]],\n [[Hq.den[0][0], Htheta.den[0][0]], [Hgamma.den[0][0], Hh.den[0][0]]])\n', (393, 537), True, 'import control as c\n'), ((550, 557), 'control.ss', 'c.ss', (['H'], {}), '(H)\n', (554, 557), True, 'import control as c\n'), ((567, 597), 'numpy.array', 'np.array', (['[[1, 2], [3e-13, 4]]'], {}), '([[1, 2], [3e-13, 4]])\n', (575, 597), True, 'import numpy as np\n'), ((646, 656), 'control.tf', 'c.tf', (['sys1'], {}), '(sys1)\n', (650, 656), True, 'import control as c\n'), ((717, 760), 'control.tf', 'c.tf', (['[1, -3, 0, 0]', '[1, 1e-13, 7, 0, 0, 0]'], {}), '([1, -3, 0, 0], [1, 1e-13, 7, 0, 0, 0])\n', (721, 760), True, 'import control as c\n'), ((158, 169), 'numpy.sqrt', 'np.sqrt', (['(13)'], {}), '(13)\n', (165, 169), True, 'import numpy as np\n'), ((1255, 1323), 'control.timeresp.fival', 'fival', (['H'], {'forcing': '"""step"""', 'output': '(2)', 'stabilityCheck': '(True)', 'precision': '(3)'}), "(H, forcing='step', output=2, stabilityCheck=True, precision=3)\n", (1260, 1323), False, 'from control.timeresp import fival\n'), ((1331, 1391), 'control.timeresp.fival', 'fival', (['sys1'], {'forcing': '"""step"""', 'input': '(2)', 'output': '(2)', 'precision': '(10)'}), "(sys1, forcing='step', input=2, output=2, precision=10)\n", (1336, 1391), False, 'from control.timeresp import fival\n')] |
"""Replacement r2_score function for when sklearn is not available."""
import numpy as np
#===============================================================================
# BSD 3-Clause License
# Copyright (c) 2007-2021 The scikit-learn developers.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def r2_score(y_true, y_pred):
    """:math:`R^2` (coefficient of determination) regression score function.
    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). A constant model that always
    predicts the expected value of y, disregarding the input features,
    would get a :math:`R^2` score of 0.0.
    This function is taken from sklearn, and is used when sklearn is not available.
    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    Returns
    -------
    z : float
        The :math:`R^2` score, averaged over outputs for multi-output input.
    Notes
    -----
    This is not a symmetric function.
    Unlike most other scores, :math:`R^2` score may be negative (it need not
    actually be the square of a quantity R).
    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.
    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
    """
    if len(y_pred) < 2:
        # TODO use proper logging
        print("R^2 score is not well-defined with less than two samples.")
        return np.nan
    # Promote 1-D inputs to column vectors so the multi-output code below
    # works uniformly.
    if len(y_true.shape) == 1:
        y_true = y_true.reshape((len(y_true), 1))  # unsqueeze
    if len(y_pred.shape) == 1:
        # Bug fix: previously reshaped with len(y_true); use y_pred's own
        # length so mismatched inputs fail loudly instead of mis-reshaping.
        y_pred = y_pred.reshape((len(y_pred), 1))  # unsqueeze
    numerator = ((y_true - y_pred) ** 2).sum(axis=0, dtype=np.float64)
    denominator = ((y_true - np.average( y_true, axis=0)) ** 2).sum(axis=0, dtype=np.float64)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = np.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    return np.average(output_scores)
| [
"numpy.ones",
"numpy.average"
] | [((3741, 3767), 'numpy.ones', 'np.ones', (['[y_true.shape[1]]'], {}), '([y_true.shape[1]])\n', (3748, 3767), True, 'import numpy as np\n'), ((4103, 4128), 'numpy.average', 'np.average', (['output_scores'], {}), '(output_scores)\n', (4113, 4128), True, 'import numpy as np\n'), ((3516, 3542), 'numpy.average', 'np.average', (['y_true'], {'axis': '(0)'}), '(y_true, axis=0)\n', (3526, 3542), True, 'import numpy as np\n')] |
import numpy as np
import random
from sklearn.model_selection import train_test_split
# One-hot encoding for nucleotides, accepting both cases; the ambiguous
# base 'N'/'n' maps to the all-zero vector.
one_hot_conv = {"A": [1, 0, 0, 0], "T": [0, 0, 0, 1],
                "C": [0, 1, 0, 0], "G": [0, 0, 1, 0],
                "a": [1, 0, 0, 0], "t": [0, 0, 0, 1],
                "c": [0, 1, 0, 0], "g": [0, 0, 1, 0],
                "n": [0, 0, 0, 0], "N": [0, 0, 0, 0]}
# Lower-to-upper-case normalisation table for the four bases.
capital ={"A": "A", "T": "T",
          "C": "C", "G": "G",
          "a": "A", "t": "T",
          "c": "C", "g": "G"}
from scipy import signal
def cropping(inpt, len_seq, random_seed):
    """Zero out the tail of a one-hot sequence at a random cut point.

    The cut index is drawn uniformly from [len_seq - random_seed, len_seq],
    so ``random_seed`` bounds how much of the tail may be cropped.
    Returns a new array; the input is left untouched.
    """
    cut = random.randint(len_seq - random_seed, len_seq)
    cropped = np.zeros(inpt.shape)
    cropped[:cut, :] = inpt[:cut, :]
    return cropped
# One-hot rows substituted by mutation(); keys are the strings "1".."4"
# produced by random.randint(1, 4).
mutation_dic = {
    "1":[1,0,0,0],
    "2":[0,1,0,0],
    "3":[0,0,1,0],
    "4":[0,0,0,1]
}
def mutation(inpt, len_seq, rate):
    """Return a copy of a one-hot sequence with random point mutations.

    Roughly ``len_seq * rate`` positions are overwritten by a randomly
    chosen one-hot base (drawn sites may repeat, so the effective number
    of mutated positions can be lower).
    """
    mutated = inpt.copy()
    n_sites = int(len_seq * rate)
    for site in np.random.randint(len_seq, size=n_sites):
        mutated[site, :] = mutation_dic[str(random.randint(1, 4))]
    return mutated
def one_hot_padding(sequence, out_length):
    """One-hot encode a base sequence into a fixed-size (out_length, 4) array.

    Sequences longer than ``out_length`` are truncated; shorter ones are
    zero-padded at the end.

    Returns
    -------
    (output, len_seq) : tuple
        The encoded array and the original sequence length.
    """
    output = np.zeros((out_length, 4))
    # Fix: np.float was removed in NumPy 1.24; the builtin float dtype is
    # the exact equivalent (float64).
    temp = np.array([np.array(one_hot_conv[base], dtype=float) for base in sequence])
    len_seq = temp.shape[0]
    if len_seq > out_length:
        output[:, :] = temp[0:out_length, :]
    else:
        output[0:len_seq, :] = temp
    return output, len_seq
import gzip as gz
def load_fasta_gz(f_name, seq_len=1000):
    """Load sequences from a gzipped FASTA file.

    Each record's concatenated sequence lines must total exactly *seq_len*
    characters (default 1000, preserving the previously hard-coded check).

    Parameters
    ----------
    f_name : str
        Path to the ``.fasta.gz`` file.
    seq_len : int, optional
        Expected length of every sequence.

    Returns
    -------
    list of str
        One string per FASTA record, in file order.

    Raises
    ------
    ValueError
        If any record's sequence length differs from ``seq_len``.
        (Previously an ``assert``, which is stripped under ``python -O``.)
    """
    sequences = []
    cur_string = ""
    with gz.open(f_name) as fasta_file:
        for line in fasta_file:
            line = line.decode("ascii")
            if line[0] == '>':
                # Header line: flush the previous record, if any.
                if cur_string:
                    if len(cur_string) != seq_len:
                        raise ValueError(
                            "sequence length %d != expected %d"
                            % (len(cur_string), seq_len))
                    sequences.append(cur_string)
                    cur_string = ""
            else:
                cur_string += line.strip()
    # Flush the final record.
    if len(cur_string) != seq_len:
        raise ValueError("sequence length %d != expected %d"
                         % (len(cur_string), seq_len))
    sequences.append(cur_string)
    return sequences
| [
"gzip.open",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"random.randint"
] | [((552, 572), 'numpy.zeros', 'np.zeros', (['inpt.shape'], {}), '(inpt.shape)\n', (560, 572), True, 'import numpy as np\n'), ((587, 633), 'random.randint', 'random.randint', (['(len_seq - random_seed)', 'len_seq'], {}), '(len_seq - random_seed, len_seq)\n', (601, 633), False, 'import random\n'), ((896, 941), 'numpy.random.randint', 'np.random.randint', (['len_seq'], {'size': 'num_mutation'}), '(len_seq, size=num_mutation)\n', (913, 941), True, 'import numpy as np\n'), ((1124, 1149), 'numpy.zeros', 'np.zeros', (['(out_length, 4)'], {}), '((out_length, 4))\n', (1132, 1149), True, 'import numpy as np\n'), ((986, 1006), 'random.randint', 'random.randint', (['(1)', '(4)'], {}), '(1, 4)\n', (1000, 1006), False, 'import random\n'), ((1515, 1530), 'gzip.open', 'gz.open', (['f_name'], {}), '(f_name)\n', (1522, 1530), True, 'import gzip as gz\n'), ((1170, 1214), 'numpy.array', 'np.array', (['one_hot_conv[base]'], {'dtype': 'np.float'}), '(one_hot_conv[base], dtype=np.float)\n', (1178, 1214), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Constants
pi = 3.1415926
# Acoustic wave properties
A = 0.01              # source amplitude
u = 343               # propagation speed -- presumably speed of sound in air (m/s)
v = 40000             # frequency (40 kHz)
_lambda = u / v       # wavelength
w = 2 * pi * v        # angular frequency
k = 2 * pi / _lambda  # wavenumber
T = 2 * pi / w        # period
rho = 1.293           # medium density -- presumably air (kg/m^3)
# Size of the levitated object
R = 0.005
# Distance between two points.
def r(x0, y0, x1=0, y1=0):
    """Euclidean distance from (x0, y0) to (x1, y1); defaults to the origin."""
    dx = x0 - x1
    dy = y0 - y1
    return np.sqrt(dx * dx + dy * dy)
# Wave-superposition accumulator for one grid cell.
def wave(x1, y1, times):
    """Add the squared instantaneous sum of all source waves at (x1, y1).

    NOTE(review): writes into array_v_2[j][i] using the loop indices i, j
    of the module-level simulation loop rather than arguments -- fragile;
    only call this from inside that loop.
    """
    global array_v_2
    ans1 = 0
    for x, y, theta in points.points:
        # Each source contributes sin(w*t - k*r + theta) at distance r.
        ans1 += np.sin(w * times - k * (r(x, y, x1, y1)) + theta)
    array_v_2[j][i] += np.square(ans1)
# Domain size in half-wavelength units
L = 5
W = 5
_l = L * _lambda / 2
_w = W * _lambda / 2
# Discretisation:
# _L subdivides the horizontal coordinate
# _W subdivides the vertical coordinate
_L, _W, _Time_Split = 50, 50, 50
# Map grid indices onto physical coordinates.
def coordinate(x, y):
    """Scale grid indices (x, y) into physical positions inside the domain."""
    return x * _w / _W, y * _l / _L
# Accumulator for the sum of squared velocities across the sampled instants.
array_v_2 = np.zeros((_W, _L))
# Converts parametrised curves into discrete source points.
class Point:
    """Registry of wave-source points; each entry is [x, y, theta]."""
    def __init__(self):
        self.points = []
        self.len = 0
    def input(self, x, y, theta):
        # Record one source and keep the count in step with the list.
        self.points.append([x, y, theta])
        self.len += 1
points = Point()
# Source-registration classes follow.
# theta encodes a phase offset (a half path-length difference).
# Linear array parametrisation:
# x = at + b
# y = ct + d
# e < t < f
class F:
    """Register a linear array of wave sources along x = a*t + b, y = c*t + d.

    The parameter t is sampled at _W + _L evenly spaced values starting at e
    with spacing (f - e) / (_W + _L); each sample is appended to the global
    ``points`` registry with phase ``theta``.
    """
    def __init__(self, a, b, c, d, e, f, theta=0):
        n_samples = _W + _L
        step = (f - e) / n_samples
        for idx in range(n_samples):
            t = step * idx + e
            points.input(a * t + b, c * t + d, theta)
# Circular-array variant:
# x = a*cos(t) + b
# y = c*sin(t) + d
# e < t < f
class FCircle:
    """Register wave sources along an ellipse x = a*cos(t)+b, y = c*sin(t)+d.

    NOTE(review): theta defaults to False here (0 in class F) -- presumably
    intended as zero phase; confirm. ``self.points`` is initialised but
    never used afterwards.
    """
    def __init__(self, a, b, c, d, e, f, theta = False):
        self.points = []
        n_samples = _W + _L
        step = (f - e) / n_samples
        for idx in range(n_samples):
            t = step * idx + e
            points.input(a * np.cos(t) + b, c * np.sin(t) + d, theta)
# Wave-source layout presets (only f0 is instantiated below):
# f0: x = t,  y = 0
# NOTE(review): the original notes gave f0 as _w/4 < t < 3_w/4, but the
# call below uses 0 < t < _w -- confirm which range is intended.
# f1: x = t,  y = l,  _w/4 < t < 3_w/4
# f3: x = 0,  y = t,  _l/4 < t < 3_l/4
# f4: x = _w, y = t,  _l/4 < t < 3_l/4
f0 = F(1, 0, 0, 0, 0, _w)
# Simulation: accumulate squared velocity over one period sampled at
# _Time_Split instants over the _W x _L grid (wave() writes via the
# global loop indices i, j).
split_time = T / _Time_Split
for t in range(_Time_Split):
    for i in range(_W):
        for j in range(_L):
            _x, _y = coordinate(i, j)
            time = split_time * t  # NOTE: shadows the stdlib 'time' name
            wave(_x, _y, time)
# Scale by amplitude/frequency factors and normalise by the source count.
array_v_2 = A ** 2 * w ** 2 * array_v_2 / (points.len + 1)**2
array_p_2 = rho**2 * u**2 * k**2 * array_v_2
# Time-average over the sampled instants.
array_v_2 /= _Time_Split
array_p_2 /= _Time_Split
array_p_2_sqrt = np.sqrt(array_p_2)
# Acoustic potential energy (Gor'kov-style expression -- confirm source).
array_U = 2 * pi * (R**3) * (array_p_2_sqrt / (3 * rho * (u**2)) - rho * array_v_2 / 2)
# Gradient of the potential (direction of the acoustic radiation force).
array_grad = np.gradient(array_U)
# Sound pressure level relative to the 20 uPa reference.
# NOTE(review): SPL conventionally uses log10; np.log is the natural log,
# so these dB values are scaled by 1/ln(10) -- confirm intent.
p_rms = 2e-5
array_level = 20 *np.log(np.abs(array_p_2_sqrt) / p_rms)
# Plots, in order: sound pressure, sound pressure level, acoustic
# potential energy, then the force field.
# Sound pressure
contour = plt.contourf(array_p_2_sqrt)
plt.colorbar(contour)
plt.title("Sound Pressure")
plt.show()
# Sound pressure level
contour = plt.contourf(array_level)
plt.colorbar(contour)
plt.title("Sound Pressure Level")
plt.text(1, 1, 'L={}lambda/2,W={}lambda/2'.format(L, W))
plt.show()
# Acoustic potential energy
contour = plt.contourf(array_U)
plt.colorbar(contour)
plt.title("Acoustic Potential Energy")
plt.text(1, 1, 'L={}lambda/2,W={}lambda/2'.format(L, W))
plt.show()
# Acoustic force field (gradient quiver)
plt.quiver(array_grad[0], array_grad[1])
plt.title("Acoustic Potential Power")
plt.text(1, 1, 'L={}lambda/2,W={}lambda/2'.format(L, W))
plt.show()
| [
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.quiver",
"numpy.abs",
"numpy.sqrt",
"matplotlib.pyplot.colorbar",
"numpy.square",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.title",
"numpy.gradient",
"matplotlib.pyplot.show"
] | [((762, 780), 'numpy.zeros', 'np.zeros', (['(_W, _L)'], {}), '((_W, _L))\n', (770, 780), True, 'import numpy as np\n'), ((2273, 2291), 'numpy.sqrt', 'np.sqrt', (['array_p_2'], {}), '(array_p_2)\n', (2280, 2291), True, 'import numpy as np\n'), ((2415, 2435), 'numpy.gradient', 'np.gradient', (['array_U'], {}), '(array_U)\n', (2426, 2435), True, 'import numpy as np\n'), ((2563, 2591), 'matplotlib.pyplot.contourf', 'plt.contourf', (['array_p_2_sqrt'], {}), '(array_p_2_sqrt)\n', (2575, 2591), True, 'import matplotlib.pyplot as plt\n'), ((2593, 2614), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['contour'], {}), '(contour)\n', (2605, 2614), True, 'import matplotlib.pyplot as plt\n'), ((2616, 2643), 'matplotlib.pyplot.title', 'plt.title', (['"""Sound Pressure"""'], {}), "('Sound Pressure')\n", (2625, 2643), True, 'import matplotlib.pyplot as plt\n'), ((2645, 2655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2653, 2655), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2699), 'matplotlib.pyplot.contourf', 'plt.contourf', (['array_level'], {}), '(array_level)\n', (2686, 2699), True, 'import matplotlib.pyplot as plt\n'), ((2701, 2722), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['contour'], {}), '(contour)\n', (2713, 2722), True, 'import matplotlib.pyplot as plt\n'), ((2724, 2757), 'matplotlib.pyplot.title', 'plt.title', (['"""Sound Pressure Level"""'], {}), "('Sound Pressure Level')\n", (2733, 2757), True, 'import matplotlib.pyplot as plt\n'), ((2817, 2827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2825, 2827), True, 'import matplotlib.pyplot as plt\n'), ((2846, 2867), 'matplotlib.pyplot.contourf', 'plt.contourf', (['array_U'], {}), '(array_U)\n', (2858, 2867), True, 'import matplotlib.pyplot as plt\n'), ((2869, 2890), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['contour'], {}), '(contour)\n', (2881, 2890), True, 'import matplotlib.pyplot as plt\n'), ((2892, 2930), 'matplotlib.pyplot.title', 'plt.title', (['"""Acoustic 
Potential Energy"""'], {}), "('Acoustic Potential Energy')\n", (2901, 2930), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2998, 3000), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3049), 'matplotlib.pyplot.quiver', 'plt.quiver', (['array_grad[0]', 'array_grad[1]'], {}), '(array_grad[0], array_grad[1])\n', (3019, 3049), True, 'import matplotlib.pyplot as plt\n'), ((3051, 3088), 'matplotlib.pyplot.title', 'plt.title', (['"""Acoustic Potential Power"""'], {}), "('Acoustic Potential Power')\n", (3060, 3088), True, 'import matplotlib.pyplot as plt\n'), ((3148, 3158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3156, 3158), True, 'import matplotlib.pyplot as plt\n'), ((258, 298), 'numpy.sqrt', 'np.sqrt', (['((x0 - x1) ** 2 + (y0 - y1) ** 2)'], {}), '((x0 - x1) ** 2 + (y0 - y1) ** 2)\n', (265, 298), True, 'import numpy as np\n'), ((495, 510), 'numpy.square', 'np.square', (['ans1'], {}), '(ans1)\n', (504, 510), True, 'import numpy as np\n'), ((2482, 2504), 'numpy.abs', 'np.abs', (['array_p_2_sqrt'], {}), '(array_p_2_sqrt)\n', (2488, 2504), True, 'import numpy as np\n'), ((1606, 1617), 'numpy.cos', 'np.cos', (['t_i'], {}), '(t_i)\n', (1612, 1617), True, 'import numpy as np\n'), ((1627, 1638), 'numpy.sin', 'np.sin', (['t_i'], {}), '(t_i)\n', (1633, 1638), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""DecisionTreeClassifier(Telco Dataset).ipynb
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1lSnAsYluPfeTR_sbPvf5qGcz1wBwhRNW
"""
import pandas as pd
import numpy as np
from google.colab import files
# Upload the Telco churn CSV into the Colab runtime.
uploaded = files.upload()
data_pd = pd.read_csv('WA_Fn-UseC_-Telco-Customer-Churn.csv', index_col=False)
df = data_pd
# Label-encode every object (string) column in place via category codes.
for col_name in df.columns:
    if(df[col_name].dtype == 'object'):
        df[col_name]= df[col_name].astype('category')
        df[col_name] = df[col_name].cat.codes
with np.printoptions(threshold=np.inf):
    print(df.columns)
# NOTE(review): customerID is a row identifier, not a predictive feature --
# keeping it lets the tree memorise rows; consider dropping it.
features_cols = ['customerID', 'gender', 'SeniorCitizen', 'Partner', 'Dependents',
       'tenure', 'PhoneService', 'MultipleLines', 'InternetService',
       'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport',
       'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling',
       'PaymentMethod', 'MonthlyCharges', 'TotalCharges']
X = df[features_cols]
Y = df.Churn
from sklearn.model_selection import train_test_split
# 70/30 split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=1)
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
# Unpruned tree: grown to purity (prone to overfitting).
model = DecisionTreeClassifier()
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
pd.set_option('display.max_columns', None)
print(Y_pred[0:5])
from sklearn import metrics
print("Accuracy:",(round(metrics.accuracy_score(Y_test,Y_pred) * 100)), "%")
print("Confusion Matrix For UNPRUNED TREE:")
metrics.confusion_matrix(Y_test, Y_pred)
#Visualizing DecisionTree
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from IPython.display import Image
import pydotplus
dot_data = StringIO()
export_graphviz(model, out_file=dot_data,
                filled=True, rounded=True,
                special_characters=True,feature_names = features_cols,class_names=['0','1'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('telco.png')
Image(graph.create_png())
#Pruning the tree (pre-pruning: entropy criterion, depth capped at 3)
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
model = DecisionTreeClassifier(criterion='entropy', max_depth=3)
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
pd.set_option('display.max_columns', None)
#print(X_test[0:19])
print(Y_pred[0:5])
from sklearn import metrics
print("Accuracy:",(round(metrics.accuracy_score(Y_test,Y_pred) * 100)), "%")
print("Confusion Matrix For PRUNED TREE:")
metrics.confusion_matrix(Y_test, Y_pred)
#Visualizing DecisionTree-Prunned
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from IPython.display import Image
import pydotplus
dot_data = StringIO()
export_graphviz(model, out_file=dot_data,
                filled=True, rounded=True,
                special_characters=True,feature_names = features_cols,class_names=['0','1'])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png('telco.png')
Image(graph.create_png())
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.tree.DecisionTreeClassifier",
"google.colab.files.upload",
"pandas.set_option",
"sklearn.tree.export_graphviz",
"numpy.printoptions",
"sklearn.externals.six.StringIO",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.confusi... | [((306, 320), 'google.colab.files.upload', 'files.upload', ([], {}), '()\n', (318, 320), False, 'from google.colab import files\n'), ((332, 400), 'pandas.read_csv', 'pd.read_csv', (['"""WA_Fn-UseC_-Telco-Customer-Churn.csv"""'], {'index_col': '(False)'}), "('WA_Fn-UseC_-Telco-Customer-Churn.csv', index_col=False)\n", (343, 400), True, 'import pandas as pd\n'), ((1127, 1180), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.3)', 'random_state': '(1)'}), '(X, Y, test_size=0.3, random_state=1)\n', (1143, 1180), False, 'from sklearn.model_selection import train_test_split\n'), ((1267, 1291), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (1289, 1291), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1353, 1395), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (1366, 1395), True, 'import pandas as pd\n'), ((1568, 1608), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['Y_test', 'Y_pred'], {}), '(Y_test, Y_pred)\n', (1592, 1608), False, 'from sklearn import metrics\n'), ((1787, 1797), 'sklearn.externals.six.StringIO', 'StringIO', ([], {}), '()\n', (1795, 1797), False, 'from sklearn.externals.six import StringIO\n'), ((1798, 1952), 'sklearn.tree.export_graphviz', 'export_graphviz', (['model'], {'out_file': 'dot_data', 'filled': '(True)', 'rounded': '(True)', 'special_characters': '(True)', 'feature_names': 'features_cols', 'class_names': "['0', '1']"}), "(model, out_file=dot_data, filled=True, rounded=True,\n special_characters=True, feature_names=features_cols, class_names=['0',\n '1'])\n", (1813, 1952), False, 'from sklearn.tree import export_graphviz\n'), ((2198, 2254), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'max_depth': '(3)'}), "(criterion='entropy', max_depth=3)\n", (2220, 2254), False, 'from 
sklearn.tree import DecisionTreeClassifier\n'), ((2316, 2358), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (2329, 2358), True, 'import pandas as pd\n'), ((2550, 2590), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['Y_test', 'Y_pred'], {}), '(Y_test, Y_pred)\n', (2574, 2590), False, 'from sklearn import metrics\n'), ((2777, 2787), 'sklearn.externals.six.StringIO', 'StringIO', ([], {}), '()\n', (2785, 2787), False, 'from sklearn.externals.six import StringIO\n'), ((2788, 2942), 'sklearn.tree.export_graphviz', 'export_graphviz', (['model'], {'out_file': 'dot_data', 'filled': '(True)', 'rounded': '(True)', 'special_characters': '(True)', 'feature_names': 'features_cols', 'class_names': "['0', '1']"}), "(model, out_file=dot_data, filled=True, rounded=True,\n special_characters=True, feature_names=features_cols, class_names=['0',\n '1'])\n", (2803, 2942), False, 'from sklearn.tree import export_graphviz\n'), ((587, 620), 'numpy.printoptions', 'np.printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (602, 620), True, 'import numpy as np\n'), ((1471, 1509), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['Y_test', 'Y_pred'], {}), '(Y_test, Y_pred)\n', (1493, 1509), False, 'from sklearn import metrics\n'), ((2455, 2493), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['Y_test', 'Y_pred'], {}), '(Y_test, Y_pred)\n', (2477, 2493), False, 'from sklearn import metrics\n')] |
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import functools
import math

import numpy as np
from scipy import linalg
from scipy.fftpack import fft, ifft

import six
def _framing(a, L):
    """Split 1-D array *a* into 50%-overlapping frames of length L.

    Builds a zero-copy strided view of every length-L window, keeps every
    (L // 2)-th window, then transposes and copies so the result owns its
    memory.  Returns an array of shape (L, n_frames).
    """
    shape = a.shape[:-1] + (a.shape[-1] - L + 1, L)
    strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=shape,
                                           strides=strides)[::L // 2].T.copy()
def mdct_waveform(scale, freq_bin):
    """Return the unit-norm MDCT atom of length *scale* for *freq_bin*.

    The atom is a sine-windowed cosine oscillation, normalised to unit
    Euclidean norm.
    """
    L = float(scale)
    K = L / 2.0
    fact = math.sqrt(2.0 / K)
    const_fact = (np.pi / K) * (float(freq_bin) + 0.5)
    const_offset = (L + 1.0) / 2.0
    f = np.pi / L
    # Fix: np.float was removed in NumPy 1.24; builtin float is equivalent.
    i = np.arange(scale, dtype=float)
    wf = (fact * np.sin(f * (i + 0.5)) *
          np.cos(const_fact * ((i - K / 2.0) + const_offset)))
    return wf / linalg.norm(wf)
def mdct(x, L):
    """Modified Discrete Cosine Transform (MDCT)
    Returns the Modified Discrete Cosine Transform with fixed
    window size L of the signal x.
    The window is based on a sine window.
    Parameters
    ----------
    x : ndarray, shape (N,)
        The signal
    L : int
        The window length
    Returns
    -------
    y : ndarray, shape (L/2, 2 * N / L)
        The MDCT coefficients
    See also
    --------
    imdct
    """
    # Fix: np.float / np.complex were removed in NumPy 1.24; the builtin
    # float / complex dtypes are the exact equivalents.
    x = np.asarray(x, dtype=float)
    N = x.size
    # Number of frequency channels
    K = L // 2
    # Test length
    if N % K != 0:
        raise RuntimeError('Input length must be a multiple of the half of '
                           'the window size')
    # Pad edges with zeros
    xx = np.zeros(L // 4 + N + L // 4)
    xx[L // 4:-L // 4] = x
    x = xx
    del xx
    # Number of frames
    P = N // K
    if P < 2:
        raise ValueError('Signal too short')
    # Framing into 50%-overlapping windows of length L
    x = _framing(x, L)
    # Windowing: sine window, with flat/zero edges on the border frames
    aL = np.arange(L, dtype=float)
    w_long = np.sin((np.pi / L) * (aL + 0.5))
    w_edge_L = w_long.copy()
    w_edge_L[:L // 4] = 0.
    w_edge_L[L // 4:L // 2] = 1.
    w_edge_R = w_long.copy()
    w_edge_R[L // 2:L // 2 + L // 4] = 1.
    w_edge_R[L // 2 + L // 4:] = 0.
    x[:, 0] *= w_edge_L
    x[:, 1:-1] *= w_long[:, None]
    x[:, -1] *= w_edge_R
    # Pre-twiddle
    x = x.astype(complex)
    x *= np.exp((-1j * np.pi / L) * aL)[:, None]
    # FFT
    y = fft(x, axis=0)
    # Post-twiddle: keep the first L/2 bins only
    y = y[:L // 2, :]
    y *= np.exp((-1j * np.pi * (L // 2 + 1.) / L)
               * (0.5 + aL[:L // 2]))[:, None]
    # Real part and scaling
    y = math.sqrt(2. / K) * np.real(y)
    return y
def imdct(y, L):
    """Inverse Modified Discrete Cosine Transform (MDCT)
    Returns the Inverse Modified Discrete Cosine Transform
    with fixed window size L of the vector of coefficients y.
    The window is based on a sine window.
    Parameters
    ----------
    y : ndarray, shape (L/2, 2 * N / L)
        The MDCT coefficients
    L : int
        The window length
    Returns
    -------
    x : ndarray, shape (N,)
        The reconstructed signal
    See also
    --------
    mdct
    """
    # Signal length
    N = y.size
    # Number of frequency channels
    K = L // 2
    # Test length
    if N % K != 0:
        raise ValueError('Input length must be a multiple of the half of '
                         'the window size')
    # Number of frames
    P = N // K
    if P < 2:
        raise ValueError('Signal too short')
    # Reshape: zero-pad the K coefficient rows into L-row frames.
    # Fix: np.float was removed in NumPy 1.24; builtin float is equivalent.
    temp = y
    y = np.zeros((L, P), dtype=float)
    y[:K, :] = temp
    del temp
    # Pre-twiddle
    aL = np.arange(L, dtype=float)
    y = y * np.exp((1j * np.pi * (L / 2. + 1.) / L) * aL)[:, None]
    # IFFT
    x = ifft(y, axis=0)
    # Post-twiddle
    x *= np.exp((1j * np.pi / L) * (aL + (L / 2. + 1.) / 2.))[:, None]
    # Windowing: sine window, with flat/zero edges on the border frames
    w_long = np.sin((np.pi / L) * (aL + 0.5))
    w_edge_L = w_long.copy()
    w_edge_L[:L // 4] = 0.
    w_edge_L[L // 4:L // 2] = 1.
    w_edge_R = w_long.copy()
    w_edge_R[L // 2:L // 2 + L // 4] = 1.
    w_edge_R[L // 2 + L // 4:L] = 0.
    x[:, 0] *= w_edge_L
    x[:, 1:-1] *= w_long[:, None]
    x[:, -1] *= w_edge_R
    # Real part and scaling
    x = math.sqrt(2. / K) * L * np.real(x)
    # Overlap-add consecutive frames (50% overlap)
    def _overlap_add(acc, frame):
        z = np.concatenate((acc, np.zeros((K,))))
        z[-2 * K:] += frame
        return z
    # functools.reduce replaces the former six.moves.reduce (Py2/3 shim).
    x = functools.reduce(_overlap_add, [x[:, i] for i in range(x.shape[1])])
    # Cut edges
    x = x[K // 2:-K // 2].copy()
    return x
class MDCT(object):
    """Modified Discrete Cosine Transform (MDCT)

    Supports multiple MDCT dictionaries.

    Parameters
    ----------
    sizes : list of int
        The sizes of MDCT windows e.g. [256, 1024]
    """
    def __init__(self, sizes):
        self.sizes = sizes

    def _dot(self, y):
        """Synthesis: sum the inverse MDCTs of each dictionary's slice of y."""
        cnt = 0
        # Bug fix: '/' yields a float in Python 3, which breaks the slice
        # bounds and np.zeros below; integer division is intended.
        N = y.size // len(self.sizes)
        x = np.zeros(N)
        for L in self.sizes:
            this_y = y[cnt:cnt + N]
            # Skip the inverse transform entirely for all-zero coefficients.
            if (np.count_nonzero(this_y) > 0):
                this_x = imdct(np.reshape(this_y, (L // 2, -1)), L)
                x += this_x
            cnt += N
        return x

    def dot(self, y):
        """Apply synthesis to one coefficient vector or a batch of them."""
        if y.ndim == 1:
            return self._dot(y)
        else:
            return np.array([self._dot(this_y) for this_y in y])

    def _doth(self, x):
        """Analysis: concatenate the MDCT coefficients for every window size."""
        return np.concatenate([mdct(x, L).ravel() for L in self.sizes])

    def doth(self, x):
        """Apply analysis (adjoint) to one signal or a batch of signals."""
        if x.ndim == 1:
            return self._doth(x)
        else:
            return np.array([self._doth(this_x) for this_x in x])
| [
"numpy.reshape",
"scipy.fftpack.ifft",
"numpy.asarray",
"math.sqrt",
"numpy.lib.stride_tricks.as_strided",
"numpy.exp",
"numpy.real",
"numpy.zeros",
"numpy.count_nonzero",
"scipy.fftpack.fft",
"numpy.cos",
"scipy.linalg.norm",
"numpy.sin",
"numpy.arange"
] | [((529, 547), 'math.sqrt', 'math.sqrt', (['(2.0 / K)'], {}), '(2.0 / K)\n', (538, 547), False, 'import math\n'), ((664, 696), 'numpy.arange', 'np.arange', (['scale'], {'dtype': 'np.float'}), '(scale, dtype=np.float)\n', (673, 696), True, 'import numpy as np\n'), ((1305, 1334), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (1315, 1334), True, 'import numpy as np\n'), ((1600, 1629), 'numpy.zeros', 'np.zeros', (['(L // 4 + N + L // 4)'], {}), '(L // 4 + N + L // 4)\n', (1608, 1629), True, 'import numpy as np\n'), ((1841, 1869), 'numpy.arange', 'np.arange', (['L'], {'dtype': 'np.float'}), '(L, dtype=np.float)\n', (1850, 1869), True, 'import numpy as np\n'), ((1883, 1913), 'numpy.sin', 'np.sin', (['(np.pi / L * (aL + 0.5))'], {}), '(np.pi / L * (aL + 0.5))\n', (1889, 1913), True, 'import numpy as np\n'), ((2311, 2325), 'scipy.fftpack.fft', 'fft', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2314, 2325), False, 'from scipy.fftpack import fft, ifft\n'), ((3434, 3466), 'numpy.zeros', 'np.zeros', (['(L, P)'], {'dtype': 'np.float'}), '((L, P), dtype=np.float)\n', (3442, 3466), True, 'import numpy as np\n'), ((3528, 3556), 'numpy.arange', 'np.arange', (['L'], {'dtype': 'np.float'}), '(L, dtype=np.float)\n', (3537, 3556), True, 'import numpy as np\n'), ((3644, 3659), 'scipy.fftpack.ifft', 'ifft', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (3648, 3659), False, 'from scipy.fftpack import fft, ifft\n'), ((3781, 3811), 'numpy.sin', 'np.sin', (['(np.pi / L * (aL + 0.5))'], {}), '(np.pi / L * (aL + 0.5))\n', (3787, 3811), True, 'import numpy as np\n'), ((748, 797), 'numpy.cos', 'np.cos', (['(const_fact * (i - K / 2.0 + const_offset))'], {}), '(const_fact * (i - K / 2.0 + const_offset))\n', (754, 797), True, 'import numpy as np\n'), ((817, 832), 'scipy.linalg.norm', 'linalg.norm', (['wf'], {}), '(wf)\n', (828, 832), False, 'from scipy import linalg\n'), ((2252, 2282), 'numpy.exp', 'np.exp', (['(-1.0j * np.pi / L * aL)'], {}), '(-1.0j * 
np.pi / L * aL)\n', (2258, 2282), True, 'import numpy as np\n'), ((2377, 2441), 'numpy.exp', 'np.exp', (['(-1.0j * np.pi * (L // 2 + 1.0) / L * (0.5 + aL[:L // 2]))'], {}), '(-1.0j * np.pi * (L // 2 + 1.0) / L * (0.5 + aL[:L // 2]))\n', (2383, 2441), True, 'import numpy as np\n'), ((2503, 2521), 'math.sqrt', 'math.sqrt', (['(2.0 / K)'], {}), '(2.0 / K)\n', (2512, 2521), False, 'import math\n'), ((2523, 2533), 'numpy.real', 'np.real', (['y'], {}), '(y)\n', (2530, 2533), True, 'import numpy as np\n'), ((3689, 3744), 'numpy.exp', 'np.exp', (['(1.0j * np.pi / L * (aL + (L / 2.0 + 1.0) / 2.0))'], {}), '(1.0j * np.pi / L * (aL + (L / 2.0 + 1.0) / 2.0))\n', (3695, 3744), True, 'import numpy as np\n'), ((4155, 4165), 'numpy.real', 'np.real', (['x'], {}), '(x)\n', (4162, 4165), True, 'import numpy as np\n'), ((4819, 4830), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4827, 4830), True, 'import numpy as np\n'), ((714, 735), 'numpy.sin', 'np.sin', (['(f * (i + 0.5))'], {}), '(f * (i + 0.5))\n', (720, 735), True, 'import numpy as np\n'), ((3569, 3616), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * (L / 2.0 + 1.0) / L * aL)'], {}), '(1.0j * np.pi * (L / 2.0 + 1.0) / L * aL)\n', (3575, 3616), True, 'import numpy as np\n'), ((4131, 4149), 'math.sqrt', 'math.sqrt', (['(2.0 / K)'], {}), '(2.0 / K)\n', (4140, 4149), False, 'import math\n'), ((4247, 4261), 'numpy.zeros', 'np.zeros', (['(K,)'], {}), '((K,))\n', (4255, 4261), True, 'import numpy as np\n'), ((4912, 4936), 'numpy.count_nonzero', 'np.count_nonzero', (['this_y'], {}), '(this_y)\n', (4928, 4936), True, 'import numpy as np\n'), ((316, 380), 'numpy.lib.stride_tricks.as_strided', 'np.lib.stride_tricks.as_strided', (['a'], {'shape': 'shape', 'strides': 'strides'}), '(a, shape=shape, strides=strides)\n', (347, 380), True, 'import numpy as np\n'), ((4974, 5006), 'numpy.reshape', 'np.reshape', (['this_y', '(L // 2, -1)'], {}), '(this_y, (L // 2, -1))\n', (4984, 5006), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
"""Auxilary functions for group representations"""
import numpy as np
def sgn(s):
    """Return (-1)**s, i.e. -1 for odd s and +1 for even s."""
    return -1 if s & 1 else 1
def zero_vector(length, *data):
    """Return a zero numpy vector of the given length with dtype np.int32.

    Extra positional arguments are accepted and ignored (interface
    compatibility with similar factory functions).
    """
    vec = np.zeros(length, dtype=np.int32)
    return vec
#234567890123456789012345678901234567890123456789012345678901234567890
def pm_mat_from_function(f, l):
    """Create an l times l matrix from a function f.

    The returned matrix m has entries m[i,j] = (-1)**f(i,j) for
    i, j = 0,...,l-1. It is a numpy array of shape (l,l), dtype np.int32.
    """
    m = np.zeros((l, l), dtype=np.int32)
    for row in range(l):
        for col in range(l):
            # odd f -> -1, even f -> +1
            m[row, col] = -1 if f(row, col) & 1 else 1
    return m
def pm_diag_from_function(f, l):
    """Create an l times l diagonal matrix from a function f.

    The returned matrix m has entries m[i,i] = (-1)**f(i) for
    i = 0,...,l-1 and zeros off the diagonal. It is a numpy array of
    shape (l,l), dtype np.int32.
    """
    m = np.zeros((l, l), dtype=np.int32)
    for k in range(l):
        m[k, k] = -1 if f(k) & 1 else 1
    return m
| [
"numpy.zeros"
] | [((364, 396), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': 'np.int32'}), '(length, dtype=np.int32)\n', (372, 396), True, 'import numpy as np\n'), ((717, 749), 'numpy.zeros', 'np.zeros', (['(l, l)'], {'dtype': 'np.int32'}), '((l, l), dtype=np.int32)\n', (725, 749), True, 'import numpy as np\n'), ((1142, 1174), 'numpy.zeros', 'np.zeros', (['(l, l)'], {'dtype': 'np.int32'}), '((l, l), dtype=np.int32)\n', (1150, 1174), True, 'import numpy as np\n')] |
from keras.models import Sequential, load_model
from keras.layers import MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.losses import SparseCategoricalCrossentropy
from keras.optimizers import RMSprop, Adam
from keras.metrics import SparseCategoricalAccuracy
import matplotlib.pyplot as plt
import numpy as np
# Default hyperparameters for the classifier (see Classifier below).
LEARNINGRATE = 2e-4  # Adam learning rate used in both training stages
DROPOUT = 0.4  # 0.5  # dropout rate after pooling layers and before the output head
class Classifier:
    """CNN image classifier built on top of a pre-trained autoencoder.

    The encoder half of `ae_model` is copied layer by layer (a Dropout is
    inserted after each pooling layer), then flattened. Call add_layers()
    to append the classification head before training.
    Training is interactive: epochs/batch sizes are read via input().
    """
    def __init__(self, ae_model):
        self.model = Sequential()
        self.train_history = None  # keras History from training stage 2 (see train())
        # each layer name in the autoencoder ends with either 'e' or 'd'
        # 'e': encoder layer, 'd': decoder layer
        for i in range(len(ae_model.layers)):
            # stop copying at the first decoder layer
            if ae_model.layers[i].name[-1] == "d":
                break
            self.model.add(ae_model.get_layer(index=i))
            # after each max pooling layer add a dropout layer
            if ae_model.layers[i].name[:4] == "pool":
                self.model.add(Dropout(DROPOUT))
        self.model.add(Flatten())
    def add_layers(self, fc_nodes):
        """Append the head: Dense(fc_nodes, relu) -> BatchNorm -> Dropout -> softmax(10)."""
        self.model.add(Dense(fc_nodes, activation="relu", name="fully_connected"))
        self.model.add(BatchNormalization())
        self.model.add(Dropout(DROPOUT))
        self.model.add(Dense(10, activation="softmax", name="output"))
        print(self.model.summary())
    def train(self, train_images, train_labels, val_images, val_labels):
        """Two-stage fine-tuning.

        Stage 1 trains only the fully-connected layer (all other layers
        frozen); stage 2 unfreezes and trains the whole network. The model
        is re-compiled between stages so the trainable flags take effect.
        Returns (epochs, batch_size) of stage 2, used later for plotting.
        """
        # 1st training stage: train only the weights of the fc layer, "freeze" the rest
        for l in self.model.layers:
            if l.name != "fully_connected":
                l.trainable = False
        # compile
        self.model.compile(loss=SparseCategoricalCrossentropy(),
                           optimizer=Adam(learning_rate=LEARNINGRATE),
                           metrics=[SparseCategoricalAccuracy()])
        epochs1 = int(input("\n> Enter training epochs for training stage 1: "))
        minibatch1 = int(input("> Enter training batch size for training stage 1: "))
        print("\nTraining Stage 1: Training only the Fully-Connected layer's weights...")
        self.model.fit(train_images, train_labels, batch_size=minibatch1, epochs=epochs1, validation_data=(val_images, val_labels))
        print("Done!\n")
        # 2nd training stage: train the entire network
        for l in self.model.layers:
            l.trainable = True
        # re-compile the model and repeat training
        self.model.compile(loss=SparseCategoricalCrossentropy(),
                           optimizer=Adam(learning_rate=LEARNINGRATE),
                           metrics=[SparseCategoricalAccuracy()])
        epochs2 = int(input("> Enter training epochs for training stage 2: "))
        minibatch2 = int(input("> Enter training batch size for training stage 2: "))
        print("\nTraining Stage 2: Training the entire network...")
        self.train_history = self.model.fit(train_images, train_labels, batch_size=minibatch2,
                                            epochs=epochs2, validation_data=(val_images, val_labels))
        print("Done!\n")
        # we use epochs and batch size of the 2nd training stage for plotting
        return (epochs2, minibatch2)
    def test(self, test_images, test_labels, size):
        """Evaluate on the test set; returns (predicted labels, accuracy, loss).

        NOTE(review): `size` is unused here — kept for interface
        compatibility; confirm whether callers rely on it.
        """
        y_pred1 = self.model.predict(test_images)
        y_pred2 = np.argmax(y_pred1, axis=1)  # class index per sample
        res = self.model.evaluate(test_images, test_labels)
        print("\nClassifier Test Accuracy = {}".format(res[1]))
        print("Classifier Test Loss = {}".format(res[0]))
        return (y_pred2, res[1], res[0])
    def plot_acc_loss(self):
        """Plot stage-2 train/val accuracy and loss curves side by side."""
        fig, ((ax1, ax2)) = plt.subplots(nrows=1, ncols=2)
        fig.tight_layout()
        ax1.plot(self.train_history.history["sparse_categorical_accuracy"])
        ax1.plot(self.train_history.history["val_sparse_categorical_accuracy"])
        ax1.set_title("Training Curve")
        ax1.set_ylabel("accuracy")
        ax1.set_xlabel("epochs")
        ax1.legend(["train accuracy", "val accuracy"], loc="lower right")
        ax2.plot(self.train_history.history["loss"])
        ax2.plot(self.train_history.history["val_loss"])
        ax2.set_title("Training Curve")
        ax2.set_ylabel("loss")
        ax2.set_xlabel("epochs")
        ax2.legend(["train loss", "val loss"], loc="upper right")
        fig.subplots_adjust(wspace=0.5)
        plt.show()
| [
"keras.optimizers.Adam",
"keras.layers.Flatten",
"keras.losses.SparseCategoricalCrossentropy",
"numpy.argmax",
"keras.models.Sequential",
"keras.layers.Dense",
"keras.layers.BatchNormalization",
"keras.layers.Dropout",
"matplotlib.pyplot.subplots",
"keras.metrics.SparseCategoricalAccuracy",
"mat... | [((470, 482), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (480, 482), False, 'from keras.models import Sequential, load_model\n'), ((3333, 3359), 'numpy.argmax', 'np.argmax', (['y_pred1'], {'axis': '(1)'}), '(y_pred1, axis=1)\n', (3342, 3359), True, 'import numpy as np\n'), ((3652, 3682), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (3664, 3682), True, 'import matplotlib.pyplot as plt\n'), ((4380, 4390), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4388, 4390), True, 'import matplotlib.pyplot as plt\n'), ((994, 1003), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1001, 1003), False, 'from keras.layers import MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1066, 1124), 'keras.layers.Dense', 'Dense', (['fc_nodes'], {'activation': '"""relu"""', 'name': '"""fully_connected"""'}), "(fc_nodes, activation='relu', name='fully_connected')\n", (1071, 1124), False, 'from keras.layers import MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1149, 1169), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1167, 1169), False, 'from keras.layers import MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1194, 1210), 'keras.layers.Dropout', 'Dropout', (['DROPOUT'], {}), '(DROPOUT)\n', (1201, 1210), False, 'from keras.layers import MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1235, 1281), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""output"""'}), "(10, activation='softmax', name='output')\n", (1240, 1281), False, 'from keras.layers import MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1651, 1682), 'keras.losses.SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy', ([], {}), '()\n', (1680, 1682), False, 'from keras.losses import SparseCategoricalCrossentropy\n'), ((1722, 1754), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 
'LEARNINGRATE'}), '(learning_rate=LEARNINGRATE)\n', (1726, 1754), False, 'from keras.optimizers import RMSprop, Adam\n'), ((2458, 2489), 'keras.losses.SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy', ([], {}), '()\n', (2487, 2489), False, 'from keras.losses import SparseCategoricalCrossentropy\n'), ((2529, 2561), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'LEARNINGRATE'}), '(learning_rate=LEARNINGRATE)\n', (2533, 2561), False, 'from keras.optimizers import RMSprop, Adam\n'), ((952, 968), 'keras.layers.Dropout', 'Dropout', (['DROPOUT'], {}), '(DROPOUT)\n', (959, 968), False, 'from keras.layers import MaxPool2D, Flatten, Dense, Dropout, BatchNormalization\n'), ((1793, 1820), 'keras.metrics.SparseCategoricalAccuracy', 'SparseCategoricalAccuracy', ([], {}), '()\n', (1818, 1820), False, 'from keras.metrics import SparseCategoricalAccuracy\n'), ((2600, 2627), 'keras.metrics.SparseCategoricalAccuracy', 'SparseCategoricalAccuracy', ([], {}), '()\n', (2625, 2627), False, 'from keras.metrics import SparseCategoricalAccuracy\n')] |
##
import os
import numpy as np
import argparse
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import torch.utils.data as data_utils
from dataset import Polygon3DSample
from network import ImplicitNet, LossFunction
from common_tools.utils import read_json, draw_colored_points_to_obj, AverageMeter, SimpleLogger, catch_nan
from common_tools.geometry import read_obj_file, write_obj_file
from torch.autograd import grad
import time
# from pytorch3d.ops.knn import _KNN, knn_points
# def _create_tree(points_padded: torch.Tensor, num_points_per_cloud=None):
# """
# create a data structure, per-point cache knn-neighbor
# Args:
# points_padded (N,P,D)
# num_points_per_cloud list
# """
# knn_k = 8
# assert (points_padded.ndim == 3)
# if num_points_per_cloud is None:
# num_points_per_cloud = torch.tensor([points_padded.shape[1]] * points_padded.shape[0],
# device=points_padded.device, dtype=torch.long)
# knn_result = knn_points(
# points_padded, points_padded, num_points_per_cloud, num_points_per_cloud,
# K=knn_k + 1, return_nn=True, return_sorted=True)
# return knn_result
# ## g_sdf_resample
# def repulsive_update(init_points, knn_results, num_points, normals):
# normals = F.normalize(normals, dim=-1)
# knn_nn = knn_results.knn[..., 1:, :]
# diag = (init_points.view(-1, 3).max(dim=0).values -
# init_points.view(-1, 3).min(0).values).norm().item()
# number = num_points.item()
# difference = init_points[:, :, None, :] - knn_nn
# inv_sigma_spatial = number / diag
# distance = torch.sum(difference ** 2, dim=-1)
# spatial = torch.exp(-distance * inv_sigma_spatial)
# difference_proj = difference - (difference * normals[:, :, None, :]).sum(dim=-1, keepdim=True) * normals[:, :, None, :]
# move = torch.sum(spatial[..., None] * difference_proj, dim=-2)
# points = init_points + move
# return points
def repulsive_update(points, gradients, K=8):
    """Compute per-point repulsion moves that spread samples along a surface.

    For each point, a Gaussian-weighted sum of the displacements to its K
    nearest neighbors is projected onto the plane orthogonal to `gradients`
    (assumed to be approximately unit surface normals — TODO confirm), so
    points slide tangentially away from each other.

    Parameters
    ----------
    points : torch.Tensor, shape [N, 3]
    gradients : torch.Tensor, shape [N, 3]
    K : int, number of neighbors (excluding the point itself)

    Returns
    -------
    torch.Tensor, shape [N, 3] — the tangential moves to add to `points`.
    """
    # Brute-force O(N^2) pairwise squared distances. Computed in torch on
    # the same device as `points`, replacing the original detach->cpu->numpy
    # round trip; no_grad mirrors the original's detached distance matrix.
    # TODO: replace with a real KNN structure for large N.
    with torch.no_grad():
        diff_all = points[:, None, :] - points[None, :, :]
        sqdist_matrix = (diff_all * diff_all).sum(dim=-1)
        knn_sqdist, knn_idx = torch.topk(sqdist_matrix, k=K + 1, dim=-1, largest=False)
        del sqdist_matrix
        knn_sqdist = knn_sqdist[:, 1:]  # drop self-distance (always 0, smallest)
        knn_idx = knn_idx[:, 1:]
    knn_points = points[knn_idx, :]  # [N, K, 3]; gradient flows through this gather
    # sample points aabb's diagonal length sets the kernel bandwidth
    diag = (points.max(dim=0).values - points.min(dim=0).values).norm()
    num_points = points.shape[0]
    inv_sigma_spatial = (num_points / diag) * 1.5  # magic scale factor 1.5
    spatial = torch.exp(-knn_sqdist * inv_sigma_spatial)  # Gaussian neighbor weights
    difference = points[:, None, :] - knn_points
    # remove the normal component so the move stays tangent to the surface
    difference_proj = difference - (difference * gradients[:, None, :]).sum(dim=-1, keepdim=True) * gradients[:, None, :]
    moves = torch.sum(spatial[..., None] * difference_proj, dim=-2)
    return moves
def project_to_surface(points, model, max_iter=10):
    """Project `points` onto the zero level set of the signed distance `model`.

    Each iteration steps every point along its (detached) SDF gradient by
    its signed distance; stops early once all points are within 1e-4 of
    the surface. Returns the moved points.
    """
    for step in range(max_iter):
        sdfs = model(points)
        normals = model.grad_compute(points, outputs=sdfs, detach=True)
        points = points - normals * sdfs
        if abs(sdfs).max() < 0.0001:
            print(f"converged at {step}")
            return points
    print("max dist to surface", abs(sdfs).max())
    return points
## compute gradient
def grad_compute(x, outputs, model):
    """Return d(model(x))/dx via autograd, keeping the graph for higher-order use.

    Note: the `outputs` argument is ignored — the model is re-evaluated on
    `x`, which guarantees the graph is attached to `x`.
    """
    outputs = model(x)
    ones = torch.ones_like(outputs, requires_grad=False, device=x.device)
    (points_grad,) = grad(
        outputs=outputs,
        inputs=x,
        grad_outputs=ones,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )
    return points_grad
def parse_args():
    """Parse command-line arguments; --cfg (required) names the experiment config file."""
    parser = argparse.ArgumentParser(
        description="Modeling multi-body motion with neural implicits")
    parser.add_argument('--cfg',
                        help='experiment configure file name',
                        required=True,
                        type=str)
    return parser.parse_args()
if __name__ == "__main__":
## get config
args = parse_args()
config_path = args.cfg
if os.path.isfile(config_path):
config = read_json(config_path)
print("==== config ====")
for k, v in config.items():
print(k, v)
print("==== config ====")
else:
print("no config file")
exit()
## use cuda or not?
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print("device: ", device)
## directories for monitoring
saved_dir = config['trainer']['save_dir'] ## save checkpoint
log_dir = config['trainer']['log_dir'] ## save logger
res_dir = config['trainer']['res_dir'] ## add timestamp
if not os.path.exists(res_dir):
os.makedirs(res_dir)
shutil.copyfile(config_path, os.path.join(res_dir, 'config.json'))
## load network
net = ImplicitNet(dim=3, **config["network"]["implicit_args"])
net.load_state_dict(torch.load("saved/checkpoint_epoch500.pth"))
net = net.to(device)
## initialize the samples
N = int(5e3)
points_n = torch.randn((N, 3))*0.3 ## normal
points_u = (torch.rand((N, 3)) - 0.5)*0.8 ## uniform
points = torch.cat([points_n, points_u], dim=0)
# dataset = Polygon3DSample(**config["data"])
# points = torch.tensor(dataset.vertices, dtype=torch.float32)
points.requires_grad = True ## requires grad (pytorch 1.8)
points = points.to(device)
points = project_to_surface(points, net, 20)
## iterative solving
time1 = time.time()
for i in range(201):
print(i)
sdfs = net(points)
gradients = net.grad_compute(points, outputs=sdfs, detach=True)
moves = repulsive_update(points, gradients, K=20)
if i%25 == 0 and True:
write_obj_file(f"saved_pc{i}_after.obj", V=points)
points += moves
points = project_to_surface(points, net, 20)
if i%100 == 0 and True:
write_obj_file(f"saved_pc{i}_before.obj", V=points)
time2 = time.time()
print((time2 - time1)/100)
| [
"common_tools.geometry.write_obj_file",
"common_tools.utils.read_json",
"torch.exp",
"torch.cuda.is_available",
"torch.sum",
"os.path.exists",
"argparse.ArgumentParser",
"torch.randn",
"torch.ones_like",
"os.path.isfile",
"torch.autograd.grad",
"time.time",
"network.ImplicitNet",
"torch.ca... | [((2201, 2270), 'numpy.sum', 'np.sum', (['((np_points[:, None, :] - np_points[None, :, :]) ** 2)'], {'axis': '(-1)'}), '((np_points[:, None, :] - np_points[None, :, :]) ** 2, axis=-1)\n', (2207, 2270), True, 'import numpy as np\n'), ((2761, 2803), 'torch.exp', 'torch.exp', (['(-knn_sqdist * inv_sigma_spatial)'], {}), '(-knn_sqdist * inv_sigma_spatial)\n', (2770, 2803), False, 'import torch\n'), ((3003, 3058), 'torch.sum', 'torch.sum', (['(spatial[..., None] * difference_proj)'], {'dim': '(-2)'}), '(spatial[..., None] * difference_proj, dim=-2)\n', (3012, 3058), False, 'import torch\n'), ((3571, 3633), 'torch.ones_like', 'torch.ones_like', (['outputs'], {'requires_grad': '(False)', 'device': 'x.device'}), '(outputs, requires_grad=False, device=x.device)\n', (3586, 3633), False, 'import torch\n'), ((3649, 3763), 'torch.autograd.grad', 'grad', ([], {'outputs': 'outputs', 'inputs': 'x', 'grad_outputs': 'd_points', 'create_graph': '(True)', 'retain_graph': '(True)', 'only_inputs': '(True)'}), '(outputs=outputs, inputs=x, grad_outputs=d_points, create_graph=True,\n retain_graph=True, only_inputs=True)\n', (3653, 3763), False, 'from torch.autograd import grad\n'), ((3905, 3997), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Modeling multi-body motion with neural implicits"""'}), "(description=\n 'Modeling multi-body motion with neural implicits')\n", (3928, 3997), False, 'import argparse\n'), ((4316, 4343), 'os.path.isfile', 'os.path.isfile', (['config_path'], {}), '(config_path)\n', (4330, 4343), False, 'import os\n'), ((4610, 4635), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4633, 4635), False, 'import torch\n'), ((4649, 4692), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (4661, 4692), False, 'import torch\n'), ((5117, 5173), 'network.ImplicitNet', 'ImplicitNet', ([], {'dim': '(3)'}), "(dim=3, 
**config['network']['implicit_args'])\n", (5128, 5173), False, 'from network import ImplicitNet, LossFunction\n'), ((5436, 5474), 'torch.cat', 'torch.cat', (['[points_n, points_u]'], {'dim': '(0)'}), '([points_n, points_u], dim=0)\n', (5445, 5474), False, 'import torch\n'), ((5773, 5784), 'time.time', 'time.time', ([], {}), '()\n', (5782, 5784), False, 'import time\n'), ((6306, 6317), 'time.time', 'time.time', ([], {}), '()\n', (6315, 6317), False, 'import time\n'), ((2302, 2351), 'torch.tensor', 'torch.tensor', (['sqdist_matrix'], {'device': 'points.device'}), '(sqdist_matrix, device=points.device)\n', (2314, 2351), False, 'import torch\n'), ((4362, 4384), 'common_tools.utils.read_json', 'read_json', (['config_path'], {}), '(config_path)\n', (4371, 4384), False, 'from common_tools.utils import read_json, draw_colored_points_to_obj, AverageMeter, SimpleLogger, catch_nan\n'), ((4961, 4984), 'os.path.exists', 'os.path.exists', (['res_dir'], {}), '(res_dir)\n', (4975, 4984), False, 'import os\n'), ((4994, 5014), 'os.makedirs', 'os.makedirs', (['res_dir'], {}), '(res_dir)\n', (5005, 5014), False, 'import os\n'), ((5048, 5084), 'os.path.join', 'os.path.join', (['res_dir', '"""config.json"""'], {}), "(res_dir, 'config.json')\n", (5060, 5084), False, 'import os\n'), ((5198, 5241), 'torch.load', 'torch.load', (['"""saved/checkpoint_epoch500.pth"""'], {}), "('saved/checkpoint_epoch500.pth')\n", (5208, 5241), False, 'import torch\n'), ((5331, 5350), 'torch.randn', 'torch.randn', (['(N, 3)'], {}), '((N, 3))\n', (5342, 5350), False, 'import torch\n'), ((5382, 5400), 'torch.rand', 'torch.rand', (['(N, 3)'], {}), '((N, 3))\n', (5392, 5400), False, 'import torch\n'), ((6042, 6092), 'common_tools.geometry.write_obj_file', 'write_obj_file', (['f"""saved_pc{i}_after.obj"""'], {'V': 'points'}), "(f'saved_pc{i}_after.obj', V=points)\n", (6056, 6092), False, 'from common_tools.geometry import read_obj_file, write_obj_file\n'), ((6224, 6275), 'common_tools.geometry.write_obj_file', 
'write_obj_file', (['f"""saved_pc{i}_before.obj"""'], {'V': 'points'}), "(f'saved_pc{i}_before.obj', V=points)\n", (6238, 6275), False, 'from common_tools.geometry import read_obj_file, write_obj_file\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 19 13:15:14 2019
@author: HP
"""
import cv2
import numpy as np
from flask import Flask,render_template
import json
# Single Flask application instance; the routes below attach to it.
app= Flask(__name__)
@app.route('/')
def hello():
    """Serve the landing page (templates/index.html)."""
    return render_template('index.html')
hand_hist = None      # hand color histogram; set in main() once 'z' is pressed
traverse_point = []   # recent fingertip positions (at most 20 kept, newest last)
total_rectangle = 9   # number of calibration sampling rectangles
# Corner coordinates of the calibration rectangles (rows -> x, cols -> y);
# filled in by draw_rect() and read by hand_histogram().
hand_rect_one_x = None
hand_rect_one_y = None
hand_rect_two_x = None
hand_rect_two_y = None
def rescale_frame(frame, wpercent=130, hpercent=130):
    """Return `frame` resized to the given width/height percentages."""
    new_w = int(frame.shape[1] * wpercent / 100)
    new_h = int(frame.shape[0] * hpercent / 100)
    return cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
def contours(hist_mask_image):
    """Return the contours found in the thresholded grayscale of `hist_mask_image`."""
    gray = cv2.cvtColor(hist_mask_image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 0, 255, 0)
    # cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x
    # but (contours, hierarchy) in 4.x; [-2] picks the contour list either
    # way, where the original 3-way unpack crashed on OpenCV 4.
    result = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return result[-2]
def max_contour(contour_list):
    """Return the contour with the largest area, or None for an empty list.

    Callers guard on a None result (see manage_image_opr); the original
    indexed contour_list[0] unconditionally and raised IndexError when no
    contour was found in the frame.
    """
    if not contour_list:
        return None
    return max(contour_list, key=cv2.contourArea)
def draw_rect(frame):
    """Draw the 9 green calibration rectangles on `frame` and return it.

    Side effect: fills the module-level hand_rect_* coordinate arrays that
    hand_histogram() later samples from.
    """
    rows, cols, _ = frame.shape
    global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
    # 3x3 grid of 10x10 rectangles around the frame center
    row_marks = np.repeat(np.array([6, 9, 12]), 3)   # [6,6,6,9,9,9,12,12,12]
    col_marks = np.tile(np.array([9, 10, 11]), 3)   # [9,10,11,9,10,11,9,10,11]
    hand_rect_one_x = (row_marks * rows / 20).astype(np.uint32)
    hand_rect_one_y = (col_marks * cols / 20).astype(np.uint32)
    hand_rect_two_x = hand_rect_one_x + 10
    hand_rect_two_y = hand_rect_one_y + 10
    for x1, y1, x2, y2 in zip(hand_rect_one_x, hand_rect_one_y,
                              hand_rect_two_x, hand_rect_two_y):
        cv2.rectangle(frame, (y1, x1), (y2, x2), (0, 255, 0), 1)
    return frame
def hand_histogram(frame):
    """Build a normalized HSV color histogram from the 9 calibration rectangles."""
    global hand_rect_one_x, hand_rect_one_y
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    roi = np.zeros([90, 10, 3], dtype=hsv.dtype)
    # stack the 9 sampled 10x10 patches into one 90x10 region of interest
    for idx in range(total_rectangle):
        x, y = hand_rect_one_x[idx], hand_rect_one_y[idx]
        roi[idx * 10: idx * 10 + 10, 0: 10] = hsv[x:x + 10, y:y + 10]
    hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
    return cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(frame, hist):
    """Mask `frame` to the pixels whose HSV colors match `hist` (back projection)."""
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    backproj = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
    # smooth the back projection with an elliptical kernel before thresholding
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
    cv2.filter2D(backproj, -1, kernel, backproj)
    _, mask = cv2.threshold(backproj, 150, 255, cv2.THRESH_BINARY)
    mask = cv2.merge((mask, mask, mask))
    return cv2.bitwise_and(frame, mask)
def centroid(max_contour):
    """Return the (x, y) center of mass of the contour, or None if its area is zero."""
    m = cv2.moments(max_contour)
    if m['m00'] == 0:
        return None
    return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])
def farthest_point(defects, contour, centroid):
    """Return the convexity-defect start point farthest from `centroid`.

    Returns None when defects/centroid are missing or the argmax index is
    out of range (defensive guard kept from the original).
    """
    if defects is None or centroid is None:
        return None
    s = defects[:, 0][:, 0]  # start-point indices of each defect
    cx, cy = centroid
    # BUGFIX: np.float was removed in NumPy 1.24 (AttributeError); the
    # plain `float` builtin is what the alias always meant.
    x = np.array(contour[s][:, 0][:, 0], dtype=float)
    y = np.array(contour[s][:, 0][:, 1], dtype=float)
    xp = cv2.pow(cv2.subtract(x, cx), 2)
    yp = cv2.pow(cv2.subtract(y, cy), 2)
    dist = cv2.sqrt(cv2.add(xp, yp))
    dist_max_i = np.argmax(dist)
    if dist_max_i >= len(s):
        return None
    return tuple(contour[s[dist_max_i]][0])
def draw_circles(frame, traverse_point):
    """Draw the fingertip trail; older points get progressively smaller dots."""
    if traverse_point is None:
        return
    for idx, pt in enumerate(traverse_point):
        radius = int(5 - (5 * idx * 3) / 100)
        cv2.circle(frame, pt, radius, [0, 255, 255], -1)
# Running scroll state; declared global in manage_image_opr() but only
# written there in commented-out code.
subsum=0
prev=0
def manage_image_opr(frame, hand_hist):
    """Detect the hand in `frame`, draw centroid/fingertip markers on it, and
    return the farthest (fingertip) point — or 0 when none was found."""
    global subsum, prev
    masked = hist_masking(frame, hand_hist)
    max_cont = max_contour(contours(masked))
    cnt_centroid = centroid(max_cont)
    cv2.circle(frame, cnt_centroid, 5, [255, 0, 255], -1)
    if max_cont is not None:
        hull = cv2.convexHull(max_cont, returnPoints=False)
        defects = cv2.convexityDefects(max_cont, hull)
        far_point = farthest_point(defects, max_cont, cnt_centroid)
        cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
        # keep a rolling window of the last 20 fingertip positions
        if len(traverse_point) >= 20:
            traverse_point.pop(0)
        traverse_point.append(far_point)
        draw_circles(frame, traverse_point)
        return far_point if far_point is not None else 0
@app.route('/main')
def main():
    """Capture webcam frames, track the fingertip, and return the vertical
    displacement between the start and the end of the gesture as JSON.

    Interaction: press 'z' to sample the hand histogram (calibration),
    Esc (key 27) to stop capturing. The response is
    {"coords_x": "<end_y_sum - start_y_sum>"}.
    """
    global hand_hist, subsum, prev
    is_hand_hist_created = False
    capture = cv2.VideoCapture(0)
    list_far_point = []
    try:
        while capture.isOpened():
            pressed_key = cv2.waitKey(1)
            _, frame = capture.read()
            if pressed_key & 0xFF == ord('z'):
                is_hand_hist_created = True
                hand_hist = hand_histogram(frame)
            if is_hand_hist_created:
                # 0 is appended when no fingertip was found in this frame
                list_far_point.append(manage_image_opr(frame, hand_hist))
            else:
                frame = draw_rect(frame)
            cv2.imshow("Live Feed", rescale_frame(frame))
            if pressed_key == 27:  # Esc ends the capture loop
                break
    finally:
        # release the camera/windows even if a frame raised (the original
        # leaked them on error)
        cv2.destroyAllWindows()
        capture.release()
    # Displacement estimate: sum the fingertip y-coordinates of the first
    # `rng` valid (non-zero) samples and of the last `rng` valid samples.
    # BUGFIX: the original loop hard-coded range(10), ignoring the computed
    # `rng`, and its `i -= 1` retry was a no-op inside a for loop.
    valid = [p for p in list_far_point if p != 0]
    rng = 10
    if len(valid) < 2 * rng:
        rng = min(5, len(valid))
    start = sum(int(p[1]) for p in valid[:rng])
    end = sum(int(p[1]) for p in valid[-rng:]) if rng else 0
    coords = {
        "coords_x": str(end - start)
    }
    return json.dumps(coords)
if __name__ == '__main__':
    # Flask development server only; use a WSGI server in production.
    app.run('127.0.0.1',debug=True)
| [
"flask.render_template",
"cv2.rectangle",
"cv2.normalize",
"flask.Flask",
"cv2.filter2D",
"cv2.convexityDefects",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.calcHist",
"cv2.calcBackProject",
"cv2.threshold",
"json.dumps",
"cv2.contourArea",
"cv2.waitKey",
"cv2.add",
"cv2.merge",
"n... | [((170, 185), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (175, 185), False, 'from flask import Flask, render_template\n'), ((227, 256), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (242, 256), False, 'from flask import Flask, render_template\n'), ((574, 638), 'cv2.resize', 'cv2.resize', (['frame', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (width, height), interpolation=cv2.INTER_AREA)\n', (584, 638), False, 'import cv2\n'), ((699, 748), 'cv2.cvtColor', 'cv2.cvtColor', (['hist_mask_image', 'cv2.COLOR_BGR2GRAY'], {}), '(hist_mask_image, cv2.COLOR_BGR2GRAY)\n', (711, 748), False, 'import cv2\n'), ((767, 813), 'cv2.threshold', 'cv2.threshold', (['gray_hist_mask_image', '(0)', '(255)', '(0)'], {}), '(gray_hist_mask_image, 0, 255, 0)\n', (780, 813), False, 'import cv2\n'), ((839, 903), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (855, 903), False, 'import cv2\n'), ((1392, 1566), 'numpy.array', 'np.array', (['[6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 * rows / 20,\n 9 * rows / 20, 12 * rows / 20, 12 * rows / 20, 12 * rows / 20]'], {'dtype': 'np.uint32'}), '([6 * rows / 20, 6 * rows / 20, 6 * rows / 20, 9 * rows / 20, 9 *\n rows / 20, 9 * rows / 20, 12 * rows / 20, 12 * rows / 20, 12 * rows / \n 20], dtype=np.uint32)\n', (1400, 1566), True, 'import numpy as np\n'), ((1599, 1776), 'numpy.array', 'np.array', (['[9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / \n 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / 20]'], {'dtype': 'np.uint32'}), '([9 * cols / 20, 10 * cols / 20, 11 * cols / 20, 9 * cols / 20, 10 *\n cols / 20, 11 * cols / 20, 9 * cols / 20, 10 * cols / 20, 11 * cols / \n 20], dtype=np.uint32)\n', (1607, 1776), True, 'import numpy as np\n'), ((2192, 2230), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 
'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2204, 2230), False, 'import cv2\n'), ((2241, 2285), 'numpy.zeros', 'np.zeros', (['[90, 10, 3]'], {'dtype': 'hsv_frame.dtype'}), '([90, 10, 3], dtype=hsv_frame.dtype)\n', (2249, 2285), True, 'import numpy as np\n'), ((2523, 2586), 'cv2.calcHist', 'cv2.calcHist', (['[roi]', '[0, 1]', 'None', '[180, 256]', '[0, 180, 0, 256]'], {}), '([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])\n', (2535, 2586), False, 'import cv2\n'), ((2598, 2658), 'cv2.normalize', 'cv2.normalize', (['hand_hist', 'hand_hist', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)\n', (2611, 2658), False, 'import cv2\n'), ((2702, 2740), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2714, 2740), False, 'import cv2\n'), ((2751, 2812), 'cv2.calcBackProject', 'cv2.calcBackProject', (['[hsv]', '[0, 1]', 'hist', '[0, 180, 0, 256]', '(1)'], {}), '([hsv], [0, 1], hist, [0, 180, 0, 256], 1)\n', (2770, 2812), False, 'import cv2\n'), ((2825, 2879), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(31, 31)'], {}), '(cv2.MORPH_ELLIPSE, (31, 31))\n', (2850, 2879), False, 'import cv2\n'), ((2884, 2916), 'cv2.filter2D', 'cv2.filter2D', (['dst', '(-1)', 'disc', 'dst'], {}), '(dst, -1, disc, dst)\n', (2896, 2916), False, 'import cv2\n'), ((2936, 2983), 'cv2.threshold', 'cv2.threshold', (['dst', '(150)', '(255)', 'cv2.THRESH_BINARY'], {}), '(dst, 150, 255, cv2.THRESH_BINARY)\n', (2949, 2983), False, 'import cv2\n'), ((3053, 3088), 'cv2.merge', 'cv2.merge', (['(thresh, thresh, thresh)'], {}), '((thresh, thresh, thresh))\n', (3062, 3088), False, 'import cv2\n'), ((3101, 3131), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'thresh'], {}), '(frame, thresh)\n', (3116, 3131), False, 'import cv2\n'), ((3174, 3198), 'cv2.moments', 'cv2.moments', (['max_contour'], {}), '(max_contour)\n', (3185, 3198), False, 'import cv2\n'), ((4523, 
4576), 'cv2.circle', 'cv2.circle', (['frame', 'cnt_centroid', '(5)', '[255, 0, 255]', '(-1)'], {}), '(frame, cnt_centroid, 5, [255, 0, 255], -1)\n', (4533, 4576), False, 'import cv2\n'), ((5674, 5693), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (5690, 5693), False, 'import cv2\n'), ((6615, 6638), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6636, 6638), False, 'import cv2\n'), ((7111, 7129), 'json.dumps', 'json.dumps', (['coords'], {}), '(coords)\n', (7121, 7129), False, 'import json\n'), ((1074, 1094), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1089, 1094), False, 'import cv2\n'), ((1919, 2044), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(hand_rect_one_y[i], hand_rect_one_x[i])', '(hand_rect_two_y[i], hand_rect_two_x[i])', '(0, 255, 0)', '(1)'], {}), '(frame, (hand_rect_one_y[i], hand_rect_one_x[i]), (\n hand_rect_two_y[i], hand_rect_two_x[i]), (0, 255, 0), 1)\n', (1932, 2044), False, 'import cv2\n'), ((3548, 3596), 'numpy.array', 'np.array', (['contour[s][:, 0][:, 0]'], {'dtype': 'np.float'}), '(contour[s][:, 0][:, 0], dtype=np.float)\n', (3556, 3596), True, 'import numpy as np\n'), ((3609, 3657), 'numpy.array', 'np.array', (['contour[s][:, 0][:, 1]'], {'dtype': 'np.float'}), '(contour[s][:, 0][:, 1], dtype=np.float)\n', (3617, 3657), True, 'import numpy as np\n'), ((3812, 3827), 'numpy.argmax', 'np.argmax', (['dist'], {}), '(dist)\n', (3821, 3827), True, 'import numpy as np\n'), ((4686, 4730), 'cv2.convexHull', 'cv2.convexHull', (['max_cont'], {'returnPoints': '(False)'}), '(max_cont, returnPoints=False)\n', (4700, 4730), False, 'import cv2\n'), ((4749, 4785), 'cv2.convexityDefects', 'cv2.convexityDefects', (['max_cont', 'hull'], {}), '(max_cont, hull)\n', (4769, 4785), False, 'import cv2\n'), ((5216, 5264), 'cv2.circle', 'cv2.circle', (['frame', 'far_point', '(5)', '[0, 0, 255]', '(-1)'], {}), '(frame, far_point, 5, [0, 0, 255], -1)\n', (5226, 5264), False, 'import cv2\n'), ((5817, 5831), 
'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5828, 5831), False, 'import cv2\n'), ((3680, 3699), 'cv2.subtract', 'cv2.subtract', (['x', 'cx'], {}), '(x, cx)\n', (3692, 3699), False, 'import cv2\n'), ((3725, 3744), 'cv2.subtract', 'cv2.subtract', (['y', 'cy'], {}), '(y, cy)\n', (3737, 3744), False, 'import cv2\n'), ((3773, 3788), 'cv2.add', 'cv2.add', (['xp', 'yp'], {}), '(xp, yp)\n', (3780, 3788), False, 'import cv2\n')] |
from pathlib import Path
from tempfile import TemporaryDirectory
import numpy as np
import torch
from agent import DqnAgent
from model import DqnModel
from replay_buffer import ReplayBuffer
from strategy import EpsilonGreedyStrategy
from torch import nn
import pytest
BATCH_SIZE = 5
@pytest.fixture
def agent():
    """Yield a DqnAgent built from a tiny linear model, a 10-slot replay
    buffer, and a fully-exploring epsilon-greedy strategy."""
    strategy = EpsilonGreedyStrategy(1.0)
    yield DqnAgent(nn.Linear(10, 2), ReplayBuffer(10), strategy)
def create_batch():
    """Assemble one random batch of BATCH_SIZE transitions.

    Returns a (states, actions, rewards, next_states, dones) tuple where
    states/next_states are (BATCH_SIZE, 10) float arrays and the other
    three are (BATCH_SIZE, 1) integer arrays.
    """
    column = (BATCH_SIZE, 1)
    states = np.random.randn(BATCH_SIZE, 10)
    actions = np.random.randint(0, 2, column)
    rewards = np.random.randint(0, 10, column)
    next_states = np.random.randn(BATCH_SIZE, 10)
    dones = np.random.randint(0, 2, column)
    return states, actions, rewards, next_states, dones
def test_initialize():
    """A freshly constructed agent initializes in train mode without error."""
    network = DqnModel(10, 2, (5,))
    dqn_agent = DqnAgent(network, None, EpsilonGreedyStrategy(1.0))
    dqn_agent.initialize(True)
def test_store_load(agent):
    """Agent state survives a store/load round trip on disk."""
    with TemporaryDirectory() as tmp_dir:
        location = Path(tmp_dir)
        agent.store(location)
        agent.load(location)
def test_get_action(agent):
    """Actions sampled in both eval and train mode stay within the 2-action range."""
    for train_mode in (False, True):
        agent.initialize(train_mode)
        for _ in range(5):
            chosen = agent.get_action(np.random.randn(10))
            assert 0 <= chosen <= 1
def test_make_tensor(agent):
    """make_tensor wraps one or several numpy arrays as torch tensors."""
    single = np.random.randn(3, 5)
    assert isinstance(agent.make_tensor(single), torch.Tensor)
    other = np.random.randn(4, 6)
    first_t, second_t = agent.make_tensor(single, other)
    assert isinstance(first_t, torch.Tensor)
    assert isinstance(second_t, torch.Tensor)
def test_train_model_dqn(agent):
    """A single vanilla-DQN training step completes on a random batch."""
    agent.initialize(True)
    agent.train_model(create_batch())
def test_train_model_ddqn(agent):
    """A single double-DQN training step completes on a random batch."""
    agent.ddqn = True
    agent.initialize(True)
    agent.train_model(create_batch())
def test_state_dict(agent):
    """state_dict() is callable; contents are printed for manual inspection."""
    print(agent.state_dict())
| [
"tempfile.TemporaryDirectory",
"model.DqnModel",
"pathlib.Path",
"replay_buffer.ReplayBuffer",
"numpy.random.randint",
"torch.nn.Linear",
"strategy.EpsilonGreedyStrategy",
"numpy.random.randn"
] | [((329, 345), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(2)'], {}), '(10, 2)\n', (338, 345), False, 'from torch import nn\n'), ((359, 375), 'replay_buffer.ReplayBuffer', 'ReplayBuffer', (['(10)'], {}), '(10)\n', (371, 375), False, 'from replay_buffer import ReplayBuffer\n'), ((491, 522), 'numpy.random.randn', 'np.random.randn', (['BATCH_SIZE', '(10)'], {}), '(BATCH_SIZE, 10)\n', (506, 522), True, 'import numpy as np\n'), ((537, 577), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(BATCH_SIZE, 1)'], {}), '(0, 2, (BATCH_SIZE, 1))\n', (554, 577), True, 'import numpy as np\n'), ((592, 633), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(BATCH_SIZE, 1)'], {}), '(0, 10, (BATCH_SIZE, 1))\n', (609, 633), True, 'import numpy as np\n'), ((652, 683), 'numpy.random.randn', 'np.random.randn', (['BATCH_SIZE', '(10)'], {}), '(BATCH_SIZE, 10)\n', (667, 683), True, 'import numpy as np\n'), ((696, 736), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(BATCH_SIZE, 1)'], {}), '(0, 2, (BATCH_SIZE, 1))\n', (713, 736), True, 'import numpy as np\n'), ((848, 869), 'model.DqnModel', 'DqnModel', (['(10)', '(2)', '(5,)'], {}), '(10, 2, (5,))\n', (856, 869), False, 'from model import DqnModel\n'), ((1371, 1392), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)'], {}), '(3, 5)\n', (1386, 1392), True, 'import numpy as np\n'), ((1480, 1501), 'numpy.random.randn', 'np.random.randn', (['(4)', '(6)'], {}), '(4, 6)\n', (1495, 1501), True, 'import numpy as np\n'), ((412, 438), 'strategy.EpsilonGreedyStrategy', 'EpsilonGreedyStrategy', (['(1.0)'], {}), '(1.0)\n', (433, 438), False, 'from strategy import EpsilonGreedyStrategy\n'), ((904, 930), 'strategy.EpsilonGreedyStrategy', 'EpsilonGreedyStrategy', (['(1.0)'], {}), '(1.0)\n', (925, 930), False, 'from strategy import EpsilonGreedyStrategy\n'), ((998, 1018), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1016, 1018), False, 'from tempfile import TemporaryDirectory\n'), 
((1042, 1051), 'pathlib.Path', 'Path', (['dir'], {}), '(dir)\n', (1046, 1051), False, 'from pathlib import Path\n'), ((1272, 1291), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (1287, 1291), True, 'import numpy as np\n')] |
# Test the GalSim interface to a PixelMapCollection
from __future__ import print_function
import pixmappy
import time
import numpy as np
import os
import galsim
def test_basic():
    """Test basic operation of the GalSimWCS class """
    # Check that building a GalSimWCS builds successfully and has some useful attributes.
    input_dir = 'input'
    yaml_file = 'test.astro'
    exp = 375294
    ccdnum = 14
    ccdname = 'S15'
    t0 = time.time()
    wcs = pixmappy.GalSimWCS(yaml_file=yaml_file, dir=input_dir, exp=exp, ccdnum=ccdnum)
    t1 = time.time() - t0
    print('wcs = ',wcs)
    print('wcs.wcs_name = ',wcs.wcs_name)
    print('time to load = ',t1)
    # wcs_name is built from the exposure number and CCD name.
    assert wcs.wcs_name == 'D%s/%s'%(exp,ccdname)
    t0 = time.time()
    wcs2 = pixmappy.GalSimWCS(yaml_file=yaml_file, dir=input_dir, exp=252223, ccdnum=11)
    t2 = time.time() - t0
    print('wcs2 = ',wcs2)
    print('time to load = ',t2)
    # NOTE(review): order-sensitive timing assertion -- it relies on the
    # first construction above having already warmed the cache.
    assert t2 < 0.1 # This should be fast since already in cache
    assert wcs2.wcs_name == 'D252223/S23'
    # Check that invalid initializations raise the appropriate errors
    # TypeError: conflicting or incomplete argument combinations.
    np.testing.assert_raises(TypeError, pixmappy.GalSimWCS, yaml_file=yaml_file, pmc=wcs.pmc,
                             wcs_name=wcs.wcs_name)
    np.testing.assert_raises(TypeError, pixmappy.GalSimWCS, wcs_name=wcs.wcs_name)
    np.testing.assert_raises(TypeError, pixmappy.GalSimWCS, pmc=wcs.pmc,
                             exp=exp, ccdnum=ccdnum, wcs_name=wcs.wcs_name)
    np.testing.assert_raises(TypeError, pixmappy.GalSimWCS, pmc=wcs.pmc)
    # IOError: file cannot be located (missing dir, bad dir, bad file).
    np.testing.assert_raises(IOError, pixmappy.GalSimWCS, yaml_file=yaml_file)
    np.testing.assert_raises(IOError, pixmappy.GalSimWCS, yaml_file=yaml_file,
                             wcs_name=wcs.wcs_name)
    np.testing.assert_raises(IOError, pixmappy.GalSimWCS, yaml_file=yaml_file, dir='foo',
                             wcs_name=wcs.wcs_name)
    np.testing.assert_raises(IOError, pixmappy.GalSimWCS, yaml_file='foo', dir=input_dir,
                             wcs_name=wcs.wcs_name)
def test_tpv():
    """The pixmappy TPV reader must agree with galsim's native FITS TPV wcs."""
    yaml_path = os.path.join('input','tpv.yaml')
    pm_wcs = pixmappy.GalSimWCS(yaml_file=yaml_path, wcs_name='testwcs')
    fits_path = os.path.join('input','tpv.fits')
    ref_wcs = galsim.FitsWCS(fits_path)
    test_points = [(1322.1, 857.2), (1, 1), (0, 0), (943.234, 234.943), (2048, 2048)]
    for x, y in test_points:
        print('coord = ', (x, y))
        pos = galsim.PositionD(x, y)
        sky_pm = pm_wcs.toWorld(pos)
        sky_ref = ref_wcs.toWorld(pos)
        print(' GalSimWCS: ', sky_pm)
        print(' FitsWCS: ', sky_ref)
        # Forward transform must match to high precision.
        np.testing.assert_allclose(sky_pm.ra.rad, sky_ref.ra.rad, rtol=1.e-8)
        np.testing.assert_allclose(sky_pm.dec.rad, sky_ref.dec.rad, rtol=1.e-8)
        # And the inverse must round-trip back to the pixel position.
        round_trip = pm_wcs.toImage(sky_pm)
        print(' Back to image: ', round_trip)
        np.testing.assert_allclose(round_trip.x, pos.x, rtol=1.e-6, atol=1.e-8)
        np.testing.assert_allclose(round_trip.y, pos.y, rtol=1.e-6, atol=1.e-8)
        jac_pm = pm_wcs.jacobian(pos).getMatrix()
        jac_ref = ref_wcs.jacobian(pos).getMatrix()
        jac_raw = pm_wcs._wcs.jacobian(x, y, step=0.0001) * 3600.  # degrees -> arcsec
        jac_raw[0,:] *= -1  # Different definition of +x
        np.testing.assert_allclose(jac_pm, jac_ref, rtol=1.e-8, atol=1.e-8)
        # Loose tolerance: the underlying Jacobian is defined on the tangent
        # plane at the projection point, not locally at the object itself.
        np.testing.assert_allclose(jac_pm, jac_raw, atol=1.e-3)
    # Vectorized interface: transform all coordinates at once.
    xy = np.array(test_points)
    ra_pm, dec_pm = pm_wcs._radec(xy[:,0], xy[:,1])
    ra_ref, dec_ref = ref_wcs._radec(xy[:,0], xy[:,1])
    np.testing.assert_allclose(ra_pm, ra_ref, rtol=1.e-8)
    np.testing.assert_allclose(dec_pm, dec_ref, rtol=1.e-8)
    x_back, y_back = pm_wcs._xy(ra_pm, dec_pm)
    np.testing.assert_allclose(x_back, xy[:,0], rtol=1.e-6, atol=1.e-8)
    np.testing.assert_allclose(y_back, xy[:,1], rtol=1.e-6, atol=1.e-8)
def test_complex():
    """Check a complex PMC solution against precomputed reference values."""
    wcs = pixmappy.GalSimWCS(yaml_file=os.path.join('input', 'complex_wcs.astro'),
                            wcs_name='TEST/N1')
    ref_table = np.genfromtxt(os.path.join('input', 'complex_wcs.results'), names=True)
    for row in ref_table:
        print(row)
        x, y, c = row['xpix'], row['ypix'], row['color']
        pos = galsim.PositionD(x, y)
        sky = wcs.toWorld(pos, color=c)
        # Forward transform matches the reference RA/Dec (in degrees).
        np.testing.assert_allclose(sky.ra / galsim.degrees, row['RA'], rtol=1.e-6)
        np.testing.assert_allclose(sky.dec / galsim.degrees, row['Dec'], rtol=1.e-6)
        # Inverse transform round-trips to the original pixel position.
        back = wcs.toImage(sky, color=c)
        np.testing.assert_allclose(back.x, pos.x, rtol=1.e-6, atol=1.e-8)
        np.testing.assert_allclose(back.y, pos.y, rtol=1.e-6, atol=1.e-8)
        jac_wcs = wcs.jacobian(pos, color=c).getMatrix()
        jac_raw = wcs._wcs.jacobian(x, y, step=0.0001, c=c) * 3600.  # degrees -> arcsec
        jac_raw[0,:] *= -1  # Different definition of +x
        np.testing.assert_allclose(jac_wcs, jac_raw, atol=1.e-2)
    # The solution includes a color term; omitting it must raise.
    np.testing.assert_raises(Exception, wcs.toWorld, pos)
def test_cache():
    """Exercise the class-level caching behaviour of GalSimWCS."""
    class MockGalSimWCS(pixmappy.GalSimWCS):
        # Same behaviour, but with an isolated cache dict so this test
        # cannot interfere with other tests sharing the real cache.
        cache = dict()
    first = MockGalSimWCS(yaml_file='input/test.astro', wcs_name='D231890/N1')
    assert len(first.cache) == 1
    assert 'input/test.astro' in first.cache
    # A second WCS from the same file reuses the cached entry.
    second = MockGalSimWCS(yaml_file='input/test.astro', wcs_name='D469524/S13')
    assert len(second.cache) == 1
    # A different file adds a second entry.
    third = MockGalSimWCS(yaml_file='input/tpv.yaml', wcs_name='testwcs')
    assert len(third.cache) == 2
    assert 'input/test.astro' in first.cache
    assert 'input/tpv.yaml' in first.cache
    # cache=False must bypass the cache entirely.
    MockGalSimWCS(yaml_file='input/complex_wcs.astro', wcs_name='TEST/N1', cache=False)
    assert len(third.cache) == 2
    assert 'input/complex_wcs.astro' not in first.cache
    first.clear_cache()
    assert len(MockGalSimWCS.cache) == 0
    # clear_cache also works as a class method:
    MockGalSimWCS.clear_cache()
    assert len(MockGalSimWCS.cache) == 0
def test_sky():
    """Test using the GalSimWCS to fill an image with constant surface brightness from the sky
    """
    import time
    import cProfile, pstats
    pr = cProfile.Profile()
    sky_level = 177
    im = galsim.Image(2048, 4096)
    wcs = pixmappy.GalSimWCS(dir='input', yaml_file='test.astro', exp=375294, ccdnum=14)
    print('wcs = ',wcs)
    # Profile just the sky construction; enable/disable must bracket the call.
    pr.enable()
    t0 = time.time()
    wcs.makeSkyImage(im, sky_level)
    t1 = time.time()
    pr.disable()
    ps = pstats.Stats(pr).sort_stats('time')
    #ps = pstats.Stats(pr).sort_stats('cumtime')
    ps.print_stats(20)
    print('made sky in %f sec'%(t1-t0))
    # NOTE(review): assumes an 'output/' directory exists in the CWD -- confirm.
    im.write('output/sky.fits')
    # Spot-check the four corners and the center: each pixel value should be
    # sky_level scaled by the local pixel area.
    for x,y in [ (im.bounds.xmin, im.bounds.ymin),
                 (im.bounds.xmax, im.bounds.ymin),
                 (im.bounds.xmin, im.bounds.ymax),
                 (im.bounds.xmax, im.bounds.ymax),
                 (im.center.x, im.center.y) ]:
        val = im(x,y)
        area = wcs.pixelArea(galsim.PositionD(x,y))
        np.testing.assert_almost_equal(val/(area*sky_level), 1., 6,
                                       "Sky image at %d,%d is wrong"%(x,y))
    print('min = ',im.array.min())
    print('max = ',im.array.max())
    print('mean = ',im.array.mean())
    print('nominal level = ',sky_level * 0.265**2)
    # For this particular exp/ccdnum, and this sky value, these are the min/max/mean values
    np.testing.assert_almost_equal(im.array.min(), 11.972244, 5)
    np.testing.assert_almost_equal(im.array.max(), 12.506965, 5)
    np.testing.assert_almost_equal(im.array.mean(), 12.243378, 5)
def test_repr():
    """Test some things like repr, pickling, etc. to make sure they are reasonable.
    In particular, the naive pickle string used to be extremely long.
    """
    # Python 2/3 compatibility: prefer the faster C pickle on py2.
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    wcs = pixmappy.GalSimWCS(dir='input', yaml_file='test.astro', exp=375294, ccdnum=14)
    wcs_str = str(wcs)
    wcs_repr = repr(wcs)
    wcs_pkl = pickle.dumps(wcs)
    print('str(wcs) = ',wcs_str)
    print('repr(wcs) = ',wcs_repr)
    # repr must round-trip through eval to an equal object.
    assert eval(wcs_repr) == wcs
    print('pickle.dumps(wcs) has len = ',len(wcs_pkl))
    # Guard against regressions that blow up the pickle size again.
    assert len(wcs_pkl) < 1.e5
    # For informational purposes to see where all the length is happening.
    # Mostly (now that _pmc is gone) in the tree-ring template.
    # The loops below only print per-attribute pickle sizes; nothing is asserted.
    print('dict = ',wcs.__dict__.keys())
    for k in wcs.__dict__:
        print('wcs.%s has len %d'%(k, len(pickle.dumps(wcs.__dict__[k]))))
    for k in wcs._wcs.__dict__:
        print('wcs._wcs.%s has len %d'%(k, len(pickle.dumps(wcs._wcs.__dict__[k]))))
    for k in wcs._wcs.pmap.__dict__:
        print('wcs._wcs.pmap.%s has len %d'%(k, len(pickle.dumps(wcs._wcs.pmap.__dict__[k]))))
    for k in wcs._wcs.pmap.elements:
        print('wcs._wcs.pmap.elements.%s has len %d'%(k, len(pickle.dumps(k))))
    for k in wcs._wcs.pmap.elements[0].__dict__:
        print('wcs._wcs.pmap.elements[0].%s has len %d'%(k, len(pickle.dumps(wcs._wcs.pmap.elements[0].__dict__[k]))))
    for k in wcs._wcs.pmap.elements[0].elements:
        print('wcs._wcs.pmap.elements[0].elements.%s has len %d'%(k, len(pickle.dumps(k))))
    for k in wcs._wcs.pmap.elements[0].elements[0].__dict__:
        print('wcs._wcs.pmap.elements[0].elements[0].%s has len %d'%(k, len(pickle.dumps(wcs._wcs.pmap.elements[0].elements[0].__dict__[k]))))
    # Unpickling must reproduce an equal WCS.
    assert pickle.loads(wcs_pkl) == wcs
def test_config():
    """'Pixmappy' must be usable as a wcs type inside a GalSim config dict."""
    config = {
        "modules": ["pixmappy"],
        "image": {
            "wcs": {
                "type": "Pixmappy",
                "dir": "input",
                "yaml_file": "test.astro",
                "exp": 375294,
                "ccdnum": 14,
            }
        },
    }
    galsim.config.ImportModules(config)
    built = galsim.config.BuildWCS(config["image"], "wcs", config)
    # Building directly must yield an equal object.
    direct = pixmappy.GalSimWCS(yaml_file='test.astro', dir='input', exp=375294, ccdnum=14)
    print('wcs1 = ', built)
    print('wcs2 = ', direct)
    assert built == direct
if __name__ == '__main__':
    # Allow running the whole suite directly, without a test runner.
    test_basic()
    test_tpv()
    test_complex()
    test_cache()
    test_sky()
    test_repr()
    test_config()
| [
"pixmappy.GalSimWCS",
"galsim.config.ImportModules",
"galsim.config.BuildWCS",
"pickle.dumps",
"numpy.testing.assert_allclose",
"os.path.join",
"numpy.testing.assert_raises",
"galsim.PositionD",
"numpy.array",
"numpy.testing.assert_almost_equal",
"pstats.Stats",
"galsim.Image",
"pickle.loads... | [((442, 453), 'time.time', 'time.time', ([], {}), '()\n', (451, 453), False, 'import time\n'), ((464, 542), 'pixmappy.GalSimWCS', 'pixmappy.GalSimWCS', ([], {'yaml_file': 'yaml_file', 'dir': 'input_dir', 'exp': 'exp', 'ccdnum': 'ccdnum'}), '(yaml_file=yaml_file, dir=input_dir, exp=exp, ccdnum=ccdnum)\n', (482, 542), False, 'import pixmappy\n'), ((728, 739), 'time.time', 'time.time', ([], {}), '()\n', (737, 739), False, 'import time\n'), ((751, 828), 'pixmappy.GalSimWCS', 'pixmappy.GalSimWCS', ([], {'yaml_file': 'yaml_file', 'dir': 'input_dir', 'exp': '(252223)', 'ccdnum': '(11)'}), '(yaml_file=yaml_file, dir=input_dir, exp=252223, ccdnum=11)\n', (769, 828), False, 'import pixmappy\n'), ((1096, 1212), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['TypeError', 'pixmappy.GalSimWCS'], {'yaml_file': 'yaml_file', 'pmc': 'wcs.pmc', 'wcs_name': 'wcs.wcs_name'}), '(TypeError, pixmappy.GalSimWCS, yaml_file=yaml_file,\n pmc=wcs.pmc, wcs_name=wcs.wcs_name)\n', (1120, 1212), True, 'import numpy as np\n'), ((1242, 1320), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['TypeError', 'pixmappy.GalSimWCS'], {'wcs_name': 'wcs.wcs_name'}), '(TypeError, pixmappy.GalSimWCS, wcs_name=wcs.wcs_name)\n', (1266, 1320), True, 'import numpy as np\n'), ((1325, 1445), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['TypeError', 'pixmappy.GalSimWCS'], {'pmc': 'wcs.pmc', 'exp': 'exp', 'ccdnum': 'ccdnum', 'wcs_name': 'wcs.wcs_name'}), '(TypeError, pixmappy.GalSimWCS, pmc=wcs.pmc, exp=\n exp, ccdnum=ccdnum, wcs_name=wcs.wcs_name)\n', (1349, 1445), True, 'import numpy as np\n'), ((1474, 1542), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['TypeError', 'pixmappy.GalSimWCS'], {'pmc': 'wcs.pmc'}), '(TypeError, pixmappy.GalSimWCS, pmc=wcs.pmc)\n', (1498, 1542), True, 'import numpy as np\n'), ((1547, 1621), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['IOError', 'pixmappy.GalSimWCS'], {'yaml_file': 'yaml_file'}), 
'(IOError, pixmappy.GalSimWCS, yaml_file=yaml_file)\n', (1571, 1621), True, 'import numpy as np\n'), ((1626, 1727), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['IOError', 'pixmappy.GalSimWCS'], {'yaml_file': 'yaml_file', 'wcs_name': 'wcs.wcs_name'}), '(IOError, pixmappy.GalSimWCS, yaml_file=yaml_file,\n wcs_name=wcs.wcs_name)\n', (1650, 1727), True, 'import numpy as np\n'), ((1757, 1869), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['IOError', 'pixmappy.GalSimWCS'], {'yaml_file': 'yaml_file', 'dir': '"""foo"""', 'wcs_name': 'wcs.wcs_name'}), "(IOError, pixmappy.GalSimWCS, yaml_file=yaml_file,\n dir='foo', wcs_name=wcs.wcs_name)\n", (1781, 1869), True, 'import numpy as np\n'), ((1899, 2012), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['IOError', 'pixmappy.GalSimWCS'], {'yaml_file': '"""foo"""', 'dir': 'input_dir', 'wcs_name': 'wcs.wcs_name'}), "(IOError, pixmappy.GalSimWCS, yaml_file='foo', dir=\n input_dir, wcs_name=wcs.wcs_name)\n", (1923, 2012), True, 'import numpy as np\n'), ((2151, 2184), 'os.path.join', 'os.path.join', (['"""input"""', '"""tpv.yaml"""'], {}), "('input', 'tpv.yaml')\n", (2163, 2184), False, 'import os\n'), ((2195, 2254), 'pixmappy.GalSimWCS', 'pixmappy.GalSimWCS', ([], {'yaml_file': 'yaml_file', 'wcs_name': '"""testwcs"""'}), "(yaml_file=yaml_file, wcs_name='testwcs')\n", (2213, 2254), False, 'import pixmappy\n'), ((2271, 2304), 'os.path.join', 'os.path.join', (['"""input"""', '"""tpv.fits"""'], {}), "('input', 'tpv.fits')\n", (2283, 2304), False, 'import os\n'), ((2315, 2340), 'galsim.FitsWCS', 'galsim.FitsWCS', (['fits_name'], {}), '(fits_name)\n', (2329, 2340), False, 'import galsim\n'), ((3673, 3689), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (3681, 3689), True, 'import numpy as np\n'), ((3786, 3834), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ra1', 'ra2'], {'rtol': '(1e-08)'}), '(ra1, ra2, rtol=1e-08)\n', (3812, 3834), True, 'import numpy as np\n'), 
((3839, 3889), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dec1', 'dec2'], {'rtol': '(1e-08)'}), '(dec1, dec2, rtol=1e-08)\n', (3865, 3889), True, 'import numpy as np\n'), ((3928, 3992), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['x1', 'xy[:, 0]'], {'rtol': '(1e-06)', 'atol': '(1e-08)'}), '(x1, xy[:, 0], rtol=1e-06, atol=1e-08)\n', (3954, 3992), True, 'import numpy as np\n'), ((3996, 4060), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['y1', 'xy[:, 1]'], {'rtol': '(1e-06)', 'atol': '(1e-08)'}), '(y1, xy[:, 1], rtol=1e-06, atol=1e-08)\n', (4022, 4060), True, 'import numpy as np\n'), ((5257, 5310), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['Exception', 'wcs.toWorld', 'pos'], {}), '(Exception, wcs.toWorld, pos)\n', (5281, 5310), True, 'import numpy as np\n'), ((6588, 6606), 'cProfile.Profile', 'cProfile.Profile', ([], {}), '()\n', (6604, 6606), False, 'import cProfile, pstats\n'), ((6637, 6661), 'galsim.Image', 'galsim.Image', (['(2048)', '(4096)'], {}), '(2048, 4096)\n', (6649, 6661), False, 'import galsim\n'), ((6672, 6750), 'pixmappy.GalSimWCS', 'pixmappy.GalSimWCS', ([], {'dir': '"""input"""', 'yaml_file': '"""test.astro"""', 'exp': '(375294)', 'ccdnum': '(14)'}), "(dir='input', yaml_file='test.astro', exp=375294, ccdnum=14)\n", (6690, 6750), False, 'import pixmappy\n'), ((6801, 6812), 'time.time', 'time.time', ([], {}), '()\n', (6810, 6812), False, 'import time\n'), ((6858, 6869), 'time.time', 'time.time', ([], {}), '()\n', (6867, 6869), False, 'import time\n'), ((8274, 8352), 'pixmappy.GalSimWCS', 'pixmappy.GalSimWCS', ([], {'dir': '"""input"""', 'yaml_file': '"""test.astro"""', 'exp': '(375294)', 'ccdnum': '(14)'}), "(dir='input', yaml_file='test.astro', exp=375294, ccdnum=14)\n", (8292, 8352), False, 'import pixmappy\n'), ((8416, 8433), 'pickle.dumps', 'pickle.dumps', (['wcs'], {}), '(wcs)\n', (8428, 8433), False, 'import pickle\n'), ((10230, 10265), 
'galsim.config.ImportModules', 'galsim.config.ImportModules', (['config'], {}), '(config)\n', (10257, 10265), False, 'import galsim\n'), ((10277, 10331), 'galsim.config.BuildWCS', 'galsim.config.BuildWCS', (["config['image']", '"""wcs"""', 'config'], {}), "(config['image'], 'wcs', config)\n", (10299, 10331), False, 'import galsim\n'), ((10343, 10421), 'pixmappy.GalSimWCS', 'pixmappy.GalSimWCS', ([], {'yaml_file': '"""test.astro"""', 'dir': '"""input"""', 'exp': '(375294)', 'ccdnum': '(14)'}), "(yaml_file='test.astro', dir='input', exp=375294, ccdnum=14)\n", (10361, 10421), False, 'import pixmappy\n'), ((552, 563), 'time.time', 'time.time', ([], {}), '()\n', (561, 563), False, 'import time\n'), ((838, 849), 'time.time', 'time.time', ([], {}), '()\n', (847, 849), False, 'import time\n'), ((2494, 2518), 'galsim.PositionD', 'galsim.PositionD', (['*coord'], {}), '(*coord)\n', (2510, 2518), False, 'import galsim\n'), ((2663, 2727), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sky1.ra.rad', 'sky2.ra.rad'], {'rtol': '(1e-08)'}), '(sky1.ra.rad, sky2.ra.rad, rtol=1e-08)\n', (2689, 2727), True, 'import numpy as np\n'), ((2736, 2802), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sky1.dec.rad', 'sky2.dec.rad'], {'rtol': '(1e-08)'}), '(sky1.dec.rad, sky2.dec.rad, rtol=1e-08)\n', (2762, 2802), True, 'import numpy as np\n'), ((2887, 2952), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pos1.x', 'pos.x'], {'rtol': '(1e-06)', 'atol': '(1e-08)'}), '(pos1.x, pos.x, rtol=1e-06, atol=1e-08)\n', (2913, 2952), True, 'import numpy as np\n'), ((2961, 3026), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pos1.y', 'pos.y'], {'rtol': '(1e-06)', 'atol': '(1e-08)'}), '(pos1.y, pos.y, rtol=1e-06, atol=1e-08)\n', (2987, 3026), True, 'import numpy as np\n'), ((3278, 3340), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac1', 'jac2'], {'rtol': '(1e-08)', 'atol': '(1e-08)'}), '(jac1, jac2, rtol=1e-08, 
atol=1e-08)\n', (3304, 3340), True, 'import numpy as np\n'), ((3576, 3626), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac1', 'jac3'], {'atol': '(0.001)'}), '(jac1, jac3, atol=0.001)\n', (3602, 3626), True, 'import numpy as np\n'), ((4274, 4318), 'os.path.join', 'os.path.join', (['"""input"""', '"""complex_wcs.results"""'], {}), "('input', 'complex_wcs.results')\n", (4286, 4318), False, 'import os\n'), ((4459, 4481), 'galsim.PositionD', 'galsim.PositionD', (['x', 'y'], {}), '(x, y)\n', (4475, 4481), False, 'import galsim\n'), ((4605, 4658), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ra', "row['RA']"], {'rtol': '(1e-06)'}), "(ra, row['RA'], rtol=1e-06)\n", (4631, 4658), True, 'import numpy as np\n'), ((4667, 4722), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dec', "row['Dec']"], {'rtol': '(1e-06)'}), "(dec, row['Dec'], rtol=1e-06)\n", (4693, 4722), True, 'import numpy as np\n'), ((4773, 4838), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pos1.x', 'pos.x'], {'rtol': '(1e-06)', 'atol': '(1e-08)'}), '(pos1.x, pos.x, rtol=1e-06, atol=1e-08)\n', (4799, 4838), True, 'import numpy as np\n'), ((4847, 4912), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['pos1.y', 'pos.y'], {'rtol': '(1e-06)', 'atol': '(1e-08)'}), '(pos1.y, pos.y, rtol=1e-06, atol=1e-08)\n', (4873, 4912), True, 'import numpy as np\n'), ((5116, 5165), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['jac1', 'jac2'], {'atol': '(0.01)'}), '(jac1, jac2, atol=0.01)\n', (5142, 5165), True, 'import numpy as np\n'), ((7411, 7520), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(val / (area * sky_level))', '(1.0)', '(6)', "('Sky image at %d,%d is wrong' % (x, y))"], {}), "(val / (area * sky_level), 1.0, 6, \n 'Sky image at %d,%d is wrong' % (x, y))\n", (7441, 7520), True, 'import numpy as np\n'), ((9797, 9818), 'pickle.loads', 'pickle.loads', (['wcs_pkl'], {}), 
'(wcs_pkl)\n', (9809, 9818), False, 'import pickle\n'), ((4186, 4228), 'os.path.join', 'os.path.join', (['"""input"""', '"""complex_wcs.astro"""'], {}), "('input', 'complex_wcs.astro')\n", (4198, 4228), False, 'import os\n'), ((6897, 6913), 'pstats.Stats', 'pstats.Stats', (['pr'], {}), '(pr)\n', (6909, 6913), False, 'import cProfile, pstats\n'), ((7380, 7402), 'galsim.PositionD', 'galsim.PositionD', (['x', 'y'], {}), '(x, y)\n', (7396, 7402), False, 'import galsim\n'), ((8873, 8902), 'pickle.dumps', 'pickle.dumps', (['wcs.__dict__[k]'], {}), '(wcs.__dict__[k])\n', (8885, 8902), False, 'import pickle\n'), ((8985, 9019), 'pickle.dumps', 'pickle.dumps', (['wcs._wcs.__dict__[k]'], {}), '(wcs._wcs.__dict__[k])\n', (8997, 9019), False, 'import pickle\n'), ((9112, 9151), 'pickle.dumps', 'pickle.dumps', (['wcs._wcs.pmap.__dict__[k]'], {}), '(wcs._wcs.pmap.__dict__[k])\n', (9124, 9151), False, 'import pickle\n'), ((9253, 9268), 'pickle.dumps', 'pickle.dumps', (['k'], {}), '(k)\n', (9265, 9268), False, 'import pickle\n'), ((9385, 9436), 'pickle.dumps', 'pickle.dumps', (['wcs._wcs.pmap.elements[0].__dict__[k]'], {}), '(wcs._wcs.pmap.elements[0].__dict__[k])\n', (9397, 9436), False, 'import pickle\n'), ((9562, 9577), 'pickle.dumps', 'pickle.dumps', (['k'], {}), '(k)\n', (9574, 9577), False, 'import pickle\n'), ((9718, 9781), 'pickle.dumps', 'pickle.dumps', (['wcs._wcs.pmap.elements[0].elements[0].__dict__[k]'], {}), '(wcs._wcs.pmap.elements[0].elements[0].__dict__[k])\n', (9730, 9781), False, 'import pickle\n')] |
#%% First
import numpy as np
import json
import os
from numpy.lib.type_check import _asfarray_dispatcher
import pandas as pd
import requests
from contextlib import closing
import time
from datetime import datetime
import seaborn as sns
from matplotlib import pyplot as plt
# Make this script's own directory the working directory so the relative
# paths used below (guild list, api key, output files) resolve no matter
# where the script is launched from.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# %% Define Functions
def is_good_response_json(resp):
    """
    Return True if the response is a successful (HTTP 200) JSON response.

    Bug fix: uses headers.get with a default instead of indexing, so a
    response without a Content-Type header is reported as not-JSON rather
    than raising KeyError.  (The original's `is not None` check was dead
    code, since `.lower()` had already been called on the value.)
    """
    content_type = resp.headers.get('Content-Type', '')
    return (resp.status_code == 200
            and 'json' in content_type.lower())
def simple_request(url):
    """
    Perform a GET on *url* and return the response if it looks like good JSON.

    Returns None for a bad or non-JSON response.  Network-level errors are
    caught, logged, and converted to a None return.
    """
    try:
        with requests.get(url) as resp:
            if is_good_response_json(resp):
                return resp
            else:
                return None
    # Bug fix: the original caught a bare name `RequestException`, which was
    # never imported in this module, so any network failure raised NameError
    # instead of being handled here.
    except requests.exceptions.RequestException as e:
        print('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def get_all_logs(guild_info, api_key):
    """Fetch one guild's Castle Nathria era boss pulls from the Warcraft Logs v1 API.

    Only reports dated between 2020-11-20 and 2021-06-05 are kept, and only
    difficulty-5 boss fights within them.  Returns a DataFrame with one row
    per pull (name, kill, end_perc, zoneDifficulty, start_time, end_time).
    """
    link = "https://www.warcraftlogs.com:443/v1/reports/guild/" + \
        guild_info['guild_name'] + "/" + guild_info['realm'] + "/" + \
        guild_info['region'] + "?api_key=" + api_key
    guild_logs = requests.get(link)
    # if guild_logs.status_code != 200:
    #     raise Exception('Invalid guild info. Name: '+guild_info['guild_name'] + \
    #         ', Realm: '+guild_info['realm'] + \
    #         ', Region: '+guild_info['region'])
    log_list = guild_logs.json()
    fight_link = 'https://www.warcraftlogs.com:443/v1/report/fights/'
    pull_df = []
    # Release dates converted to epoch milliseconds to match the API's 'start'.
    sl_release_ms = datetime.fromisoformat('2020-11-20').timestamp()*1000
    sanctum_release_ms = datetime.fromisoformat('2021-06-05').timestamp()*1000
    past_day = 0
    fight_starts_ms = []
    for k, single_log in enumerate(log_list):
        # Breaking (not continuing) here presumes the report list is sorted
        # newest-first -- TODO confirm against the API.
        if single_log['start'] < sl_release_ms:
            break
        if single_log['start'] > sanctum_release_ms:
            continue
        date = datetime.fromtimestamp(single_log['start']/1000)  # NOTE(review): unused
        log_start_ms = single_log['start']
        fight_id = single_log['id']
        log = simple_request(fight_link + fight_id + '?api_key=' + api_key)
        # time.sleep(.25)
        if log:
            log = log.json()
            log_day = datetime.fromtimestamp(log['start']/1000).day
            if log_day != past_day:
                # NOTE(review): this assignment looks reversed -- it clobbers
                # log_day and never updates past_day, so this branch (and the
                # commented reset below) has no lasting effect.  Confirm intent.
                log_day = past_day
                # fight_starts_ms = []
            if 'fights' in log.keys():
                for fight in log['fights']:
                    # Skip non-boss fights (boss == 0), non-difficulty-5 fights,
                    # and fights starting within 30s of one already recorded
                    # (de-duplicates fights appearing in overlapping logs).
                    if fight['boss'] == 0 or fight['difficulty'] != 5 or \
                        (len(fight_starts_ms)>0 and \
                        np.min(abs(np.array(fight_starts_ms) - (log_start_ms+fight['start_time'])))<(30*1000)):
                        continue
                    else:
                        fight_starts_ms.append(log_start_ms+fight['start_time'])
                        if fight['boss'] != 0:
                            pull_df.append({'name': fight['name'],
                                'kill': fight['kill'],
                                'end_perc': fight['bossPercentage']/100,
                                'zoneDifficulty': fight['difficulty'],
                                'start_time': log_start_ms+fight['start_time'],
                                'end_time': log_start_ms+fight['end_time']})
        if k % 25 == 0:
            print(k)  # coarse progress indicator
    return pd.DataFrame(pull_df)
def dump_to_json(df, guild_info, prog):
    """Serialize *df* to JSON on disk in the current working directory.

    Writes '<guild>_prog_pulls.json' when prog == 1, '<guild>_pulls.json'
    otherwise.  Bug fix: the non-prog branch previously announced
    '_prog_pulls.json' while actually writing '_pulls.json'.
    """
    json_pulls = df.to_json()
    if prog == 1:
        filename = guild_info['guild_name'] + '_prog_pulls.json'
    else:
        filename = guild_info['guild_name'] + '_pulls.json'
    print('Dumping Json to: ' + filename)
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(json_pulls, f, ensure_ascii=False, indent=4)
def add_boss_nums(df):
    """Assign a 'boss_num' ordinal (0-9, list position below) to each row by boss name.

    Rows whose 'name' is not in the list are left untouched.  Mutates and
    returns *df*.  (Removed the unused 'boss_nums' list the original
    defined but never read.)
    """
    boss_names = [
        'Shriekwing', \
        '<NAME>',
        '<NAME>', \
        "Sun King's Salvation",
        "<NAME>", \
        '<NAME>', \
        'The Council of Blood', \
        'Sludgefist', \
        'Stone Legion Generals', \
        '<NAME>']
    for k, item in enumerate(boss_names):
        df.loc[df.index[df['name'] == item],'boss_num'] = k
    return df
def get_prog_pulls(df, boss_name):
    """Return every zoneDifficulty-5 pull of *boss_name* from *df*.

    NOTE(review): first_kill_time is computed but unused -- the return that
    filtered pulls to 'start_time <= first kill' is commented out below, so
    this currently returns ALL difficulty-5 pulls, not just progression
    pulls.  Also, the else-branch min() raises ValueError if *boss_name*
    never appears in *df*.  Confirm which behaviour is intended.
    """
    # if type(df.iloc[0]['start_time']) != 'int':
    #     df['start_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['start_time']]
    #     df['end_time'] = [time.mktime(x.to_pydatetime().timetuple()) for x in df['end_time']]
    kills_df = df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('kill == True')
    if len(kills_df['kill'])>0:
        first_kill_time = min(kills_df['start_time'])
    else:
        first_kill_time = min(df.query('name == "'+boss_name+'"')['start_time'])
    # return df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5').query('start_time <= '+str(first_kill_time))
    return df.query('name == "'+boss_name+'"').query('zoneDifficulty == 5')
def add_pull_num(df):
    """Return *df* ordered by start_time with a 1-based 'pull_num' first column."""
    ordered = df.sort_values(by=['start_time'])
    ordered.insert(loc=0, column='pull_num', value=np.arange(len(ordered)) + 1)
    return ordered
def combine_boss_df(df):
    """Build the per-boss pull table for every recognised boss present in *df*.

    For each Castle Nathria boss appearing in df, extracts its difficulty-5
    pulls (get_prog_pulls) and numbers them chronologically (add_pull_num),
    then concatenates everything.  Returns an empty DataFrame when no
    recognised boss is present.
    """
    boss_names = [
        'Shriekwing',
        '<NAME>',
        '<NAME>',
        "Sun King's Salvation",
        "<NAME>",
        '<NAME>',
        'The Council of Blood',
        'Sludgefist',
        'Stone Legion Generals',
        '<NAME>']
    pieces = []
    for boss_name in np.unique(df['name']):
        # (The original also re-tested membership in np.unique(df['name']),
        # which is always true inside this loop.)
        if boss_name in boss_names:
            pieces.append(add_pull_num(get_prog_pulls(df.copy(deep=True), boss_name)))
    # pd.concat replaces DataFrame.append, which was deprecated in pandas
    # 1.4 and removed in 2.0; row order is the same.
    return pd.concat(pieces) if pieces else pd.DataFrame()
# Open guild list
# Load the precomputed guild list (dicts with name / realm / region),
# produced by the get_guild_list step.
with open('../get_guild_list/guild_list_hungering.json', encoding='utf-8') as f:
    guilds = json.load(f)
# %% Setup the SQL Stuff
from sqlalchemy import create_engine
import psycopg2
# Local Postgres connection settings.  '<PASSWORD>' is a scrubbed
# placeholder -- fill in real credentials before running.
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = '<PASSWORD>'
engine = create_engine('postgresql://postgres:postgres@localhost:5432/nathria_prog')
conn = psycopg2.connect('host='+server+' dbname='+database+' user='+username+' password='+password)
curs = conn.cursor()
curs.execute('select * from "nathria_prog";')
temp_df = pd.DataFrame(curs.fetchall())
temp_df.columns = [desc[0] for desc in curs.description]
np.unique(temp_df['guild_name'])  # NOTE(review): result is discarded
# Resume support: if the target table already exists, collect the guilds
# it contains so they can be skipped in the scrape loop below.
curs.execute("select exists(select * from information_schema.tables where table_name=%s)",\
    ('nathria_prog_allpulls',))
if curs.fetchone()[0]:
    # curs.execute("select * from nathria_prog_allpulls where guild_name = '"+str(guild_name)+"'")
    curs.execute('select distinct guild_name from nathria_prog_allpulls')
    already_added_guilds = [item[0] for item in curs.fetchall()]
    already_added_length = len(already_added_guilds)
else:
    already_added_guilds = []
    already_added_length = 0
# if len(already_added_guilds) == 1:
# print('Guild: '+str(guild_name)+' already in SQL table. Continuing...')
# return None
# curs.execute('select distinct guild_name from nathria_prog_allpulls')
# already_added_guilds = [item[0] for item in curs.fetchall()]
# already_added_length = len(already_added_guilds)
# %% Get new data.
# DC is guild 725
# for guild_num in np.arange(len(guilds)):
# guild_num = 1075
# Main scrape loop: one guild per iteration, skipping guilds already
# present in the nathria_prog_allpulls table.
for guild_num in np.arange(1,len(guilds)):
    guild_info = {'guild_name': guilds[guild_num]['name'],
        'realm': guilds[guild_num]['realm'].replace(' ','-').replace("'",''),
        'region': guilds[guild_num]['region']}
    if not guild_info['guild_name'] in already_added_guilds:
        print('Pulling data from '+guild_info['guild_name']+'. Number '+str(guild_num)+'.')
        with open('..//..//Warcraftlogs//api_key.txt.') as f:
            api_key = f.readlines()[0]
        # NOTE(review): the bare except silently skips any guild whose API
        # pull fails, for any reason -- consider logging the error.
        try:
            pulls = get_all_logs(guild_info = guild_info, api_key = api_key)
        except:
            continue
        if len(pulls) != 0:
            pulls['boss_num'] = np.zeros(len(pulls))
            pulls = add_boss_nums(pulls)
            prog_pulls = combine_boss_df(pulls.copy(deep = True))
            prog_pulls['guild_name'] = guild_info['guild_name']
            prog_pulls['realm'] = guild_info['realm']
            prog_pulls['region'] = guild_info['region']
            prog_pulls['guild_num'] = guild_num
            # NOTE(review): 'assadfasdf' is an undefined name -- it raises
            # NameError here, halting the script before the DB write below.
            # Looks like a deliberate debugging stop; confirm and remove it
            # to actually enable the to_sql write.
            assadfasdf
            # if not guild_info['guild_name'] in np.unique(already_added_guilds):
            print('Adding guild '+guild_info['guild_name']+' to nathria_prog postgressql table.')
            prog_pulls.to_sql('nathria_prog_allpulls', engine, if_exists='append')
        # curs.execute('select distinct guild_name from nathria_prog')
        # pull_length = len([item[0] for item in curs.fetchall()])
        # if already_added_length == pull_length:
        #     break
        # else:
        #     already_added_length = pull_length
    # except:
    #     print("Couldn't pull Name: "+guild_i6+52
    #     nfo['guild_name'] + \
    # nfo['guild_name'] + \
    #     ', Realm: '+guild_info['realm'] + \
    #     ', Region: '+guild_info['region'])
#%% Filling in with 0's
fdsa
from sqlalchemy import create_engine
import psycopg2
server = 'localhost'
database = 'nathria_prog'
username = 'postgres'
password = '<PASSWORD>'
if 'conn' in locals(): C | [
"psycopg2.connect",
"datetime.datetime.fromtimestamp",
"numpy.unique",
"pandas.DataFrame",
"sqlalchemy.create_engine",
"requests.get",
"os.chdir",
"os.path.dirname",
"json.load",
"numpy.array",
"datetime.datetime.fromisoformat",
"os.path.abspath",
"json.dump"
] | [((284, 309), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (299, 309), False, 'import os\n'), ((318, 342), 'os.path.dirname', 'os.path.dirname', (['abspath'], {}), '(abspath)\n', (333, 342), False, 'import os\n'), ((343, 358), 'os.chdir', 'os.chdir', (['dname'], {}), '(dname)\n', (351, 358), False, 'import os\n'), ((6593, 6668), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://postgres:postgres@localhost:5432/nathria_prog"""'], {}), "('postgresql://postgres:postgres@localhost:5432/nathria_prog')\n", (6606, 6668), False, 'from sqlalchemy import create_engine\n'), ((6676, 6786), 'psycopg2.connect', 'psycopg2.connect', (["('host=' + server + ' dbname=' + database + ' user=' + username +\n ' password=' + password)"], {}), "('host=' + server + ' dbname=' + database + ' user=' +\n username + ' password=' + password)\n", (6692, 6786), False, 'import psycopg2\n'), ((6933, 6965), 'numpy.unique', 'np.unique', (["temp_df['guild_name']"], {}), "(temp_df['guild_name'])\n", (6942, 6965), True, 'import numpy as np\n'), ((1428, 1446), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (1440, 1446), False, 'import requests\n'), ((3717, 3738), 'pandas.DataFrame', 'pd.DataFrame', (['pull_df'], {}), '(pull_df)\n', (3729, 3738), True, 'import pandas as pd\n'), ((6012, 6026), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6024, 6026), True, 'import pandas as pd\n'), ((6398, 6410), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6407, 6410), False, 'import json\n'), ((2212, 2262), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(single_log['start'] / 1000)"], {}), "(single_log['start'] / 1000)\n", (2234, 2262), False, 'from datetime import datetime\n'), ((6061, 6082), 'numpy.unique', 'np.unique', (["df['name']"], {}), "(df['name'])\n", (6070, 6082), True, 'import numpy as np\n'), ((893, 910), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (905, 910), False, 'import requests\n'), ((4017, 4071), 
'json.dump', 'json.dump', (['json_pulls', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(json_pulls, f, ensure_ascii=False, indent=4)\n', (4026, 4071), False, 'import json\n'), ((4268, 4322), 'json.dump', 'json.dump', (['json_pulls', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(json_pulls, f, ensure_ascii=False, indent=4)\n', (4277, 4322), False, 'import json\n'), ((1835, 1871), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2020-11-20"""'], {}), "('2020-11-20')\n", (1857, 1871), False, 'from datetime import datetime\n'), ((1914, 1950), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2021-06-05"""'], {}), "('2021-06-05')\n", (1936, 1950), False, 'from datetime import datetime\n'), ((2510, 2553), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (["(log['start'] / 1000)"], {}), "(log['start'] / 1000)\n", (2532, 2553), False, 'from datetime import datetime\n'), ((6137, 6158), 'numpy.unique', 'np.unique', (["df['name']"], {}), "(df['name'])\n", (6146, 6158), True, 'import numpy as np\n'), ((2913, 2938), 'numpy.array', 'np.array', (['fight_starts_ms'], {}), '(fight_starts_ms)\n', (2921, 2938), True, 'import numpy as np\n')] |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import math
from glob import glob
import cv2
import random
import copy
import numpy as np
import imageio
from skimage import measure
import logging
import subprocess as sp
# Resolve all data paths relative to this script so it works from any CWD.
ROOT = os.path.abspath(os.path.dirname(__file__))
DATA_DIR = os.path.join(ROOT, 'data')
class Sample:
    """One training sample: an RGB image plus its per-instance binary masks.

    Expects the DSB2018-style layout: ``folder/images/*.png`` holding exactly
    one image and ``folder/masks/*.png`` holding one mask per object.
    """

    def __init__ (self, folder):
        # Locate the single image file; the asserts enforce "exactly one PNG".
        path = None
        for p in glob(os.path.join(folder, "images/*")):
            assert ".png" in p
            assert path is None
            path = p
            pass
        image = cv2.imread(path, -1)  # -1: load unchanged (may include alpha)
        if len(image.shape) == 2:
            # Sanity check: gray input is unexpected here.
            # In case of gray image data in private phase
            # remove the assertion so the gray->BGR conversion runs.
            assert False
            image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
        else:
            assert len(image.shape) == 3
            if image.shape[2] == 4:
                # Drop a fully-opaque alpha channel.
                assert (image[:, :, 3] == 255).all()
                image = image[:, :, :3]
                pass
            assert image.shape[2] == 3
        self.image = image
        mask_shape = image.shape[:2]
        masks = []
        for path in glob(os.path.join(folder, "masks/*")):
            assert ".png" in path
            mask = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
            assert mask.shape == mask_shape
            mask = np.clip(mask, 0, 1)  # binarize: any positive value -> 1
            masks.append(mask)
        self.masks = masks
        pass

    def mask_sum (self, dilate = None):
        """Return the float sum of all masks, optionally dilated/eroded first.

        dilate > 0 dilates each mask, dilate < 0 erodes it, None leaves the
        masks unchanged.  Values > 1 in the result mark overlapping objects.
        """
        # BUG FIX: np.float was removed in NumPy 1.24; use np.float64.
        v = np.zeros(self.image.shape[:2], dtype=np.float64)
        for mask in self.masks:
            if dilate is None:
                v += mask
            elif dilate > 0:
                # BUG FIX: cv2.dilate/erode require an ndarray kernel,
                # not a bare (w, h) tuple.
                v += cv2.dilate(mask, np.ones((dilate, dilate), np.uint8))
            elif dilate < 0:
                # !!! check
                v += cv2.erode(mask, np.ones((-dilate, -dilate), np.uint8))
        return v
# Rebuild the PicPac database from scratch, then kick off FCN training.
try:
    os.remove('dsb2018.db')
except OSError:  # BUG FIX: narrowed from bare except; only "file missing" is expected
    pass
db = picpac.Writer('dsb2018.db')  # NOTE(review): picpac is never imported above -- confirm
# BUG FIX: logging.warn was a deprecated alias, removed in Python 3.13.
logging.warning("Importing images into PicPac database...")
for folder in glob(os.path.join(DATA_DIR, "stage1_train/*")):
    print(folder)
    sample = Sample(folder)
    sample.stat()  # NOTE(review): Sample defines no stat() in this file -- verify
    # BUG FIX: ndarray.tostring() is deprecated (removed in NumPy 2.0); tobytes()
    # returns the identical byte payload.
    image_buffer = cv2.imencode('.png', sample.image)[1].tobytes()
    mask = np.clip(sample.mask_sum(), 0, 1).astype(np.uint8)
    mask_buffer = cv2.imencode('.png', mask)[1].tobytes()
    db.append(image_buffer, mask_buffer)
del db
# BUG FIX: check_call with a single string and shell=False treats the whole
# string as one program name; pass an argument list instead.
sp.check_call(['./fcn-train.py', '--db', 'dsb2018.db',
               '--model', 'dsb2018.model', '--annotate', 'image'])
| [
"numpy.clip",
"logging.warn",
"cv2.imencode",
"subprocess.check_call",
"cv2.erode",
"os.path.join",
"os.path.dirname",
"numpy.zeros",
"cv2.cvtColor",
"cv2.dilate",
"cv2.imread",
"os.remove"
] | [((392, 418), 'os.path.join', 'os.path.join', (['ROOT', '"""data"""'], {}), "(ROOT, 'data')\n", (404, 418), False, 'import os\n'), ((2078, 2134), 'logging.warn', 'logging.warn', (['"""Importing images into PicPac database..."""'], {}), "('Importing images into PicPac database...')\n", (2090, 2134), False, 'import logging\n'), ((2514, 2605), 'subprocess.check_call', 'sp.check_call', (['"""./fcn-train.py --db dsb2018.db --model dsb2018.model --annotate image"""'], {}), "(\n './fcn-train.py --db dsb2018.db --model dsb2018.model --annotate image')\n", (2527, 2605), True, 'import subprocess as sp\n'), ((354, 379), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (369, 379), False, 'import os\n'), ((2004, 2027), 'os.remove', 'os.remove', (['"""dsb2018.db"""'], {}), "('dsb2018.db')\n", (2013, 2027), False, 'import os\n'), ((2154, 2194), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""stage1_train/*"""'], {}), "(DATA_DIR, 'stage1_train/*')\n", (2166, 2194), False, 'import os\n'), ((661, 681), 'cv2.imread', 'cv2.imread', (['path', '(-1)'], {}), '(path, -1)\n', (671, 681), False, 'import cv2\n'), ((1641, 1687), 'numpy.zeros', 'np.zeros', (['self.image.shape[:2]'], {'dtype': 'np.float'}), '(self.image.shape[:2], dtype=np.float)\n', (1649, 1687), True, 'import numpy as np\n'), ((509, 541), 'os.path.join', 'os.path.join', (['folder', '"""images/*"""'], {}), "(folder, 'images/*')\n", (521, 541), False, 'import os\n'), ((918, 957), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_GRAY2BGR'], {}), '(image, cv2.COLOR_GRAY2BGR)\n', (930, 957), False, 'import cv2\n'), ((1308, 1339), 'os.path.join', 'os.path.join', (['folder', '"""masks/*"""'], {}), "(folder, 'masks/*')\n", (1320, 1339), False, 'import os\n'), ((1395, 1433), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (1405, 1433), False, 'import cv2\n'), ((1497, 1516), 'numpy.clip', 'np.clip', (['mask', '(0)', '(1)'], {}), '(mask, 0, 1)\n', 
(1504, 1516), True, 'import numpy as np\n'), ((2280, 2314), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'sample.image'], {}), "('.png', sample.image)\n", (2292, 2314), False, 'import cv2\n'), ((2409, 2435), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'mask'], {}), "('.png', mask)\n", (2421, 2435), False, 'import cv2\n'), ((1827, 1861), 'cv2.dilate', 'cv2.dilate', (['mask', '(dilate, dilate)'], {}), '(mask, (dilate, dilate))\n', (1837, 1861), False, 'import cv2\n'), ((1940, 1975), 'cv2.erode', 'cv2.erode', (['mask', '(-dilate, -dilate)'], {}), '(mask, (-dilate, -dilate))\n', (1949, 1975), False, 'import cv2\n')] |
import pyqtgraph as pg
import numpy as np

# Three random curves sharing one x axis.
x = np.arange(1000)
y = np.random.normal(size=(3, 1000))
plotWidget = pg.plot(title="Three plot curves")
# pen=(i, 3) automatically picks three distinct colors from a 3-color cycle.
for curve_idx, curve in enumerate(y):
    plotWidget.plot(x, curve, pen=(curve_idx, 3))
| [
"numpy.random.normal",
"pyqtgraph.plot",
"numpy.arange"
] | [((46, 61), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (55, 61), True, 'import numpy as np\n'), ((66, 98), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3, 1000)'}), '(size=(3, 1000))\n', (82, 98), True, 'import numpy as np\n'), ((112, 146), 'pyqtgraph.plot', 'pg.plot', ([], {'title': '"""Three plot curves"""'}), "(title='Three plot curves')\n", (119, 146), True, 'import pyqtgraph as pg\n')] |
from easydict import EasyDict as edict
import numpy as np
import torch.nn as nn  # NOTE(review): unused in this module -- confirm before removing
__C = edict()
cfg = __C  # public alias for __C
### Define config flags here
### Some flags are dummy, would be removed later
### Name of the config
__C.TAG = 'default'
### Training and validation
__C.GT_DEPTH_DIR = None
__C.TRAIN_SIZE = [256,512]
__C.VAL_SIZE = [370,1224]
__C.MIN_DEPTH = 1.0
__C.FLIP_AUG = False
__C.EIGEN_SFM = False
__C.ZOOM_INPUT = False
__C.SAVE_POSE = False
__C.MILESTONES = [2,5,8]
__C.TRAIN_FLOW = False
__C.STORED_POSE = False
__C.NORM_TARGET = 0.8
__C.PRED_POSE_ONLINE = True
### Deep PSNet, used as our depth estimation module
__C.PSNET_CONTEXT = True
__C.PSNET_DEP_CONTEXT = False
### RANSAC
__C.POSE_EST = 'RANSAC'
__C.ransac_iter = 5
__C.ransac_threshold = 1e-4
__C.min_matches = 20
### Deep Pose regression, for ablation study
__C.POSE_NET_TYPE = 'plain'
__C.POSE_DOWN_FEAT = 128
__C.POSENET_FLOW = False
__C.POSENET_ENTRO= False
__C.POSE_WITH_BN = True
### Miscellaneous dataset / training switches
__C.GENERATE_DEMON_POSE_OR_DEPTH = False
__C.ALL_VELO_RAW = False
__C.NO_MASK = False
__C.NO_SIFT = False
__C.TRUNC_SOFT = False
__C.KITTI_697 = True
__C.RANDOM_FW_BW = False
__C.RANDOM_OFFSET = False
__C.FILTERED_PAIR = True
__C.COST_BY_COLOR = False
__C.COST_BY_COLOR_WITH_FEAT = False
__C.PREDICT_BY_DEPTH = False
__C.NOT_CROP_VAL = False
__C.FILTER_OUT_RGBD = False
__C.KITTI_RAW_DATASET = False
__C.FILTER_DEMON_DATASET = False
__C.FILTER_DEMON_DATASET_FT = False
__C.FLOW_MASK = False
__C.GENERATE_DEMON_POSE_TO_SAVE = False
__C.DEMON_GENERATED_IDX = 0
__C.GENERATE_KITTI_POSE_TO_SAVE = False
__C.DEMON_DATASET = False
__C.DEMON_DATASET_SPE = 'None'
__C.FLOW_SPLIT_TRAIN = False
__C.SEQ_LEN = 5
__C.RESCALE_DEPTH = False
__C.RESCALE_DEPTH_REMASK = False
__C.REL_ABS_LOSS = False
__C.MIN_TRAIN_SCALE = 0.2
__C.MAX_TRAIN_SCALE = 3.0
__C.POSE_SEQ = [9]
__C.PRED_POSE_GT_SCALE = False
__C.RECORD_POSE = False
__C.RECORD_POSE_EVAL = False
__C.PRED_POSE_VAL_ONLINE = False
__C.CHECK_WRONG_POSE = False
__C.CONTEXT_BN = False
__C.FIX_DEPTH = False
__C.SUP_INIT = True
__C.IND_CONTEXT = False
__C.POSE_AWARE_MAX = False
__C.VALIDATE_FW = False
__C.MIXED_PREC = False
__C.NO_SMOOTH = True
__C.FLOW_EST = 'DICL'
__C.DEPTH_EST = 'PSNET'
__C.SCALE_MIN = 0.9
__C.SCALE_MAX = 1.1
__C.SCALE_STEP = 0.025
__C.FLOW_AND_JOINT_LOSS = False
__C.POSE_AWARE_MEAN = False
__C.SKIP = 1
__C.GT_POSE = False
__C.GT_POSE_NORMALIZED = False
__C.FLOW_POSE = True
__C.FLOW_BY_SIFT = False
__C.SIFT_POSE = False
__C.FLOW_CONF = -1.0
__C.SAMPLE_SP = False
####################################################################################
### Configs for DICL Flow
### To Be Removed Soon
__C.MAX_DISP = [[6,6],[6,6],[6,6]]
__C.MIN_DISP = [[-6,-6],[-6,-6],[-6,-6]]
__C.SOFT2D = True
__C.FLOWAGG = True
__C.COST_TOGETHER = False
__C.RANDOM_TRANS = 10
__C.DOWN_FEAT = False
__C.SPARSE_RESIZE = True
__C.KITTI_REMOVE130 = False
__C.SMOOTH_BY_TEMP = False
__C.CORR_BY_COS = False
__C.CLAMP_INPUT = True
__C.MIN_SCALE = 128
__C.UP_TO_RAW_SCALE = False
__C.KITTI_NO_VALID = False
__C.RAW_SINTEL_RATIO = 5
__C.USE_PCA_AUG = False
__C.SHALLOW_DOWN_SMALL = False
__C.BASIC_WITH_LEAKYRELU = False
__C.RAFT_RESIZE_CV2 = True
__C.MATCH_WITHDIS = False
__C.PAD_BY_CONS = False
__C.PAD_CONS = -1
__C.RAW_THING = False
__C.asymmetric_color_aug = False
__C.WEIGHT_DECAY = 0.0
__C.UPCONV = True
__C.DETACH_FUSION = False
__C.USE_CONTEXT6 = True
__C.USE_CONTEXT5 = True
__C.USE_SUBLOSS = False
__C.SUBLOSS_W = 0.001
__C.SHALLOW_SHARE = False
__C.SHALLOW_Down = False
__C.WITH_DIFF = False
__C.REMOVE_WARP_HOLE = True
__C.CONC_KITTI = False
__C.DROP_LAST = True
__C.TRUNCATED = False
__C.TRUNCATED_SIZE = 3
__C.CORRECT_ENTRO = False
__C.CORRECT_ENTRO_SOFT = False
__C.USE_SEQ_LOSS = False
__C.COST6_RATIO = 1.0
__C.COST5_RATIO = 1.0
__C.COST4_RATIO = 1.0
__C.COST3_RATIO = 1.0
__C.COST2_RATIO = 1.0
__C.SMOOTH_COST = False
__C.SMOOTH_LOSS = False
__C.SMOOTH_LOSS_WEIGHT = 0.1
__C.SMOOTH_SHARE = False
__C.SMOOTH_INIT_BY_ID = False
__C.FLOW_REG_BY_MAX = True
__C.SMOOTH_COST_ONLY_FLOW6 = False
__C.SMOOTH_COST_WITH_THREEMLP = False
__C.SCALE_BY_MASK = False
__C.DISTRIBUTED = False
__C.NO_SPATIAL = False
__C.NO_ERASE = False
__C.HALF_THINGS = False
__C.FIX_MATCHING = False
__C.MATCHING_USE_BN = False
__C.MATCHING_USE_RELU = False
__C.USE_CORR = False
__C.TIMES_RATIO = False
__C.VALID_RANGE = [[8,8],
                   [32,32],
                   [64,64],
                   [128,128]]
__C.USE_VALID_RANGE = True
__C.USE_FUSION = False
__C.FULL_SINTEL = True
__C.DETACH_FLOW = True
__C.COST_COMP_METHOD = 'compute_cost_vcn_together'
__C.LOSS_TYPE = 'L1'
__C.MultiScale_W = [1.,0.5,0.25]
__C.CROP_SIZE = [256,256]
__C.FEATURE_NET = 'SPP'
__C.MATCH_INPUTC = [128,64,64]
__C.SEATCH_RANGE = [8,12,8]  # NOTE(review): likely a typo for SEARCH_RANGE; renaming would break consumers
__C.AUG_BY_ROT = False
__C.DILATED_LLL = False
__C.FAC = 1.0
__C.MD = [4,4,4,4,4]
__C.SEP_LEVEL = 1
__C.ADD_FEATURE = False
__C.CTF = False
__C.CTF_CONTEXT = False
__C.CTF_CONTEXT_ONLY_FLOW2 = False
__C.REFINE = 1
__C.REFINE_DETACH = False
__C.SHARE_MATCHING = False
__C.SHARE_MATCHING_MLP = False
__C.COS_LR = False
__C.COS_TMAX = 20
__C.PSP_FEATURE = False
__C.NO_DECONV = False
__C.USE_RAW_FLOW2 = False
__C.SUP_RAW_FLOW = False
__C.SCALE_CONTEXT6 = 1.0
__C.SCALE_CONTEXT5 = 1.0
__C.SCALE_CONTEXT4 = 1.0
__C.SCALE_CONTEXT3 = 1.0
__C.SCALE_CONTEXT2 = 1.0
##########################################################################################
def cfg_from_file(filename):
    """Load a YAML config file and merge it into the default options.

    Args:
        filename: path to a YAML file whose keys mirror the ``cfg`` tree.
    """
    import yaml
    with open(filename, 'r') as f:
        # BUG FIX: yaml.load() without a Loader is unsafe on untrusted files
        # and raises TypeError under PyYAML >= 6.0; safe_load handles plain
        # config data (scalars, lists, mappings) exactly the same.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
def _merge_a_into_b(a, b):
    """Recursively merge edict ``a`` into edict ``b``.

    Values in ``a`` override those in ``b``; every key in ``a`` must already
    exist in ``b`` with a matching (or ndarray-coercible) type.
    """
    if type(a) is not edict:
        return
    for key, value in a.items():
        # every key of a must already be a valid key of b
        if key not in b:
            raise KeyError('{} is not a valid config key'.format(key))
        old_type = type(b[key])
        if old_type is not type(value):
            # allow lists etc. to replace an ndarray, coerced to its dtype
            if isinstance(b[key], np.ndarray):
                value = np.array(value, dtype=b[key].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[key]), type(value), key))
        if type(value) is edict:
            # nested config section: recurse, reporting the failing key
            try:
                _merge_a_into_b(a[key], b[key])
            except:
                print(('Error under config key: {}'.format(key)))
                raise
        else:
            b[key] = value
def cfg_from_list(cfg_list):
    """Set config keys via list (e.g., from command line).

    ``cfg_list`` alternates dotted key paths and values, e.g.
    ``['SCALE_MIN', '0.8', 'FLOW_EST', 'RAFT']``.  Values are parsed with
    ``ast.literal_eval`` and fall back to the raw string.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = __C
        # walk down to the innermost edict holding the target key
        for subkey in key_list[:-1]:
            assert subkey in d
            d = d[subkey]
        subkey = key_list[-1]
        assert subkey in d
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # BUG FIX: narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit); v is a plain string literal.
            value = v
        assert type(value) == type(d[subkey]), \
            'type {} does not match original type {}'.format(type(value), type(d[subkey]))
        d[subkey] = value
def save_config_to_file(cfg, pre='cfg', logger=None):
    """Dump the whole config tree, one ``pre.key: value`` line at a time.

    Writes through ``logger.info`` when a logger is supplied, otherwise to
    stdout; nested edicts are announced and then recursed into.
    """
    def _emit(msg):
        # route one line to the logger when present, else print it
        if logger is not None:
            logger.info(msg)
        else:
            print(msg)

    for key, val in cfg.items():
        if isinstance(val, edict):
            _emit('\n%s.%s = edict()' % (pre, key))
            save_config_to_file(val, pre=pre + '.' + key, logger=logger)
        else:
            _emit('%s.%s: %s' % (pre, key, val))
| [
"ast.literal_eval",
"easydict.EasyDict",
"numpy.array",
"yaml.load"
] | [((87, 94), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (92, 94), True, 'from easydict import EasyDict as edict\n'), ((5708, 5720), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (5717, 5720), False, 'import yaml\n'), ((7209, 7224), 'ast.literal_eval', 'literal_eval', (['v'], {}), '(v)\n', (7221, 7224), False, 'from ast import literal_eval\n'), ((6300, 6329), 'numpy.array', 'np.array', (['v'], {'dtype': 'b[k].dtype'}), '(v, dtype=b[k].dtype)\n', (6308, 6329), True, 'import numpy as np\n')] |
#!/usr/bin/env python3.5
"""Vision-target detector: threshold an image, fit a quadrangle, pickle the result."""
import time
e = time.time()  # wall-clock reference for the import-time measurement below
import sys
debug = False  # when True, print per-stage timings to stdout
fileWrite = True  # when True, save the annotated image under processed/
if fileWrite:
    fWPath = "processed/" + sys.argv[1] + "-processed.jpg"
displayProcessed = False  # when True, show the annotated image in a window
import cv2
import numpy as np
import pickle
if debug:
    print ("imports: " + str(format(time.time() - e, '.5f')))
start = time.time()
serialFile = "../pickle.txt"  # pickled result dict, consumed by another process
H, S, L, R, G, B = "H", "S", "L", "R", "G", "B" # I hate typing quotes
l, u = "l", "u" # Lower & Upper
# Per-channel lower/upper thresholds for the HSL and RGB masks built below.
cc = {H: {l: 50, u: 93},
      S: {l: 25, u: 255},
      L: {l: 34, u: 149},
      R: {l: 64, u: 212},
      G: {l: 206, u: 255},
      B: {l: 126, u: 255}}
# print (cc[H][l], cc[S][l], cc[L][l])
# print (cc[H][u], cc[S][u], cc[L][u])
# print (cc[R][l], cc[G][l], cc[B][l])
# print (cc[R][u], cc[G][u], cc[B][u])
# a = threshHSL(srcImg, [cc[H][l], cc[S][l], cc[L][l]],
#               [cc[H][u], cc[S][u], cc[L][u]]) # HSL thresh lower/upper
# if debug:
#     print ("HSL: " + str(format(time.time() - start, '.5f')))
#     start = time.time()
# b = threshRGB(srcImg, [cc[R][l], cc[G][l], cc[B][l]],
#               [cc[R][u], cc[G][u], cc[B][u]]) # RGB lower/upper
# Note: System arguments should take the form of an IP address of the video
# capture feed
# srcImg = cv2.VideoCapture() # Define srcImg as image/video capture
#
# if len(sys.argv) != 2:
#     print("Error: specify an URL to connect to")
#     exit(0)
#
# url = sys.argv[1]
#
# srcImg.open("http://1192.168.127.12:8080/stream.wmv")
# ret, frameImg = srcImg.read() # Test
# imgY, imgX, imgChannels = frameImg.shape
# Load the still image named by argv[1] from a hard-coded test-data directory.
srcImg = cv2.imread("/home/solomon/frc/the-deal/pythonCV/RealFullField/" +
                    sys.argv[1] + ".jpg", 1)
# print (srcImg.shape)
if debug:
    print ("Read image: " + str(format(time.time() - start, '.5f')))
    start = time.time()
def percentFromResolution(srcImg, yTargetRes, xTargetRes):
    """Return [y, x] scale fractions that map srcImg onto the target resolution."""
    height, width, _channels = srcImg.shape
    scale_y = float(yTargetRes) / height
    scale_x = float(xTargetRes) / width
    return [scale_y, scale_x]
def imgScale(toScale, percentX, percentY):
    """Resize an image by independent x/y fractions using cubic interpolation."""
    # INTER_AREA may be worth trying for downscaling.
    return cv2.resize(toScale, None, fx=percentX, fy=percentY,
                      interpolation=cv2.INTER_CUBIC)
def threshHSL(imgSrc, lower, upper):
    """Return a binary mask of pixels whose HSL values fall inside the bounds.

    ``lower``/``upper`` are given in H, S, L order; OpenCV converts to HLS,
    so the S and L components are swapped before thresholding.
    """
    hls = cv2.cvtColor(imgSrc, cv2.COLOR_BGR2HLS)
    lo = np.array([lower[0], lower[2], lower[1]])
    hi = np.array([upper[0], upper[2], upper[1]])
    return cv2.inRange(hls, lo, hi)
def threshRGB(imgSrc, lower, upper):
    """Return a binary mask of pixels whose RGB values fall inside the bounds."""
    rgb = cv2.cvtColor(imgSrc, cv2.COLOR_BGR2RGB)
    lo = np.array([lower[0], lower[1], lower[2]])
    hi = np.array([upper[0], upper[1], upper[2]])
    return cv2.inRange(rgb, lo, hi)
def cvAdd(img1, img2):
    """Return the element-wise sum of two images as computed by cv2.add."""
    return cv2.add(img1, img2)
def findContours(img):
    """Find external contours in an image (preferably a binary mask).

    Returns:
        (contours, hierarchy) as produced by ``cv2.findContours``.
    """
    # BUG FIX: cv2.findContours returns 3 values in OpenCV 3.x but only 2 in
    # OpenCV 2.4/4.x; taking the last two elements works across all versions.
    result = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours, hierarchy = result[-2], result[-1]
    return contours, hierarchy
# --- Pipeline: scale -> blur -> color threshold -> contours -> quadrangle fit ---
if debug:
    print ("function defs: " + str(format(time.time() - start, '.5f')))
    start = time.time()
# srcImg = imgScale(srcImg, percentFromResolution(srcImg, 240, 320)[0],
#                   percentFromResolution(srcImg, 240, 320)[1])
multiplier = 1  # with multiplier == 1 this rescale is effectively a no-op
srcImg = imgScale(srcImg, percentFromResolution(srcImg,
                                                srcImg.shape[0]*multiplier,
                                                srcImg.shape[1]*multiplier)[0],
                  percentFromResolution(srcImg,
                                        srcImg.shape[0]*multiplier,
                                        srcImg.shape[1]*multiplier)[1])
# srcImg = cv2.resize(srcImg, None, fx=.5, fy=.5, interpolation=cv2.INTER_CUBIC)
if debug:
    print ("Scale: " + str(format(time.time() - start, '.5f')))
    start = time.time()
srcImg = cv2.GaussianBlur(srcImg, (5, 5), 5)  # denoise before thresholding
if debug:
    print ("Blur: " + str(format(time.time() - start, '.5f')))
    start = time.time()
a = threshHSL(srcImg, [cc[H][l], cc[S][l], cc[L][l]],
              [cc[H][u], cc[S][u], cc[L][u]]) # HSL thresh lower/upper
if debug:
    print ("HSL: " + str(format(time.time() - start, '.5f')))
    start = time.time()
b = threshRGB(srcImg, [cc[R][l], cc[G][l], cc[B][l]],
              [cc[R][u], cc[G][u], cc[B][u]]) # RGB lower/upper
if debug:
    print ("RGB: " + str(format(time.time() - start, '.5f')))
    start = time.time()
c = cvAdd(a, b)  # union of the two masks (cv2.add saturates at 255)
if debug:
    print ("Add: " + str(format(time.time() - start, '.5f')))
    start = time.time()
d = c
contours, hiearchy = findContours(d)
if debug:
    print ("Contours: " + str(format(time.time() - start, '.5f')))
    start = time.time()
tmpVar = 0
# while len(contours) > 1: # this inefficient mess finds the biggest contour
#                          # (I think)
#     for z in range(0, len(contours)):
#         try:
#             if cv2.contourArea(contours[z]) <= tmpVar:
#                 contours.pop(z)
#         except IndexError:
#             break
#         # print (str(tmpVar) + ": " + str(len(contours)) + ": " + str(z))
#     tmpVar += 1
#
# if debug:
#     print ("Found biggest: " + str(format(time.time() - start, '.5f')))
#     start = time.time()
# for x in contours:
#     print (cv2.contourArea(x))
#     print("\n")
# Keep only the five largest contours by area.
contoursSorted = sorted(contours,
                        key=lambda x: cv2.contourArea(x), reverse=True)
# print (contours[0])
# print (contoursSorted)
contours = contoursSorted[0:5]
if debug:
    print ("Found biggest w/ better algorithm: " + str(format(time.time() -
                                                              start, '.5f')))
    start = time.time()
# rect = cv2.minAreaRect(contours[0])
# box = cv2.cv.BoxPoints(rect)
# box = np.int0(box)
# cv2.drawContours(srcImg, [box], 0, (0, 255, 0), 2)
#
# rows, cols = srcImg.shape[:2]
# [vx, vy, x, y] = cv2.fitLine(contours[0], cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)
# lefty = int((-x*vy/vx) + y)
# righty = int(((cols-x)*vy/vx)+y)
# cv2.line(srcImg, (cols-1, righty), (0, lefty), (255, 0, 0), 2)
hull = cv2.convexHull(contours[0], returnPoints=True)
if debug:
    print ("Convex hull: " + str(format(time.time() - start, '.5f')))
    start = time.time()
# Flatten the hull from (N, 1, 2) to (N, 2) point pairs.
(count, _, _) = hull.shape
hull.ravel()
hull.shape = (count, 2)
# Search for an approxPolyDP epsilon that yields exactly 4 vertices
# (a quadrangle), bounded by maxIter attempts.
tmpVar = 0
itera = 0
maxIter = 256
iii = len(cv2.approxPolyDP(hull, tmpVar, True))
while iii != 4:
    if iii > 4:
        tmpVar += 1
    elif iii < 4:
        tmpVar -= 1
    itera += 1
    if itera >= maxIter:
        break
    iii = len(cv2.approxPolyDP(hull, tmpVar, True))
approx = cv2.approxPolyDP(hull, tmpVar, True)
if debug:
    print ("Found quadrangle: " + str(format(time.time() - start, '.5f')))
    start = time.time()
# Annotate the source image: contours (red), hull (green), corners (labels).
cv2.drawContours(srcImg, contours, -1, (0, 0, 255), 1)
cv2.polylines(srcImg, np.int32([hull]), True, (0, 255, 0), 1)
cv2.drawContours(srcImg, approx, -1, (0, 255, 0), 3)
for x in range(0, len(approx)):
    # print (x)
    # print (approx[x][0][0])
    cv2.putText(srcImg,
                " " + str(x) + ": (" + str(approx[x][0][0]) +
                ", " + str(approx[x][0][1]) + ")",
                (approx[x][0][0], approx[x][0][1]),
                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 255), 1)
if debug:
    print ("Drew image: " + str(format(time.time() - start, '.5f')))
    start = time.time()
def imgUntilQ(srcImg):
    """Show srcImg in window 'e' and block until the user presses 'q'."""
    cv2.imshow('e', srcImg)
    quit_code = ord('q')
    # waitKey(1) both polls the keyboard and keeps the GUI event loop alive.
    while (cv2.waitKey(1) & 0xFF) != quit_code:
        pass
    cv2.destroyAllWindows()
if debug:
    # NOTE(review): label says "Wrote image" but nothing has been written yet.
    print ("Wrote image: " + str(format(time.time() - start, '.5f')))
    start = time.time()
if fileWrite:
    cv2.imwrite(fWPath, srcImg)
# Starting to calculate stuff for NT publishing.
# Items to be published:
#   Center of box/contour (maybe avg them)
#   4 points
#   Slopes of angles of sides of box
#   Box height
#   Box width
# Planned output:
# [center, (p1, p2, p3, p4), (Mp1, Mp2, Mp3, Mp4), (height, width)]
# Unpack the four quadrangle corners from approx's (4, 1, 2) layout.
p1, p2, p3, p4 = [approx[0][0][0], approx[0][0][1]], \
                 [approx[1][0][0], approx[1][0][1]], \
                 [approx[2][0][0], approx[2][0][1]], \
                 [approx[3][0][0], approx[3][0][1]]
xSize = 0
ySize = 0
pointArr = [p1, p2, p3, p4]
# Partition the corners: left/right by x (default sort), top/bottom by y.
leftPoints = sorted(pointArr)[:2]
rightPoints = sorted(pointArr)[2:]
topPoints = sorted(sorted(pointArr, key=lambda x: x[1])[:2])
bottomPoints = sorted(sorted(pointArr, key=lambda x: x[1])[2:])
# Bounding extents of the quadrangle.
xSize = sorted(pointArr)[-1][0] - sorted(pointArr)[0][0]
ySize = sorted(pointArr, key=lambda x: x[1], reverse=True)[0][1] - \
    sorted(pointArr, key=lambda x: x[1])[0][1]
approxMoments = cv2.moments(approx)
contourMoments = cv2.moments(contours[0])
# Centroid from image moments: (m10/m00, m01/m00).
approxCentroidY = int(approxMoments['m01']/approxMoments['m00'])
approxCentroidX = int(approxMoments['m10']/approxMoments['m00'])
cv2.circle(srcImg, (approxCentroidX, approxCentroidY), 5, (255, 0, 255))
# print (p1, p2, p3, p4)
# NOTE(review): a vertical edge makes dx == 0 below and raises
# ZeroDivisionError -- confirm the target can never be axis-aligned.
leftSlope, rightSlope, topSlope, bottomSlope = \
    format((leftPoints[1][1] - leftPoints[0][1]) /
           float(leftPoints[1][0] - leftPoints[0][0]), '.2f'),\
    format((rightPoints[1][1] - rightPoints[0][1]) /
           float(rightPoints[1][0] - rightPoints[0][0]), '.2f'),\
    format((topPoints[1][1] - topPoints[0][1]) /
           float(topPoints[1][0] - topPoints[0][0]), '.2f'),\
    format((bottomPoints[1][1] - bottomPoints[0][1]) /
           float(bottomPoints[1][0] - bottomPoints[0][0]), '.2f')
# print (leftPoints[1][1], leftPoints[0][1])
# print (leftPoints[1][0], leftPoints[0][0])
# print (leftSlope, rightSlope, topSlope, bottomSlope)
# Bundle everything into plain ints/floats/tuples so the pickle payload
# carries no numpy types.
finalDict = {}
finalDict["approxCentroidX"] = int(approxCentroidX)
finalDict["approxCentroidY"] = int(approxCentroidY)
finalDict["xSize"] = int(xSize)
finalDict["ySize"] = int(ySize)
finalDict["p1"] = (int(p1[0]), int(p1[1]))
finalDict["p2"] = (int(p2[0]), int(p2[1]))
finalDict["p3"] = (int(p3[0]), int(p3[1]))
finalDict["p4"] = (int(p4[0]), int(p4[1]))
finalDict["leftSlope"] = float(leftSlope)
finalDict["rightSlope"] = float(rightSlope)
finalDict["topSlope"] = float(topSlope)
finalDict["bottomSlope"] = float(bottomSlope)
# print (str(leftSlope) + ", " + str(rightSlope) + ", " + str(topSlope) + ", " +
#        str(bottomSlope))
# Side slopes
if debug:
    print ("Made dict: " + str(format(time.time() - start, '.5f')))
    start = time.time()
with open(serialFile, 'wb') as j:
    # pickle.dump(finalList, j)
    pickle.dump(finalDict, j, 2)  # protocol 2: readable by Python 2 consumers
if debug:
    print ("Dumped pickle: " + str(format(time.time() - start, '.5f')))
    start = time.time()
print ("Total time: " + str(time.time() - e))
if displayProcessed:
    imgUntilQ(srcImg)
| [
"numpy.int32",
"cv2.imshow",
"numpy.array",
"cv2.approxPolyDP",
"cv2.destroyAllWindows",
"cv2.contourArea",
"cv2.waitKey",
"cv2.add",
"cv2.drawContours",
"cv2.circle",
"cv2.moments",
"cv2.cvtColor",
"cv2.resize",
"cv2.GaussianBlur",
"time.time",
"cv2.imread",
"cv2.convexHull",
"cv2... | [((41, 52), 'time.time', 'time.time', ([], {}), '()\n', (50, 52), False, 'import time\n'), ((1569, 1664), 'cv2.imread', 'cv2.imread', (["('/home/solomon/frc/the-deal/pythonCV/RealFullField/' + sys.argv[1] + '.jpg')", '(1)'], {}), "('/home/solomon/frc/the-deal/pythonCV/RealFullField/' + sys.argv[\n 1] + '.jpg', 1)\n", (1579, 1664), False, 'import cv2\n'), ((4114, 4149), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['srcImg', '(5, 5)', '(5)'], {}), '(srcImg, (5, 5), 5)\n', (4130, 4149), False, 'import cv2\n'), ((6294, 6340), 'cv2.convexHull', 'cv2.convexHull', (['contours[0]'], {'returnPoints': '(True)'}), '(contours[0], returnPoints=True)\n', (6308, 6340), False, 'import cv2\n'), ((6801, 6837), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['hull', 'tmpVar', '(True)'], {}), '(hull, tmpVar, True)\n', (6817, 6837), False, 'import cv2\n'), ((6961, 7015), 'cv2.drawContours', 'cv2.drawContours', (['srcImg', 'contours', '(-1)', '(0, 0, 255)', '(1)'], {}), '(srcImg, contours, -1, (0, 0, 255), 1)\n', (6977, 7015), False, 'import cv2\n'), ((7078, 7130), 'cv2.drawContours', 'cv2.drawContours', (['srcImg', 'approx', '(-1)', '(0, 255, 0)', '(3)'], {}), '(srcImg, approx, -1, (0, 255, 0), 3)\n', (7094, 7130), False, 'import cv2\n'), ((8816, 8835), 'cv2.moments', 'cv2.moments', (['approx'], {}), '(approx)\n', (8827, 8835), False, 'import cv2\n'), ((8853, 8877), 'cv2.moments', 'cv2.moments', (['contours[0]'], {}), '(contours[0])\n', (8864, 8877), False, 'import cv2\n'), ((9008, 9080), 'cv2.circle', 'cv2.circle', (['srcImg', '(approxCentroidX, approxCentroidY)', '(5)', '(255, 0, 255)'], {}), '(srcImg, (approxCentroidX, approxCentroidY), 5, (255, 0, 255))\n', (9018, 9080), False, 'import cv2\n'), ((324, 335), 'time.time', 'time.time', ([], {}), '()\n', (333, 335), False, 'import time\n'), ((1794, 1805), 'time.time', 'time.time', ([], {}), '()\n', (1803, 1805), False, 'import time\n'), ((2095, 2182), 'cv2.resize', 'cv2.resize', (['toScale', 'None'], {'fx': 'percentX', 'fy': 
'percentY', 'interpolation': 'cv2.INTER_CUBIC'}), '(toScale, None, fx=percentX, fy=percentY, interpolation=cv2.\n INTER_CUBIC)\n', (2105, 2182), False, 'import cv2\n'), ((2363, 2402), 'cv2.cvtColor', 'cv2.cvtColor', (['imgSrc', 'cv2.COLOR_BGR2HLS'], {}), '(imgSrc, cv2.COLOR_BGR2HLS)\n', (2375, 2402), False, 'import cv2\n'), ((2417, 2457), 'numpy.array', 'np.array', (['[lower[0], lower[2], lower[1]]'], {}), '([lower[0], lower[2], lower[1]])\n', (2425, 2457), True, 'import numpy as np\n'), ((2498, 2538), 'numpy.array', 'np.array', (['[upper[0], upper[2], upper[1]]'], {}), '([upper[0], upper[2], upper[1]])\n', (2506, 2538), True, 'import numpy as np\n'), ((2549, 2589), 'cv2.inRange', 'cv2.inRange', (['imgSrcHLS', 'npLower', 'npUpper'], {}), '(imgSrcHLS, npLower, npUpper)\n', (2560, 2589), False, 'import cv2\n'), ((2719, 2758), 'cv2.cvtColor', 'cv2.cvtColor', (['imgSrc', 'cv2.COLOR_BGR2RGB'], {}), '(imgSrc, cv2.COLOR_BGR2RGB)\n', (2731, 2758), False, 'import cv2\n'), ((2773, 2813), 'numpy.array', 'np.array', (['[lower[0], lower[1], lower[2]]'], {}), '([lower[0], lower[1], lower[2]])\n', (2781, 2813), True, 'import numpy as np\n'), ((2828, 2868), 'numpy.array', 'np.array', (['[upper[0], upper[1], upper[2]]'], {}), '([upper[0], upper[1], upper[2]])\n', (2836, 2868), True, 'import numpy as np\n'), ((2879, 2919), 'cv2.inRange', 'cv2.inRange', (['imgSrcRGB', 'npLower', 'npUpper'], {}), '(imgSrcRGB, npLower, npUpper)\n', (2890, 2919), False, 'import cv2\n'), ((3009, 3028), 'cv2.add', 'cv2.add', (['img1', 'img2'], {}), '(img1, img2)\n', (3016, 3028), False, 'import cv2\n'), ((3169, 3234), 'cv2.findContours', 'cv2.findContours', (['img', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (3185, 3234), False, 'import cv2\n'), ((3361, 3372), 'time.time', 'time.time', ([], {}), '()\n', (3370, 3372), False, 'import time\n'), ((4093, 4104), 'time.time', 'time.time', ([], {}), '()\n', (4102, 4104), False, 'import time\n'), 
((4235, 4246), 'time.time', 'time.time', ([], {}), '()\n', (4244, 4246), False, 'import time\n'), ((4458, 4469), 'time.time', 'time.time', ([], {}), '()\n', (4467, 4469), False, 'import time\n'), ((4673, 4684), 'time.time', 'time.time', ([], {}), '()\n', (4682, 4684), False, 'import time\n'), ((4785, 4796), 'time.time', 'time.time', ([], {}), '()\n', (4794, 4796), False, 'import time\n'), ((4929, 4940), 'time.time', 'time.time', ([], {}), '()\n', (4938, 4940), False, 'import time\n'), ((5887, 5898), 'time.time', 'time.time', ([], {}), '()\n', (5896, 5898), False, 'import time\n'), ((6433, 6444), 'time.time', 'time.time', ([], {}), '()\n', (6442, 6444), False, 'import time\n'), ((6557, 6593), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['hull', 'tmpVar', '(True)'], {}), '(hull, tmpVar, True)\n', (6573, 6593), False, 'import cv2\n'), ((6936, 6947), 'time.time', 'time.time', ([], {}), '()\n', (6945, 6947), False, 'import time\n'), ((7038, 7054), 'numpy.int32', 'np.int32', (['[hull]'], {}), '([hull])\n', (7046, 7054), True, 'import numpy as np\n'), ((7552, 7563), 'time.time', 'time.time', ([], {}), '()\n', (7561, 7563), False, 'import time\n'), ((7593, 7616), 'cv2.imshow', 'cv2.imshow', (['"""e"""', 'srcImg'], {}), "('e', srcImg)\n", (7603, 7616), False, 'import cv2\n'), ((7701, 7724), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7722, 7724), False, 'import cv2\n'), ((7818, 7829), 'time.time', 'time.time', ([], {}), '()\n', (7827, 7829), False, 'import time\n'), ((7849, 7876), 'cv2.imwrite', 'cv2.imwrite', (['fWPath', 'srcImg'], {}), '(fWPath, srcImg)\n', (7860, 7876), False, 'import cv2\n'), ((10515, 10526), 'time.time', 'time.time', ([], {}), '()\n', (10524, 10526), False, 'import time\n'), ((10598, 10626), 'pickle.dump', 'pickle.dump', (['finalDict', 'j', '(2)'], {}), '(finalDict, j, 2)\n', (10609, 10626), False, 'import pickle\n'), ((10722, 10733), 'time.time', 'time.time', ([], {}), '()\n', (10731, 10733), False, 'import time\n'), ((6753, 
6789), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['hull', 'tmpVar', '(True)'], {}), '(hull, tmpVar, True)\n', (6769, 6789), False, 'import cv2\n'), ((5597, 5615), 'cv2.contourArea', 'cv2.contourArea', (['x'], {}), '(x)\n', (5612, 5615), False, 'import cv2\n'), ((7644, 7658), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7655, 7658), False, 'import cv2\n'), ((10766, 10777), 'time.time', 'time.time', ([], {}), '()\n', (10775, 10777), False, 'import time\n'), ((286, 297), 'time.time', 'time.time', ([], {}), '()\n', (295, 297), False, 'import time\n'), ((1752, 1763), 'time.time', 'time.time', ([], {}), '()\n', (1761, 1763), False, 'import time\n'), ((3319, 3330), 'time.time', 'time.time', ([], {}), '()\n', (3328, 3330), False, 'import time\n'), ((4051, 4062), 'time.time', 'time.time', ([], {}), '()\n', (4060, 4062), False, 'import time\n'), ((4193, 4204), 'time.time', 'time.time', ([], {}), '()\n', (4202, 4204), False, 'import time\n'), ((4416, 4427), 'time.time', 'time.time', ([], {}), '()\n', (4425, 4427), False, 'import time\n'), ((4631, 4642), 'time.time', 'time.time', ([], {}), '()\n', (4640, 4642), False, 'import time\n'), ((4743, 4754), 'time.time', 'time.time', ([], {}), '()\n', (4752, 4754), False, 'import time\n'), ((4887, 4898), 'time.time', 'time.time', ([], {}), '()\n', (4896, 4898), False, 'import time\n'), ((5783, 5794), 'time.time', 'time.time', ([], {}), '()\n', (5792, 5794), False, 'import time\n'), ((6391, 6402), 'time.time', 'time.time', ([], {}), '()\n', (6400, 6402), False, 'import time\n'), ((6894, 6905), 'time.time', 'time.time', ([], {}), '()\n', (6903, 6905), False, 'import time\n'), ((7510, 7521), 'time.time', 'time.time', ([], {}), '()\n', (7519, 7521), False, 'import time\n'), ((7776, 7787), 'time.time', 'time.time', ([], {}), '()\n', (7785, 7787), False, 'import time\n'), ((10473, 10484), 'time.time', 'time.time', ([], {}), '()\n', (10482, 10484), False, 'import time\n'), ((10680, 10691), 'time.time', 'time.time', ([], {}), '()\n', 
(10689, 10691), False, 'import time\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from cleverhans.attacks import Attack
class TestAttackClassInitArguments(unittest.TestCase):
    """Sanity checks for the argument validation in ``Attack.__init__``."""

    def test_model(self):
        import tensorflow as tf
        sess = tf.Session()
        # A model without a __call__ attribute must be rejected.
        with self.assertRaises(Exception) as ctx:
            placeholder_model = tf.placeholder(tf.float32, shape=(None, 10))
            Attack(placeholder_model, back='tf', sess=sess)
        self.assertTrue(ctx.exception)

    def test_back(self):
        # Define an empty model.
        def model():
            return True
        # Only the 'tf' and 'th' backends are accepted.
        with self.assertRaises(Exception) as ctx:
            Attack(model, back='test', sess=None)
        self.assertTrue(ctx.exception)

    def test_sess(self):
        # Define an empty model.
        def model():
            return True
        # The TF backend requires an explicit session.
        with self.assertRaises(Exception) as ctx:
            Attack(model, back='tf', sess=None)
        self.assertTrue(ctx.exception)
class TestAttackGenerate(unittest.TestCase):
    """The abstract ``Attack.generate`` must not produce a runnable graph."""

    def test_inf_loop(self):
        def model(x):
            return x
        import numpy as np
        import tensorflow as tf
        sess = tf.Session()
        x = tf.placeholder(tf.float32, shape=(1,))
        attack = Attack(model, back='tf', sess=sess)
        adv_x = attack.generate(x)
        feed = {x: np.asarray(1.0).reshape((1,))}
        with self.assertRaises(Exception) as ctx:
            sess.run(adv_x, feed_dict=feed)
        self.assertTrue(ctx.exception)
class TestAttackGenerateNp(unittest.TestCase):
    """``generate_np`` on the abstract ``Attack`` must raise."""

    def test_inf_loop(self):
        def model(x):
            return x
        import numpy as np
        import tensorflow as tf
        sess = tf.Session()
        inputs = np.zeros((10, 5, 5, 1))
        attack = Attack(model, back='tf', sess=sess)
        with self.assertRaises(Exception) as ctx:
            attack.generate_np(inputs)
        self.assertTrue(ctx.exception)
class TestParseParams(unittest.TestCase):
    """``parse_params`` should accept an empty parameter dict."""

    def test_parse(self):
        def model():
            return True
        import tensorflow as tf
        sess = tf.Session()
        attack = Attack(model, back='tf', sess=sess)
        self.assertTrue(attack.parse_params({}))
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"tensorflow.Session",
"tensorflow.placeholder",
"numpy.asarray",
"numpy.zeros",
"unittest.main",
"cleverhans.attacks.Attack"
] | [((2545, 2560), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2558, 2560), False, 'import unittest\n'), ((335, 347), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (345, 347), True, 'import tensorflow as tf\n'), ((1440, 1452), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1450, 1452), True, 'import tensorflow as tf\n'), ((1465, 1503), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(1,)'}), '(tf.float32, shape=(1,))\n', (1479, 1503), True, 'import tensorflow as tf\n'), ((1527, 1562), 'cleverhans.attacks.Attack', 'Attack', (['model'], {'back': '"""tf"""', 'sess': 'sess'}), "(model, back='tf', sess=sess)\n", (1533, 1562), False, 'from cleverhans.attacks import Attack\n'), ((1971, 1983), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1981, 1983), True, 'import tensorflow as tf\n'), ((2000, 2023), 'numpy.zeros', 'np.zeros', (['(10, 5, 5, 1)'], {}), '((10, 5, 5, 1))\n', (2008, 2023), True, 'import numpy as np\n'), ((2047, 2082), 'cleverhans.attacks.Attack', 'Attack', (['model'], {'back': '"""tf"""', 'sess': 'sess'}), "(model, back='tf', sess=sess)\n", (2053, 2082), False, 'from cleverhans.attacks import Attack\n'), ((2386, 2398), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2396, 2398), True, 'import tensorflow as tf\n'), ((2422, 2457), 'cleverhans.attacks.Attack', 'Attack', (['model'], {'back': '"""tf"""', 'sess': 'sess'}), "(model, back='tf', sess=sess)\n", (2428, 2457), False, 'from cleverhans.attacks import Attack\n'), ((497, 541), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 10)'}), '(tf.float32, shape=(None, 10))\n', (511, 541), True, 'import tensorflow as tf\n'), ((554, 589), 'cleverhans.attacks.Attack', 'Attack', (['model'], {'back': '"""tf"""', 'sess': 'sess'}), "(model, back='tf', sess=sess)\n", (560, 589), False, 'from cleverhans.attacks import Attack\n'), ((856, 893), 'cleverhans.attacks.Attack', 'Attack', (['model'], {'back': '"""test"""', 'sess': 
'None'}), "(model, back='test', sess=None)\n", (862, 893), False, 'from cleverhans.attacks import Attack\n'), ((1167, 1202), 'cleverhans.attacks.Attack', 'Attack', (['model'], {'back': '"""tf"""', 'sess': 'None'}), "(model, back='tf', sess=None)\n", (1173, 1202), False, 'from cleverhans.attacks import Attack\n'), ((1700, 1715), 'numpy.asarray', 'np.asarray', (['(1.0)'], {}), '(1.0)\n', (1710, 1715), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
def solver_constrained_newton(f, x0, maxiter=10000, tol=1e-6,
                              delta_step=0.9999,
                              max_step=1.0,
                              print_frequency=None):
    """Damped Newton solver for f(x) = 0 that keeps all components of x positive.

    Bug fix: the ``delta_step`` argument used to be shadowed by a hard-coded
    ``delta_step = 0.9999`` at the top of the body, so the caller's value was
    silently ignored.  That reassignment has been removed.

    Parameters
    ----------
    f : callable
        Returns ``(residual, jacobian)`` for a given ``x``.
    x0 : ndarray
        Initial guess; copied, never modified in place.
    maxiter : int, optional
        Maximum number of Newton iterations.
    tol : float, optional
        Convergence tolerance on ``max(abs(residual))``.
    delta_step : float, optional
        Damping factor applied when the full Newton step would drive a
        component to (or below) zero.
    max_step : float, optional
        Fraction of the Newton step taken when all components stay positive.
    print_frequency : int or None, optional
        If set, print diagnostics every ``print_frequency`` iterations.

    Returns
    -------
    x, res : ndarray, ndarray
        Final iterate and its residual.  Returned early, as-is, if the
        Jacobian becomes singular.
    """
    control_value = 10**-200  # positivity floor for the iterate's components
    x = x0.copy()
    for i in range(maxiter):
        res, jac = f(x)
        if np.max(np.abs(res)) < tol:
            break
        try:
            delta_x = np.linalg.solve(jac, -res)
        except np.linalg.LinAlgError:
            print("Solver jacobian is singular. Returning value and residual as it is")
            return x, res
        # Only components with a non-negligible update contribute to the
        # step-size computation (avoids division by ~0 below).
        control_index = np.abs(delta_x) > control_value
        x_step = x[control_index]
        delta_x_step = delta_x[control_index]
        # Per component: a damped fraction when the full step would leave the
        # positive orthant, the full max_step fraction otherwise.
        step_ = -delta_step*x_step/delta_x_step*((x_step + delta_x_step) <= 0) + \
            max_step*(x_step+delta_x_step > 0)
        step = np.min(step_)
        x_new = x + step*delta_x
        x_new[x_new < control_value] = control_value
        if print_frequency is not None:
            if (i+1) % print_frequency == 0:
                print('------')
                print(x)
                print(res)
                print(i)
                print('------')
        x = x_new
    return x, res
| [
"numpy.abs",
"numpy.linalg.solve",
"numpy.min"
] | [((928, 941), 'numpy.min', 'np.min', (['step_'], {}), '(step_)\n', (934, 941), True, 'import numpy as np\n'), ((468, 494), 'numpy.linalg.solve', 'np.linalg.solve', (['jac', '(-res)'], {}), '(jac, -res)\n', (483, 494), True, 'import numpy as np\n'), ((671, 686), 'numpy.abs', 'np.abs', (['delta_x'], {}), '(delta_x)\n', (677, 686), True, 'import numpy as np\n'), ((395, 406), 'numpy.abs', 'np.abs', (['res'], {}), '(res)\n', (401, 406), True, 'import numpy as np\n')] |
from keras.models import load_model
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
import pickle as pk
import os
from keras.utils import to_categorical ,Sequence
import pandas as pd
from sklearn.metrics import accuracy_score
# Silence pandas' SettingWithCopyWarning for the assignments below.
pd.options.mode.chained_assignment = None # default='warn'
# Pickled mapping (presumably label index -> class name; confirm against writer).
map_dict = pk.load(open('data/map.pkl' , 'rb'))
# NOTE(review): this DataFrame is overwritten inside the loop below, so this
# read-in appears to be dead code -- confirm before removing.
df = pd.read_csv('data/train_label.csv')
# CSV header: 'fname' followed by the 41 probability columns "0".."40".
head = list(range(0,41))
head = [str(x) for x in head]
head.insert(0,"fname")
predict_path = 'predict_valid_csv'
predict_path_un = 'predict_unverified_csv'
if not os.path.exists(predict_path):
    os.mkdir(predict_path)
if not os.path.exists(predict_path_un):
    os.mkdir(predict_path_un)
# For each of the 10 folds: load that fold's model, predict its validation
# split, and also predict the shared unverified set.
for k in range(1,11):
    save_name = 'mike_resnet'
    save_name_un = 'mike_resnet_unverified'
    model = load_model('resnet_varified/best_{}.h5'.format(k))
    X_valid = np.load('data/ten_fold_data/X_valid_{}.npy'.format(k))
    Y_valid = np.load('data/ten_fold_data/Y_valid_{}.npy'.format(k))
    valid_fame = np.load('data/ten_fold_data/valid_fname_{}.npy'.format(k))
    result = model.predict(X_valid , verbose = 1 )
    # Save per-sample class probabilities together with their file names.
    df = pd.DataFrame(result)
    df.insert(0, 'fname', valid_fame)
    df.to_csv('{}/{}_{}.csv'.format(predict_path,save_name,k), index=False,header=head)
    # Fold accuracy on the verified validation split.
    Y_ans = np.argmax(Y_valid, axis=-1)
    pred = np.argmax(result, axis=-1)
    acc = accuracy_score(Y_ans, pred)
    print('\nfold {} accuracy : {}'.format(k ,acc))
    # Predict the unverified samples with the same fold model.
    un_X = np.load('data/ten_fold_data/X_unverified.npy')
    un_Y = np.load('data/ten_fold_data/Y_unverified.npy')
    un_fname = np.load('data/ten_fold_data/fname_unverified.npy')
    un_result = model.predict(un_X , verbose = 1 )
    df = pd.DataFrame(un_result)
    df.insert(0, 'fname', un_fname)
    df.to_csv('{}/{}_{}.csv'.format(predict_path_un,save_name_un,k), index=False,header=head)
    # Accuracy against the (noisy) unverified labels.
    Y_ans_un = np.argmax(un_Y, axis=-1)
    pred = np.argmax(un_result, axis=-1)
    acc = accuracy_score(Y_ans_un, pred)
    print('\nfold {} _ unvsrified accuracy : {}'.format(k ,acc))
| [
"os.path.exists",
"pandas.read_csv",
"numpy.argmax",
"os.mkdir",
"pandas.DataFrame",
"numpy.load",
"sklearn.metrics.accuracy_score"
] | [((379, 414), 'pandas.read_csv', 'pd.read_csv', (['"""data/train_label.csv"""'], {}), "('data/train_label.csv')\n", (390, 414), True, 'import pandas as pd\n'), ((582, 610), 'os.path.exists', 'os.path.exists', (['predict_path'], {}), '(predict_path)\n', (596, 610), False, 'import os\n'), ((616, 638), 'os.mkdir', 'os.mkdir', (['predict_path'], {}), '(predict_path)\n', (624, 638), False, 'import os\n'), ((647, 678), 'os.path.exists', 'os.path.exists', (['predict_path_un'], {}), '(predict_path_un)\n', (661, 678), False, 'import os\n'), ((684, 709), 'os.mkdir', 'os.mkdir', (['predict_path_un'], {}), '(predict_path_un)\n', (692, 709), False, 'import os\n'), ((1192, 1212), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (1204, 1212), True, 'import pandas as pd\n'), ((1351, 1378), 'numpy.argmax', 'np.argmax', (['Y_valid'], {'axis': '(-1)'}), '(Y_valid, axis=-1)\n', (1360, 1378), True, 'import numpy as np\n'), ((1390, 1416), 'numpy.argmax', 'np.argmax', (['result'], {'axis': '(-1)'}), '(result, axis=-1)\n', (1399, 1416), True, 'import numpy as np\n'), ((1427, 1454), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_ans', 'pred'], {}), '(Y_ans, pred)\n', (1441, 1454), False, 'from sklearn.metrics import accuracy_score\n'), ((1544, 1590), 'numpy.load', 'np.load', (['"""data/ten_fold_data/X_unverified.npy"""'], {}), "('data/ten_fold_data/X_unverified.npy')\n", (1551, 1590), True, 'import numpy as np\n'), ((1602, 1648), 'numpy.load', 'np.load', (['"""data/ten_fold_data/Y_unverified.npy"""'], {}), "('data/ten_fold_data/Y_unverified.npy')\n", (1609, 1648), True, 'import numpy as np\n'), ((1664, 1714), 'numpy.load', 'np.load', (['"""data/ten_fold_data/fname_unverified.npy"""'], {}), "('data/ten_fold_data/fname_unverified.npy')\n", (1671, 1714), True, 'import numpy as np\n'), ((1775, 1798), 'pandas.DataFrame', 'pd.DataFrame', (['un_result'], {}), '(un_result)\n', (1787, 1798), True, 'import pandas as pd\n'), ((1944, 1968), 'numpy.argmax', 'np.argmax', 
(['un_Y'], {'axis': '(-1)'}), '(un_Y, axis=-1)\n', (1953, 1968), True, 'import numpy as np\n'), ((1980, 2009), 'numpy.argmax', 'np.argmax', (['un_result'], {'axis': '(-1)'}), '(un_result, axis=-1)\n', (1989, 2009), True, 'import numpy as np\n'), ((2020, 2050), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_ans_un', 'pred'], {}), '(Y_ans_un, pred)\n', (2034, 2050), False, 'from sklearn.metrics import accuracy_score\n')] |
"""
Random walker segmentation algorithm
from *Random walks for image segmentation*, Leo Grady, IEEE Trans
Pattern Anal Mach Intell. 2006 Nov;28(11):1768-83.
This code is mostly adapted from scikit-image 0.11.3 release.
Location of file in scikit image: random_walker function and its supporting
sub functions in skimage.segmentation
"""
import warnings
import numpy as np
from scipy import sparse, ndimage as ndi
from sklearn.utils import as_float_array
from scipy.sparse.linalg import cg
def _make_graph_edges_3d(n_x, n_y, n_z):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x : integer
The size of the grid in the x direction.
n_y : integer
The size of the grid in the y direction.
n_z : integer
The size of the grid in the z direction.
Returns
-------
edges : (2, N) ndarray
With the total number of edges:
N = n_x * n_y * (nz - 1) +
n_x * (n_y - 1) * nz +
(n_x - 1) * n_y * nz
Graph edges with each column describing a node-id pair.
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_weights_3d(data, spacing, beta=130, eps=1.e-6):
    """Map multichannel image gradients to edge weights exp(-beta' * grad**2).

    The squared gradients are summed over the trailing channel axis (this is
    the only difference from the single-channel version), ``beta`` is rescaled
    by the global standard deviation of ``data``, and ``eps`` keeps every
    weight strictly positive.
    """
    grad_sq = 0
    for channel in range(data.shape[-1]):
        grad_sq = grad_sq + _compute_gradients_3d(data[..., channel],
                                                  spacing) ** 2
    scaled_beta = beta / (10 * data.std())
    return np.exp(-scaled_beta * grad_sq) + eps
def _compute_gradients_3d(data, spacing):
gr_deep = np.abs(data[:, :, :-1] - data[:, :, 1:]).ravel() / spacing[2]
gr_right = np.abs(data[:, :-1] - data[:, 1:]).ravel() / spacing[1]
gr_down = np.abs(data[:-1] - data[1:]).ravel() / spacing[0]
return np.r_[gr_deep, gr_right, gr_down]
def _make_laplacian_sparse(edges, weights):
"""
Sparse implementation
"""
pixel_nb = edges.max() + 1
diag = np.arange(pixel_nb)
i_indices = np.hstack((edges[0], edges[1]))
j_indices = np.hstack((edges[1], edges[0]))
data = np.hstack((-weights, -weights))
lap = sparse.coo_matrix((data, (i_indices, j_indices)),
shape=(pixel_nb, pixel_nb))
connect = - np.ravel(lap.sum(axis=1))
lap = sparse.coo_matrix(
(np.hstack((data, connect)), (np.hstack((i_indices, diag)),
np.hstack((j_indices, diag)))),
shape=(pixel_nb, pixel_nb))
return lap.tocsr()
def _clean_labels_ar(X, labels):
X = X.astype(labels.dtype)
labels = np.ravel(labels)
labels[labels == 0] = X
return labels
def _buildAB(lap_sparse, labels):
"""
Build the matrix A and rhs B of the linear system to solve.
A and B are two block of the laplacian of the image graph.
"""
labels = labels[labels >= 0]
indices = np.arange(labels.size)
unlabeled_indices = indices[labels == 0]
seeds_indices = indices[labels > 0]
# The following two lines take most of the time in this function
B = lap_sparse[unlabeled_indices][:, seeds_indices]
lap_sparse = lap_sparse[unlabeled_indices][:, unlabeled_indices]
nlabels = labels.max()
rhs = []
for lab in range(1, nlabels + 1):
mask = (labels[seeds_indices] == lab)
fs = sparse.csr_matrix(mask)
fs = fs.transpose()
rhs.append(B * fs)
return lap_sparse, rhs
def _mask_edges_weights(edges, weights, mask):
"""
Remove edges of the graph connected to masked nodes, as well as
corresponding weights of the edges.
"""
mask0 = np.hstack((mask[:, :, :-1].ravel(), mask[:, :-1].ravel(),
mask[:-1].ravel()))
mask1 = np.hstack((mask[:, :, 1:].ravel(), mask[:, 1:].ravel(),
mask[1:].ravel()))
ind_mask = np.logical_and(mask0, mask1)
edges, weights = edges[:, ind_mask], weights[ind_mask]
max_node_index = edges.max()
# Reassign edges labels to 0, 1, ... edges_number - 1
order = np.searchsorted(np.unique(edges.ravel()),
np.arange(max_node_index + 1))
edges = order[edges.astype(np.int64)]
return edges, weights
def _build_laplacian(data, spacing, mask=None, beta=50):
    """Assemble the sparse graph Laplacian of ``data`` (first three axes).

    Edge weights decrease with the local image gradient (see
    ``_compute_weights_3d``); when ``mask`` is given, masked-out voxels are
    removed from the graph before the Laplacian is built.
    """
    grid_shape = data.shape[:3]
    edges = _make_graph_edges_3d(*grid_shape)
    weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10)
    if mask is not None:
        edges, weights = _mask_edges_weights(edges, weights, mask)
    return _make_laplacian_sparse(edges, weights)
def _random_walker(data, labels, beta=130, tol=1.e-3, copy=True, spacing=None):
    """Random walker algorithm for segmentation from markers.

    Parameters
    ----------
    data : array_like
        Image to be segmented in phases. Data spacing is assumed isotropic unless
        the `spacing` keyword argument is used.
    labels : array of ints, of same shape as `data` without channels dimension
        Array of seed markers labeled with different positive integers
        for different phases. Zero-labeled pixels are unlabeled pixels.
        Negative labels correspond to inactive pixels that are not taken
        into account (they are removed from the graph). If labels are not
        consecutive integers, the labels array will be transformed so that
        labels are consecutive.
    beta : float, optional
        Penalization coefficient for the random walker motion
        (the greater `beta`, the more difficult the diffusion).
        Default=130.
    tol : float, optional
        Tolerance to achieve when solving the linear system, in
        cg' mode. Default=1e-3.
    copy : bool, optional
        If copy is False, the `labels` array will be overwritten with
        the result of the segmentation. Use copy=False if you want to
        save on memory. Default=True.
    spacing : iterable of floats, optional
        Spacing between voxels in each spatial dimension. If `None`, then
        the spacing between pixels/voxels in each dimension is assumed 1.

    Returns
    -------
    output : ndarray
        An array of ints of same shape as `data`, in which each pixel has
        been labeled according to the marker that reached the pixel first
        by anisotropic diffusion.

    Notes
    -----
    The `spacing` argument is specifically for anisotropic datasets, where
    data points are spaced differently in one or more spatial dimensions.
    Anisotropic data is commonly encountered in medical imaging.

    The algorithm was first proposed in [1]_.

    The algorithm solves the diffusion equation at infinite times for
    sources placed on markers of each phase in turn. A pixel is labeled with
    the phase that has the greatest probability to diffuse first to the pixel.

    The diffusion equation is solved by minimizing x.T L x for each phase,
    where L is the Laplacian of the weighted graph of the image, and x is
    the probability that a marker of the given phase arrives first at a pixel
    by diffusion (x=1 on markers of the phase, x=0 on the other markers, and
    the other coefficients are looked for). Each pixel is attributed the label
    for which it has a maximal value of x. The Laplacian L of the image
    is defined as:

       - L_ii = d_i, the number of neighbors of pixel i (the degree of i)
       - L_ij = -w_ij if i and j are adjacent pixels

    The weight w_ij is a decreasing function of the norm of the local gradient.
    This ensures that diffusion is easier between pixels of similar values.

    When the Laplacian is decomposed into blocks of marked and unmarked
    pixels::

        L = M B.T
            B A

    with first indices corresponding to marked pixels, and then to unmarked
    pixels, minimizing x.T L x for one phase amount to solving::

        A x = - B x_m

    where x_m = 1 on markers of the given phase, and 0 on other markers.
    This linear system is solved in the algorithm using a direct method for
    small images, and an iterative method for larger images.

    References
    ----------
    .. [1] Random walks for image segmentation, Leo Grady, IEEE Trans Pattern
       Anal Mach Intell. 2006 Nov;28(11):1768-83.
    """
    # Keep an untouched copy so degenerate inputs can be returned verbatim.
    out_labels = np.copy(labels)
    if (labels != 0).all():
        warnings.warn('Random walker only segments unlabeled areas, where '
                      'labels == 0. No zero valued areas in labels were '
                      'found. Returning provided labels.')
        return out_labels
    if (labels == 0).all():
        warnings.warn('Random walker received no seed label. Returning provided labels.')
        return out_labels
    # We take multichannel as always False since we are not strictly using
    # for image processing as such with RGB values.
    multichannel = False
    if not multichannel:
        if data.ndim < 2 or data.ndim > 3:
            raise ValueError('For non-multichannel input, data must be of '
                             'dimension 2 or 3.')
        dims = data.shape  # To reshape final labeled result
        # Promote to 3-D plus a singleton channel axis, as floats.
        data = np.atleast_3d(as_float_array(data))[..., np.newaxis]
    # Spacing kwarg checks
    if spacing is None:
        spacing = np.asarray((1.,) * 3)
    elif len(spacing) == len(dims):
        if len(spacing) == 2:  # Need a dummy spacing for singleton 3rd dim
            spacing = np.r_[spacing, 1.]
        else:  # Convert to array
            spacing = np.asarray(spacing)
    else:
        raise ValueError('Input argument `spacing` incorrect, should be an '
                         'iterable with one number per spatial dimension.')
    if copy:
        labels = np.copy(labels)
    label_values = np.unique(labels)
    # Reorder label values to have consecutive integers (no gaps)
    if np.any(np.diff(label_values) != 1):
        mask = labels >= 0
        labels[mask] = np.searchsorted(np.unique(labels[mask]),
                                        labels[mask]).astype(labels.dtype)
    labels = labels.astype(np.int32)
    # If the array has pruned zones, we can have two problematic situations:
    # - isolated zero-labeled pixels that cannot be determined because they
    #   are not connected to any seed.
    # - isolated seeds, that is pixels with labels > 0 in connected components
    #   without any zero-labeled pixel to determine. This causes errors when
    #   computing the Laplacian of the graph.
    # For both cases, the problematic pixels are ignored (label is set to -1).
    if np.any(labels < 0):
        # Handle the isolated zero-labeled pixels first
        filled = ndi.binary_propagation(labels > 0, mask=labels >= 0)
        labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1
        del filled
        # Handle the isolated seeds
        filled = ndi.binary_propagation(labels == 0, mask=labels >= 0)
        isolated = np.logical_and(labels > 0, np.logical_not(filled))
        labels[isolated] = -1
        del filled
        # If the operations above yield only -1 pixels
        if (labels == -1).all():
            warnings.warn('Random walker only segments unlabeled areas, where '
                          'labels == 0. Data provided only contains isolated seeds '
                          'and isolated pixels. Returning provided labels.')
            return out_labels
    labels = np.atleast_3d(labels)
    # Inactive (-1) pixels are removed from the graph via the mask.
    if np.any(labels < 0):
        lap_sparse = _build_laplacian(data, spacing, mask=labels >= 0, beta=beta)
    else:
        lap_sparse = _build_laplacian(data, spacing, beta=beta)
    lap_sparse, B = _buildAB(lap_sparse, labels)
    # We solve the linear system
    # lap_sparse X = B
    # where X[i, j] is the probability that a marker of label i arrives
    # first at pixel j by anisotropic diffusion.
    X = _solve_cg(lap_sparse, B, tol=tol)
    # Clean up results
    X = _clean_labels_ar(X + 1, labels).reshape(dims)
    return X
def _solve_cg(lap_sparse, B, tol):
"""
Solves lap_sparse X_i = B_i for each phase i, using the conjugate
gradient method. For each pixel, the label i corresponding to the
maximal X_i is returned.
"""
lap_sparse = lap_sparse.tocsc()
X = []
for i in range(len(B)):
x0 = cg(lap_sparse, -B[i].todense(), tol=tol)[0]
X.append(x0)
X = np.array(X)
X = np.argmax(X, axis=0)
return X
| [
"numpy.hstack",
"numpy.logical_not",
"numpy.array",
"numpy.arange",
"numpy.asarray",
"numpy.diff",
"numpy.exp",
"scipy.sparse.coo_matrix",
"warnings.warn",
"scipy.sparse.csr_matrix",
"numpy.abs",
"numpy.argmax",
"numpy.any",
"numpy.copy",
"sklearn.utils.as_float_array",
"numpy.unique",... | [((1475, 1523), 'numpy.hstack', 'np.hstack', (['(edges_deep, edges_right, edges_down)'], {}), '((edges_deep, edges_right, edges_down))\n', (1484, 1523), True, 'import numpy as np\n'), ((2049, 2067), 'numpy.exp', 'np.exp', (['(-gradients)'], {}), '(-gradients)\n', (2055, 2067), True, 'import numpy as np\n'), ((2537, 2556), 'numpy.arange', 'np.arange', (['pixel_nb'], {}), '(pixel_nb)\n', (2546, 2556), True, 'import numpy as np\n'), ((2573, 2604), 'numpy.hstack', 'np.hstack', (['(edges[0], edges[1])'], {}), '((edges[0], edges[1]))\n', (2582, 2604), True, 'import numpy as np\n'), ((2621, 2652), 'numpy.hstack', 'np.hstack', (['(edges[1], edges[0])'], {}), '((edges[1], edges[0]))\n', (2630, 2652), True, 'import numpy as np\n'), ((2664, 2695), 'numpy.hstack', 'np.hstack', (['(-weights, -weights)'], {}), '((-weights, -weights))\n', (2673, 2695), True, 'import numpy as np\n'), ((2706, 2783), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(data, (i_indices, j_indices))'], {'shape': '(pixel_nb, pixel_nb)'}), '((data, (i_indices, j_indices)), shape=(pixel_nb, pixel_nb))\n', (2723, 2783), False, 'from scipy import sparse, ndimage as ndi\n'), ((3159, 3175), 'numpy.ravel', 'np.ravel', (['labels'], {}), '(labels)\n', (3167, 3175), True, 'import numpy as np\n'), ((3448, 3470), 'numpy.arange', 'np.arange', (['labels.size'], {}), '(labels.size)\n', (3457, 3470), True, 'import numpy as np\n'), ((4404, 4432), 'numpy.logical_and', 'np.logical_and', (['mask0', 'mask1'], {}), '(mask0, mask1)\n', (4418, 4432), True, 'import numpy as np\n'), ((8857, 8872), 'numpy.copy', 'np.copy', (['labels'], {}), '(labels)\n', (8864, 8872), True, 'import numpy as np\n'), ((10323, 10340), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (10332, 10340), True, 'import numpy as np\n'), ((11143, 11161), 'numpy.any', 'np.any', (['(labels < 0)'], {}), '(labels < 0)\n', (11149, 11161), True, 'import numpy as np\n'), ((11958, 11979), 'numpy.atleast_3d', 'np.atleast_3d', 
(['labels'], {}), '(labels)\n', (11971, 11979), True, 'import numpy as np\n'), ((11987, 12005), 'numpy.any', 'np.any', (['(labels < 0)'], {}), '(labels < 0)\n', (11993, 12005), True, 'import numpy as np\n'), ((12909, 12920), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (12917, 12920), True, 'import numpy as np\n'), ((12929, 12949), 'numpy.argmax', 'np.argmax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (12938, 12949), True, 'import numpy as np\n'), ((3887, 3910), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['mask'], {}), '(mask)\n', (3904, 3910), False, 'from scipy import sparse, ndimage as ndi\n'), ((4665, 4694), 'numpy.arange', 'np.arange', (['(max_node_index + 1)'], {}), '(max_node_index + 1)\n', (4674, 4694), True, 'import numpy as np\n'), ((8909, 9069), 'warnings.warn', 'warnings.warn', (['"""Random walker only segments unlabeled areas, where labels == 0. No zero valued areas in labels were found. Returning provided labels."""'], {}), "(\n 'Random walker only segments unlabeled areas, where labels == 0. No zero valued areas in labels were found. Returning provided labels.'\n )\n", (8922, 9069), False, 'import warnings\n'), ((9173, 9259), 'warnings.warn', 'warnings.warn', (['"""Random walker received no seed label. Returning provided labels."""'], {}), "(\n 'Random walker received no seed label. 
Returning provided labels.')\n", (9186, 9259), False, 'import warnings\n'), ((9827, 9849), 'numpy.asarray', 'np.asarray', (['((1.0,) * 3)'], {}), '((1.0,) * 3)\n', (9837, 9849), True, 'import numpy as np\n'), ((10288, 10303), 'numpy.copy', 'np.copy', (['labels'], {}), '(labels)\n', (10295, 10303), True, 'import numpy as np\n'), ((11236, 11288), 'scipy.ndimage.binary_propagation', 'ndi.binary_propagation', (['(labels > 0)'], {'mask': '(labels >= 0)'}), '(labels > 0, mask=labels >= 0)\n', (11258, 11288), True, 'from scipy import sparse, ndimage as ndi\n'), ((11434, 11487), 'scipy.ndimage.binary_propagation', 'ndi.binary_propagation', (['(labels == 0)'], {'mask': '(labels >= 0)'}), '(labels == 0, mask=labels >= 0)\n', (11456, 11487), True, 'from scipy import sparse, ndimage as ndi\n'), ((11696, 11877), 'warnings.warn', 'warnings.warn', (['"""Random walker only segments unlabeled areas, where labels == 0. Data provided only contains isolated seeds and isolated pixels. Returning provided labels."""'], {}), "(\n 'Random walker only segments unlabeled areas, where labels == 0. Data provided only contains isolated seeds and isolated pixels. 
Returning provided labels.'\n )\n", (11709, 11877), False, 'import warnings\n'), ((1113, 1139), 'numpy.arange', 'np.arange', (['(n_x * n_y * n_z)'], {}), '(n_x * n_y * n_z)\n', (1122, 1139), True, 'import numpy as np\n'), ((2892, 2918), 'numpy.hstack', 'np.hstack', (['(data, connect)'], {}), '((data, connect))\n', (2901, 2918), True, 'import numpy as np\n'), ((10422, 10443), 'numpy.diff', 'np.diff', (['label_values'], {}), '(label_values)\n', (10429, 10443), True, 'import numpy as np\n'), ((11534, 11556), 'numpy.logical_not', 'np.logical_not', (['filled'], {}), '(filled)\n', (11548, 11556), True, 'import numpy as np\n'), ((2165, 2205), 'numpy.abs', 'np.abs', (['(data[:, :, :-1] - data[:, :, 1:])'], {}), '(data[:, :, :-1] - data[:, :, 1:])\n', (2171, 2205), True, 'import numpy as np\n'), ((2242, 2276), 'numpy.abs', 'np.abs', (['(data[:, :-1] - data[:, 1:])'], {}), '(data[:, :-1] - data[:, 1:])\n', (2248, 2276), True, 'import numpy as np\n'), ((2312, 2340), 'numpy.abs', 'np.abs', (['(data[:-1] - data[1:])'], {}), '(data[:-1] - data[1:])\n', (2318, 2340), True, 'import numpy as np\n'), ((2921, 2949), 'numpy.hstack', 'np.hstack', (['(i_indices, diag)'], {}), '((i_indices, diag))\n', (2930, 2949), True, 'import numpy as np\n'), ((2989, 3017), 'numpy.hstack', 'np.hstack', (['(j_indices, diag)'], {}), '((j_indices, diag))\n', (2998, 3017), True, 'import numpy as np\n'), ((9718, 9738), 'sklearn.utils.as_float_array', 'as_float_array', (['data'], {}), '(data)\n', (9732, 9738), False, 'from sklearn.utils import as_float_array\n'), ((10074, 10093), 'numpy.asarray', 'np.asarray', (['spacing'], {}), '(spacing)\n', (10084, 10093), True, 'import numpy as np\n'), ((11319, 11341), 'numpy.logical_not', 'np.logical_not', (['filled'], {}), '(filled)\n', (11333, 11341), True, 'import numpy as np\n'), ((10517, 10540), 'numpy.unique', 'np.unique', (['labels[mask]'], {}), '(labels[mask])\n', (10526, 10540), True, 'import numpy as np\n')] |
from scipy import signal
import numpy as np
import pyqtgraph
# Create the data: a frequency-modulated sine carrier.
fs = 10e3  # sampling rate [Hz]
N = 1e5    # number of samples
amp = 2 * np.sqrt(2)
# noise_power = 0.01 * fs / 2
time = np.arange(N) / float(fs)
mod = 500*np.cos(2*np.pi*0.25*time)           # slow phase-modulation term
carrier = amp * np.sin(2*np.pi*3e3*time + mod)   # modulated 3 kHz carrier
# noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
# noise *= np.exp(-time/5)
# x = carrier + noise
# filename = librosa.util.example_audio_file()
# data, sr = librosa.load(filename, sr=None)
f, t, Sxx = signal.spectrogram(carrier, fs)
# Interpret image data as row-major instead of col-major
pyqtgraph.setConfigOptions(imageAxisOrder='row-major')
# window and plot
pyqtgraph.mkQApp()
win = pyqtgraph.GraphicsLayoutWidget()
p1 = win.addPlot()
# Item for displaying image data
img = pyqtgraph.ImageItem()
img.setImage(Sxx)
# Scale the image so its axes span the spectrogram's time/frequency extent.
img.scale(t[-1]/np.size(Sxx, axis=1),
          f[-1]/np.size(Sxx, axis=0))
p1.addItem(img)
# Color mapping: the histogram/LUT widget drives the image's levels and
# gradient.  NOTE(review): `hist` is never added to the layout, so it only
# sets levels/colors without being displayed -- confirm this is intended.
hist = pyqtgraph.HistogramLUTItem()
hist.setImageItem(img)
hist.setLevels(np.min(Sxx), np.max(Sxx))
hist.gradient.restoreState(
    {'mode': 'rgb',
     'ticks': [(0.5, (0, 182, 188, 255)),
               (1.0, (246, 111, 0, 255)),
               (0.0, (75, 0, 113, 255))]})
# Show the window and enter the Qt event loop.
win.show()
pyqtgraph.Qt.QtGui.QApplication.instance().exec_()
| [
"pyqtgraph.Qt.QtGui.QApplication.instance",
"numpy.sqrt",
"scipy.signal.spectrogram",
"pyqtgraph.HistogramLUTItem",
"pyqtgraph.ImageItem",
"numpy.min",
"numpy.size",
"pyqtgraph.setConfigOptions",
"numpy.max",
"pyqtgraph.mkQApp",
"numpy.cos",
"pyqtgraph.GraphicsLayoutWidget",
"numpy.sin",
"... | [((490, 521), 'scipy.signal.spectrogram', 'signal.spectrogram', (['carrier', 'fs'], {}), '(carrier, fs)\n', (508, 521), False, 'from scipy import signal\n'), ((580, 634), 'pyqtgraph.setConfigOptions', 'pyqtgraph.setConfigOptions', ([], {'imageAxisOrder': '"""row-major"""'}), "(imageAxisOrder='row-major')\n", (606, 634), False, 'import pyqtgraph\n'), ((654, 672), 'pyqtgraph.mkQApp', 'pyqtgraph.mkQApp', ([], {}), '()\n', (670, 672), False, 'import pyqtgraph\n'), ((679, 711), 'pyqtgraph.GraphicsLayoutWidget', 'pyqtgraph.GraphicsLayoutWidget', ([], {}), '()\n', (709, 711), False, 'import pyqtgraph\n'), ((771, 792), 'pyqtgraph.ImageItem', 'pyqtgraph.ImageItem', ([], {}), '()\n', (790, 792), False, 'import pyqtgraph\n'), ((919, 947), 'pyqtgraph.HistogramLUTItem', 'pyqtgraph.HistogramLUTItem', ([], {}), '()\n', (945, 947), False, 'import pyqtgraph\n'), ((108, 118), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (115, 118), True, 'import numpy as np\n'), ((156, 168), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (165, 168), True, 'import numpy as np\n'), ((191, 222), 'numpy.cos', 'np.cos', (['(2 * np.pi * 0.25 * time)'], {}), '(2 * np.pi * 0.25 * time)\n', (197, 222), True, 'import numpy as np\n'), ((233, 272), 'numpy.sin', 'np.sin', (['(2 * np.pi * 3000.0 * time + mod)'], {}), '(2 * np.pi * 3000.0 * time + mod)\n', (239, 272), True, 'import numpy as np\n'), ((986, 997), 'numpy.min', 'np.min', (['Sxx'], {}), '(Sxx)\n', (992, 997), True, 'import numpy as np\n'), ((999, 1010), 'numpy.max', 'np.max', (['Sxx'], {}), '(Sxx)\n', (1005, 1010), True, 'import numpy as np\n'), ((827, 847), 'numpy.size', 'np.size', (['Sxx'], {'axis': '(1)'}), '(Sxx, axis=1)\n', (834, 847), True, 'import numpy as np\n'), ((865, 885), 'numpy.size', 'np.size', (['Sxx'], {'axis': '(0)'}), '(Sxx, axis=0)\n', (872, 885), True, 'import numpy as np\n'), ((1222, 1264), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'pyqtgraph.Qt.QtGui.QApplication.instance', ([], {}), '()\n', (1262, 1264), False, 
'import pyqtgraph\n')] |
import random
import numpy as np
import torch
def set_seed(seed):
    """Seed Python's, NumPy's, and PyTorch's RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def cuda_if(torch_object, cuda):
    """Move *torch_object* to the GPU when *cuda* is truthy; otherwise return it unchanged."""
    if cuda:
        return torch_object.cuda()
    return torch_object
def gae(rewards, masks, values, gamma, lambd):
    """ Generalized Advantage Estimation
    Args:
        rewards (FloatTensor): rewards shaped [T x N x 1]
        masks (FloatTensor): continuation masks shaped [T x N x 1]
            zero at done timesteps, one otherwise
        values (Variable): value predictions shaped [(T + 1) x N x 1]
        gamma (float): discount factor
        lambd (float): GAE lambda parameter
    Returns:
        advantages (FloatTensor): advantages shaped [T x N x 1]
        returns (FloatTensor): returns shaped [T x N x 1]
    """
    num_steps, num_envs, _ = rewards.size()
    advantages = torch.zeros(num_steps, num_envs, 1)
    running_adv = torch.zeros(num_envs, 1)
    # Keep the accumulators on the same device as the rewards.
    if rewards.is_cuda:
        advantages = advantages.cuda()
        running_adv = running_adv.cuda()
    # Backward recursion: A_t = delta_t + gamma * lambda * mask_t * A_{t+1}
    for t in range(num_steps - 1, -1, -1):
        delta = rewards[t] + values[t + 1].data * gamma * masks[t] - values[t].data
        running_adv = delta + running_adv * gamma * lambd * masks[t]
        advantages[t] = running_adv
    returns = values[:num_steps].data + advantages
    return advantages, returns
def _grouped_mean_std(arr, group_size, num_groups):
    """Per-group (means, stds) of *arr*; a ragged tail group is summarized last."""
    head, tail = arr[:group_size * num_groups], arr[group_size * num_groups:]
    head = head.reshape((num_groups, group_size))
    means = head.mean(axis=1)
    stds = head.std(axis=1)
    if len(tail) > 0:
        # keepdims keeps the tail stats 1-D so they concatenate cleanly
        means = np.concatenate([means, tail.mean(axis=0, keepdims=True)])
        stds = np.concatenate([stds, tail.std(axis=0, keepdims=True)])
    return means, stds


def mean_std_groups(x, y, group_size):
    """Compute group-wise mean/std for the paired 1-D series *x* and *y*.

    Args:
        x, y (np.ndarray): 1-D arrays (typically of equal length).
        group_size (int): consecutive samples per group; when len(x) is not a
            multiple of group_size, the shorter trailing group is appended
            as one extra entry.

    Returns:
        (x_means, x_stds, y_means, y_stds): per-group statistics.
    """
    num_groups = int(len(x) / group_size)
    # Identical reduction for both series; extracted to avoid the copy-pasted
    # x/y blocks of the original implementation.
    x_means, x_stds = _grouped_mean_std(x, group_size, num_groups)
    y_means, y_stds = _grouped_mean_std(y, group_size, num_groups)
    return x_means, x_stds, y_means, y_stds
def set_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group in *optimizer*."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
| [
"torch.manual_seed",
"torch.zeros",
"numpy.random.seed",
"random.seed"
] | [((71, 88), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (82, 88), False, 'import random\n'), ((93, 113), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (107, 113), True, 'import numpy as np\n'), ((118, 141), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (135, 141), False, 'import torch\n'), ((880, 900), 'torch.zeros', 'torch.zeros', (['T', 'N', '(1)'], {}), '(T, N, 1)\n', (891, 900), False, 'import torch\n'), ((962, 979), 'torch.zeros', 'torch.zeros', (['N', '(1)'], {}), '(N, 1)\n', (973, 979), False, 'import torch\n')] |
import numpy as np
def split(dataset, splits_p):
    """Scale the fractional split sizes *splits_p* by len(dataset), truncating to ints."""
    scaled = np.array(splits_p) * len(dataset)
    return [int(size) for size in scaled]
"numpy.array"
] | [((76, 94), 'numpy.array', 'np.array', (['splits_p'], {}), '(splits_p)\n', (84, 94), True, 'import numpy as np\n')] |
import gym
import torch
from collections import deque
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from ppo import PPOAgent
from pathlib import Path
from datetime import datetime
import utils
# create environment
# create the continuous-control environment
env = gym.make("Pendulum-v0")
# set random seeds (torch, gym env, numpy) so runs are reproducible
seed = 123456
torch.manual_seed(seed)
env.seed(seed)
np.random.seed(seed)
# set hyperparameters
lr_policy=0.01      # policy-network learning rate
lr_critic=0.0005    # value-network learning rate
gam=0.96            # discount factor gamma
lam=0.93            # GAE lambda
eps=0.20            # PPO clipping epsilon
batch_size=100  # 150
policy_updates=80  # 100
v_updates=50        # value-function updates per training step
update_freq=1       # episodes between agent updates
# initialize PPO agent with the environment's observation/action sizes
agent = PPOAgent(
    nS = env.observation_space.shape[0],
    nA = env.action_space.shape[0],
    lr_policy=lr_policy,
    lr_critic=lr_critic,
    gam=gam,
    lam=lam,
    eps=eps,
    batch_size=batch_size,
    policy_updates=policy_updates,
    v_updates=v_updates,
    update_freq=update_freq
)
# set counter for number of episodes and list to store last 100 returns
n_episode = 0
returns = deque(maxlen=100)
# initialize parameters for saving training
# NOTE(review): save_freq is defined but never used below -- confirm whether
# periodic checkpointing was intended in the training loop.
save_freq = 10
run_name = "test3"
checkpoint_path = 'checkpoints/' + run_name + '_best' + '.tar'
checkpoint_file = Path(checkpoint_path)
# load already existing agent if possible
if checkpoint_file.is_file():
    agent, n_episode = utils.load_agent(agent, checkpoint_path)
# outer loop never terminates; the process is expected to be interrupted
while True:
    n_episode += 1
    # reset environment and reward for the current episode
    episode_reward = 0
    state = env.reset()
    while True:
        # render environment
        env.render()
        # action needs to be a list since this accepts Box Actions
        # the reward is of the same type as the action that we pass in
        # choose action from the policy for the current state
        action = [agent.choose_action(torch.tensor(state).unsqueeze(0).float()).item()]
        # apply the action in the environment and store the outcomes
        next_state, reward, done, info = env.step(action)
        # update current state and return
        state = next_state
        episode_reward += reward
        # when the episode is over
        if done:
            # add to the list of last 100 rewards
            returns.append(episode_reward)
            # print some basic stats in the terminal
            print("Episode n. {:6d} Return: {:9.2f} Avg. Return: {:9.2f}".format(n_episode, episode_reward, np.mean(returns)))
            break
# close the environment and tensorboard
# NOTE(review): unreachable -- the outer `while True` above never breaks.
env.close()
| [
"torch.manual_seed",
"numpy.mean",
"collections.deque",
"pathlib.Path",
"torch.tensor",
"numpy.random.seed",
"ppo.PPOAgent",
"gym.make",
"utils.load_agent"
] | [((245, 268), 'gym.make', 'gym.make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (253, 268), False, 'import gym\n'), ((303, 326), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (320, 326), False, 'import torch\n'), ((342, 362), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (356, 362), True, 'import numpy as np\n'), ((544, 799), 'ppo.PPOAgent', 'PPOAgent', ([], {'nS': 'env.observation_space.shape[0]', 'nA': 'env.action_space.shape[0]', 'lr_policy': 'lr_policy', 'lr_critic': 'lr_critic', 'gam': 'gam', 'lam': 'lam', 'eps': 'eps', 'batch_size': 'batch_size', 'policy_updates': 'policy_updates', 'v_updates': 'v_updates', 'update_freq': 'update_freq'}), '(nS=env.observation_space.shape[0], nA=env.action_space.shape[0],\n lr_policy=lr_policy, lr_critic=lr_critic, gam=gam, lam=lam, eps=eps,\n batch_size=batch_size, policy_updates=policy_updates, v_updates=\n v_updates, update_freq=update_freq)\n', (552, 799), False, 'from ppo import PPOAgent\n'), ((934, 951), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (939, 951), False, 'from collections import deque\n'), ((1112, 1133), 'pathlib.Path', 'Path', (['checkpoint_path'], {}), '(checkpoint_path)\n', (1116, 1133), False, 'from pathlib import Path\n'), ((1230, 1270), 'utils.load_agent', 'utils.load_agent', (['agent', 'checkpoint_path'], {}), '(agent, checkpoint_path)\n', (1246, 1270), False, 'import utils\n'), ((2273, 2289), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (2280, 2289), True, 'import numpy as np\n'), ((1679, 1698), 'torch.tensor', 'torch.tensor', (['state'], {}), '(state)\n', (1691, 1698), False, 'import torch\n')] |
import os
import abc
import cv2
import numpy as np
import tensorflow as tf
class TFRecordGenerator(abc.ABC):
    """Writes one TFRecord file from several parallel image sources.

    Example i combines the i-th file of every source; subclasses define how a
    group of image paths becomes a tf.train.Example via _convert_one_example.
    """

    def __init__(self, tfrecord_path, labels, dir_paths=None, file_paths=None):
        # tfrecord_path : output path of the TFRecord file
        # labels        : feature label for each image source
        # dir_paths     : directories, one per image source (scanned and sorted)
        # file_paths    : explicit per-source file lists (overrides dir_paths)
        self.tfrecord_path = None
        self.file_paths = None
        self.labels = None
        self.file_count = None
        self.update_record_paths(tfrecord_path, labels, dir_paths, file_paths)

    def update_record_paths(self, tfrecord_path, labels, dir_paths=None, file_paths=None):
        """Validate the image sources and (re)bind the generator to them.

        Raises:
            ValueError: if no sources are given, if dir_paths and labels
                differ in length, or if the sources contain different
                numbers of files.
        """
        if file_paths is None and dir_paths is None:
            raise ValueError("Both dir_paths and file_paths are none")
        elif file_paths is None:
            if len(dir_paths) != len(labels):
                raise ValueError("Length of file_paths and labels are not equal")
            file_paths = self._convert_dir_to_file_path(dir_paths)
        files_count = np.array([len(files) for files in file_paths])
        if not np.all(files_count == files_count[0]):
            raise ValueError("File paths have different number of files")
        self.tfrecord_path = tfrecord_path
        # Materialize the zip: a bare zip iterator is exhausted after one
        # pass, so a second generate_record() call would silently write an
        # empty record file.
        self.file_paths = list(zip(*file_paths))
        self.labels = labels
        self.file_count = files_count[0]

    def generate_record(self):
        """Serialize every example group into the TFRecord file."""
        with tf.io.TFRecordWriter(self.tfrecord_path) as writer:
            for count, img_paths in enumerate(self.file_paths):
                example = self._convert_one_example(img_paths)
                writer.write(example.SerializeToString())
                print("complete {:0>4d}/{:0>4d} example".format(count+1, self.file_count))

    def _convert_dir_to_file_path(self, dir_paths):
        """Expand each directory into a sorted list of its file paths."""
        file_paths = []
        for dir_path in dir_paths:
            file_path = [os.path.join(dir_path, f) for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]
            # sort file path, make sure files in different folders are matched
            file_path.sort()
            file_paths.append(file_path)
        return file_paths

    @abc.abstractmethod
    def _convert_one_example(self, img_paths):
        """Turn one group of parallel image paths into a tf.train.Example."""
class TFRecordGeneratorforTH(TFRecordGenerator):
    """TFRecord generator storing EXR images as flat float32 features in NCHW order."""

    def __init__(self, tfrecord_path, labels, dir_paths=None, file_paths=None):
        super(TFRecordGeneratorforTH, self).__init__(tfrecord_path, labels, dir_paths, file_paths)

    def _convert_one_example(self, img_paths):
        """Build one tf.train.Example from a group of parallel image paths."""
        features = {}
        # All images in a group are expected to share the same resolution.
        for idx, img_path in enumerate(img_paths):
            # Save the EXR image as a float32 1-D array in **NCHW** format
            # for best GPU inference performance.
            pixels = np.transpose(cv2.imread(img_path, -1), [2, 0, 1])
            label = self.labels[idx]
            if label.startswith("depth"):
                # Depth maps keep a single channel to reduce memory cost.
                pixels = pixels[0, :, :]
            flat = pixels.flatten()
            features[label] = tf.train.Feature(float_list=tf.train.FloatList(value=flat))
        return tf.train.Example(features=tf.train.Features(feature=features))
class TFRecordExtractor(abc.ABC):
    """Builds a tf.data input pipeline over a single TFRecord file.

    Subclasses define how one serialized example is parsed back into
    tensors by implementing _extract_fn.
    """

    def __init__(self, tfrecord_path, dataset_params, labels):
        # tfrecord_path  : path of the TFRecord file to read
        # dataset_params : pipeline knobs (shuffle_buffer_size, repeat, batch,
        #                  num_parallel_calls, prefetch_buffer_size, res_h, res_w)
        # labels         : feature label of each stored image
        self.tfrecord_path = None
        self.dataset_params = None
        self.labels = None
        self.iterator = None
        self.update_record_path(tfrecord_path, dataset_params, labels)

    def update_record_path(self, tfrecord_path, dataset_params, labels):
        """Rebind the extractor to a record file and pipeline configuration."""
        self.tfrecord_path = os.path.abspath(tfrecord_path)
        self.dataset_params = dataset_params
        self.labels = labels

    @abc.abstractmethod
    def _extract_fn(self, tfrecord):
        """Parse one serialized example into a feature dict.

        Declared abstract for consistency with TFRecordGenerator: the base
        class has no sensible default parser, and the previous docstring-only
        stub would have silently mapped every example to None.
        """

    def build_dataset(self):
        """Assemble the shuffle/map/batch/prefetch pipeline and return its iterator."""
        dataset = tf.data.TFRecordDataset([self.tfrecord_path])
        dataset = dataset.shuffle(buffer_size=self.dataset_params["shuffle_buffer_size"])
        if self.dataset_params["repeat"]:
            dataset = dataset.repeat()
        dataset = dataset.map(self._extract_fn, num_parallel_calls=self.dataset_params["num_parallel_calls"])
        dataset = dataset.batch(self.dataset_params["batch"])
        dataset = dataset.prefetch(buffer_size=self.dataset_params["prefetch_buffer_size"])
        iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
        self.iterator = iterator
        return iterator
class TFRecordExtractorforTH(TFRecordExtractor):
    """Extractor that restores flat float32 features back into CHW image tensors."""

    def __init__(self, tfrecord_path, dataset_params, labels):
        super(TFRecordExtractorforTH, self).__init__(tfrecord_path, dataset_params, labels)

    def _extract_fn(self, tfrecord):
        """Parse one serialized example using the labels set during creation."""
        height = self.dataset_params["res_h"]
        width = self.dataset_params["res_w"]
        features = {}
        for label in self.labels:
            # Depth maps were stored single-channel; all other images have 3 channels.
            channels = 1 if label.startswith("depth") else 3
            features[label] = tf.io.FixedLenFeature((channels, height, width), tf.float32)
        # Extract the data record
        return tf.io.parse_single_example(tfrecord, features)
"tensorflow.data.TFRecordDataset",
"os.listdir",
"tensorflow.io.parse_single_example",
"tensorflow.compat.v1.data.make_one_shot_iterator",
"tensorflow.io.TFRecordWriter",
"os.path.join",
"tensorflow.train.Features",
"tensorflow.io.FixedLenFeature",
"tensorflow.train.FloatList",
"os.path.abspath",
... | [((3902, 3932), 'os.path.abspath', 'os.path.abspath', (['tfrecord_path'], {}), '(tfrecord_path)\n', (3917, 3932), False, 'import os\n'), ((4202, 4247), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['[self.tfrecord_path]'], {}), '([self.tfrecord_path])\n', (4225, 4247), True, 'import tensorflow as tf\n'), ((4702, 4751), 'tensorflow.compat.v1.data.make_one_shot_iterator', 'tf.compat.v1.data.make_one_shot_iterator', (['dataset'], {}), '(dataset)\n', (4742, 4751), True, 'import tensorflow as tf\n'), ((5666, 5712), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['tfrecord', 'features'], {}), '(tfrecord, features)\n', (5692, 5712), True, 'import tensorflow as tf\n'), ((1155, 1192), 'numpy.all', 'np.all', (['(files_count == files_count[0])'], {}), '(files_count == files_count[0])\n', (1161, 1192), True, 'import numpy as np\n'), ((1470, 1510), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['self.tfrecord_path'], {}), '(self.tfrecord_path)\n', (1490, 1510), True, 'import tensorflow as tf\n'), ((1935, 1960), 'os.path.join', 'os.path.join', (['dir_path', 'f'], {}), '(dir_path, f)\n', (1947, 1960), False, 'import os\n'), ((2894, 2918), 'cv2.imread', 'cv2.imread', (['img_path', '(-1)'], {}), '(img_path, -1)\n', (2904, 2918), False, 'import cv2\n'), ((3275, 3310), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), '(feature=features)\n', (3292, 3310), True, 'import tensorflow as tf\n'), ((5362, 5465), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (["(1, self.dataset_params['res_h'], self.dataset_params['res_w'])", 'tf.float32'], {}), "((1, self.dataset_params['res_h'], self.dataset_params\n ['res_w']), tf.float32)\n", (5383, 5465), True, 'import tensorflow as tf\n'), ((5516, 5619), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (["(3, self.dataset_params['res_h'], self.dataset_params['res_w'])", 'tf.float32'], {}), "((3, self.dataset_params['res_h'], 
self.dataset_params\n ['res_w']), tf.float32)\n", (5537, 5619), True, 'import tensorflow as tf\n'), ((1970, 1990), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (1980, 1990), False, 'import os\n'), ((3197, 3226), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'tmp'}), '(value=tmp)\n', (3215, 3226), True, 'import tensorflow as tf\n'), ((2009, 2034), 'os.path.join', 'os.path.join', (['dir_path', 'f'], {}), '(dir_path, f)\n', (2021, 2034), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# NeuralCorefRes main
#
# Author: <NAME> <<EMAIL>>
#
# For license information, see LICENSE
import argparse
import gc
import os
import pprint
import re
import sys
from itertools import zip_longest
from typing import List
import nltk
import numpy as np
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from progress.bar import IncrementalBar
from tensorflow.keras.preprocessing import sequence
sys.path.append(os.path.abspath(f'{os.path.dirname(os.path.abspath(__file__))}/../'))
import neuralcorefres.parsedata.gap_parser as GAPParse
from neuralcorefres.common import Sentence
from neuralcorefres.feature_extraction.gender_classifier import (
GENDERED_NOUN_PREFIXES, GenderClassifier)
from neuralcorefres.model.cluster_network import ClusterNetwork
from neuralcorefres.model.coreference_network import CoreferenceNetwork
from neuralcorefres.model.word_embedding import WordEmbedding
from neuralcorefres.parsedata.parse_clusters import ParseClusters
from neuralcorefres.parsedata.preco_parser import PreCoDataType, PreCoParser
from neuralcorefres.util.data_storage import write_dependency_file
pretty_printer = pprint.PrettyPrinter()
# Pronouns and other referring expressions that NLTK treats as stopwords but
# which coreference resolution must keep as candidate mentions.
REMOVED_STOPWORDS = set(['my', 'he', 'you\'ll', 'her', 'i', 'hers', 'who', 'your',
                         'himself', 'yourself', 'own', 'you\'re', 'you\'d', 'we',
                         'myself', 'yourselves', 'yours', 'ours', 'she', 'she\'s',
                         'his', 'you\'ve', 'me', 'they', 'him', 'whom', 'them',
                         'their', 'theirs', 'herself', 'themselves', 'you',
                         'ourselves', 'itself', 'our', 'this', 'that', 'those'])
# English stopwords minus the referring expressions above.
STOPWORDS = set.difference(set(stopwords.words('english')), REMOVED_STOPWORDS)
def gender_demo(sent: str):
    """POS-tag *sent* and print a gender guess for every gendered-noun token."""
    classifier = GenderClassifier()
    for token, tag in nltk.pos_tag(nltk.word_tokenize(sent)):
        if tag in GENDERED_NOUN_PREFIXES:
            print((token, tag), classifier.get_gender(token))
    print(classifier.get_gender('marine'))
def write_deps():
    """Dependency-parse every GAP training sentence and dump the parses to disk."""
    sents: List[GAPParse.GAPCoreferenceDatapoint] = GAPParse.get_gap_data(GAPParse.GAPDataType.TRAIN, class_type=Sentence)
    progress = IncrementalBar('Parsing Sentences...', max=len(sents))
    for sentence in sents:
        sentence.parse()
        progress.next()
    write_dependency_file([s._dep_parse for s in sents], identifiers=[s._id for s in sents])
def word_embeddings():
    """ Deprecated. Use the PreCo dataset.

    Loads GAP sentences plus the Google-vectors embedding model, then prints an
    estimated embedding's nearest neighbour for every out-of-vocabulary token.
    """
    sents = GAPParse.get_gap_data([GAPParse.GAPDataType.TRAIN, GAPParse.GAPDataType.VALIDATION], class_type=Sentence)
    model = WordEmbedding(model_path='.././data/models/word_embeddings/google-vectors.model', sents=sents)
    texts = [sent.alphanumeric_text for sent in sents]
    # NOTE(review): nid and total_tokens are never appended to -- dead code.
    nid = []
    total_tokens = []
    for text in texts:
        tokenized = word_tokenize(text)
        for i, token in enumerate(tokenized):
            if not model.embedding_model.__contains__(token):
                # Estimate an embedding from a +/-5 token context window.
                embedding = model.estimate_embedding(tokenized[i-5:i+5], token)
                print(f'{token}: {model.embedding_model.similar_by_vector(embedding, topn=1)}')
    nid = set(nid)
def word_embeddings_demo():
    """ Demo of word embeddings using a pre-trained model on PreCo data. """
    model = WordEmbedding(model_path='.././data/models/word_embeddings/preco-vectors.model')
    print(model.embedding_model.most_similar(positive=['water', 'sand']))
def preco_parser_demo(data):
    """Prep PreCo datapoints for the cluster network and run a short training.

    Args:
        data: raw PreCo datapoints (as returned by PreCoParser.get_preco_data).
    """
    INPUT_MAXLEN = 200
    OUTPUT_MAXLEN = 200
    # embedding_model = WordEmbedding(model_path='.././data/models/word_embeddings/preco-vectors.model')
    embedding_model = WordEmbedding(model_path='.././data/models/word_embeddings/google-vectors.model')
    data = PreCoParser.prep_for_nn(data)
    x_train, y_train = PreCoParser.get_train_data(data, INPUT_MAXLEN, OUTPUT_MAXLEN, embedding_model)
    # Reclaim memory from the raw datapoints before building the network.
    gc.collect()
    np.set_printoptions(threshold=sys.maxsize)
    # First 190 samples train, the rest validate -- assumes len(x_train) > 190;
    # TODO confirm for smaller inputs.
    cluster_network = ClusterNetwork(x_train[:190], y_train[:190], x_train[190:],
                                           y_train[190:], inputmaxlen=INPUT_MAXLEN, outputlen=OUTPUT_MAXLEN)
    cluster_network.train()
def grouper(iterable, n, fillvalue=None):
    """Collect *iterable* into fixed-length chunks, padding the last with *fillvalue*."""
    shared_iter = iter(iterable)
    return zip_longest(*([shared_iter] * n), fillvalue=fillvalue)
def parse_clusters():
    """Convert PreCo training datapoints (from index 4000 on) into the reduced
    custom cluster format, writing one JSON file per 1000-sample batch."""
    data = PreCoParser.get_preco_data(PreCoDataType.TRAIN)[4000:]
    # NOTE(review): total_clusters is computed but never used.
    total_clusters = dict(zip(range(len(data[1])), data[1]))
    print()
    bar = IncrementalBar('*\tConverting PreCo dataset to custom dataset form', max=len(data))
    reductions: List[Tuple[List[str], Dict[int, List[List[int]]]]] = []
    batches = grouper(data, 1000)
    for i, batch in enumerate(batches):
        # Reset the accumulator for every 1000-sample batch.
        reductions: List[Tuple[List[str], Dict[int, List[List[int]]]]] = []
        for dp in batch:
            # NOTE(review): grouper pads the final batch with None, so dp[0]
            # would raise TypeError on a partial last batch -- confirm intent.
            reductions.append((dp[0], ParseClusters.get_reduced_clusters(dp[0], dict(zip(range(len(dp[1])), dp[1])))))
            bar.next()
        # +4 offsets the filenames past the batches written by earlier runs.
        ParseClusters.write_custom_to_file(reductions, f'../data/PreCo_1.0/custom_dps/train_b{i+4}.json')
def train_model(samples: int):
    """Train the coreference network on up to *samples* datapoints from the
    first custom PreCo batch file, holding out the last 10% for validation."""
    # NOTE(review): generate_data is defined but never used in this function;
    # presumably a leftover for full-dataset batched training.
    def generate_data():
        """ For batching data when training on entire dataset. """
        while True:
            for i in range(35):
                for j in range(1, 11):
                    sents, clusters = ParseClusters.get_from_file(f'../data/PreCo_1.0/custom_dps/train_b{i}.json')
                    x_train, y_train = CoreferenceNetwork.custom_cluster_to_nn_input(
                        sents[(j-1)*100:j*100], clusters[(j-1)*100:j*100])
                    yield x_train, y_train
        # NOTE(review): unreachable -- the while loop above never exits.
        print(x_train.shape, y_train.shape)
    INPUT_MAXLEN = 128
    OUTPUT_MAXLEN = 128
    sents, clusters = ParseClusters.get_from_file('../data/PreCo_1.0/custom_dps/train_b0.json')
    x_train, y_train = CoreferenceNetwork.custom_cluster_to_nn_input(sents[:samples], clusters[:samples])
    print('\n * x_train, y_train shape before:', x_train.shape, y_train.shape)
    # 90/10 train/validation split.
    coreference_network = CoreferenceNetwork(x_train[:int(len(x_train)*0.9)], y_train[:int(len(x_train)*0.9)], x_train[int(len(x_train)*0.9):],
                                           y_train[int(len(x_train)*0.9):], inputmaxlen=INPUT_MAXLEN, outputlen=OUTPUT_MAXLEN)
    eval = coreference_network.train()
    print(eval)
def predict_from_model(sent: str = None):
    """Run the trained coreference network on *sent* and pretty-print its output."""
    network = CoreferenceNetwork()
    predictions = network.predict(sent)
    print(sent)
    pprint.pprint(predictions)
if __name__ == '__main__':
    # CLI entry point: either train a model (-t) or run inference (-p SENT).
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--predict', type=str, help='Sentence to predict on')
    parser.add_argument('-t', '--train', action='store_true', help='Train a model')
    parser.add_argument('--samples', type=int, default=sys.maxsize, help='Limit the training samples')
    args = parser.parse_args()
    if args.train:
        train_model(args.samples)
    elif args.predict:
        predict_from_model(args.predict)
| [
"neuralcorefres.model.coreference_network.CoreferenceNetwork.custom_cluster_to_nn_input",
"neuralcorefres.parsedata.preco_parser.PreCoParser.get_preco_data",
"neuralcorefres.parsedata.preco_parser.PreCoParser.prep_for_nn",
"pprint.pprint",
"neuralcorefres.model.cluster_network.ClusterNetwork",
"neuralcore... | [((1168, 1190), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (1188, 1190), False, 'import pprint\n'), ((1804, 1822), 'neuralcorefres.feature_extraction.gender_classifier.GenderClassifier', 'GenderClassifier', ([], {}), '()\n', (1820, 1822), False, 'from neuralcorefres.feature_extraction.gender_classifier import GENDERED_NOUN_PREFIXES, GenderClassifier\n'), ((2117, 2187), 'neuralcorefres.parsedata.gap_parser.get_gap_data', 'GAPParse.get_gap_data', (['GAPParse.GAPDataType.TRAIN'], {'class_type': 'Sentence'}), '(GAPParse.GAPDataType.TRAIN, class_type=Sentence)\n', (2138, 2187), True, 'import neuralcorefres.parsedata.gap_parser as GAPParse\n'), ((2321, 2426), 'neuralcorefres.util.data_storage.write_dependency_file', 'write_dependency_file', (['[sent._dep_parse for sent in sents]'], {'identifiers': '[sent._id for sent in sents]'}), '([sent._dep_parse for sent in sents], identifiers=[\n sent._id for sent in sents])\n', (2342, 2426), False, 'from neuralcorefres.util.data_storage import write_dependency_file\n'), ((2506, 2616), 'neuralcorefres.parsedata.gap_parser.get_gap_data', 'GAPParse.get_gap_data', (['[GAPParse.GAPDataType.TRAIN, GAPParse.GAPDataType.VALIDATION]'], {'class_type': 'Sentence'}), '([GAPParse.GAPDataType.TRAIN, GAPParse.GAPDataType.\n VALIDATION], class_type=Sentence)\n', (2527, 2616), True, 'import neuralcorefres.parsedata.gap_parser as GAPParse\n'), ((2624, 2723), 'neuralcorefres.model.word_embedding.WordEmbedding', 'WordEmbedding', ([], {'model_path': '""".././data/models/word_embeddings/google-vectors.model"""', 'sents': 'sents'}), "(model_path=\n '.././data/models/word_embeddings/google-vectors.model', sents=sents)\n", (2637, 2723), False, 'from neuralcorefres.model.word_embedding import WordEmbedding\n'), ((3305, 3390), 'neuralcorefres.model.word_embedding.WordEmbedding', 'WordEmbedding', ([], {'model_path': '""".././data/models/word_embeddings/preco-vectors.model"""'}), 
"(model_path='.././data/models/word_embeddings/preco-vectors.model'\n )\n", (3318, 3390), False, 'from neuralcorefres.model.word_embedding import WordEmbedding\n'), ((3675, 3761), 'neuralcorefres.model.word_embedding.WordEmbedding', 'WordEmbedding', ([], {'model_path': '""".././data/models/word_embeddings/google-vectors.model"""'}), "(model_path=\n '.././data/models/word_embeddings/google-vectors.model')\n", (3688, 3761), False, 'from neuralcorefres.model.word_embedding import WordEmbedding\n'), ((3768, 3797), 'neuralcorefres.parsedata.preco_parser.PreCoParser.prep_for_nn', 'PreCoParser.prep_for_nn', (['data'], {}), '(data)\n', (3791, 3797), False, 'from neuralcorefres.parsedata.preco_parser import PreCoDataType, PreCoParser\n'), ((3821, 3899), 'neuralcorefres.parsedata.preco_parser.PreCoParser.get_train_data', 'PreCoParser.get_train_data', (['data', 'INPUT_MAXLEN', 'OUTPUT_MAXLEN', 'embedding_model'], {}), '(data, INPUT_MAXLEN, OUTPUT_MAXLEN, embedding_model)\n', (3847, 3899), False, 'from neuralcorefres.parsedata.preco_parser import PreCoDataType, PreCoParser\n'), ((3905, 3917), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3915, 3917), False, 'import gc\n'), ((3922, 3964), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'sys.maxsize'}), '(threshold=sys.maxsize)\n', (3941, 3964), True, 'import numpy as np\n'), ((3988, 4117), 'neuralcorefres.model.cluster_network.ClusterNetwork', 'ClusterNetwork', (['x_train[:190]', 'y_train[:190]', 'x_train[190:]', 'y_train[190:]'], {'inputmaxlen': 'INPUT_MAXLEN', 'outputlen': 'OUTPUT_MAXLEN'}), '(x_train[:190], y_train[:190], x_train[190:], y_train[190:],\n inputmaxlen=INPUT_MAXLEN, outputlen=OUTPUT_MAXLEN)\n', (4002, 4117), False, 'from neuralcorefres.model.cluster_network import ClusterNetwork\n'), ((4266, 4305), 'itertools.zip_longest', 'zip_longest', (['*args'], {'fillvalue': 'fillvalue'}), '(*args, fillvalue=fillvalue)\n', (4277, 4305), False, 'from itertools import zip_longest\n'), ((5711, 5784), 
'neuralcorefres.parsedata.parse_clusters.ParseClusters.get_from_file', 'ParseClusters.get_from_file', (['"""../data/PreCo_1.0/custom_dps/train_b0.json"""'], {}), "('../data/PreCo_1.0/custom_dps/train_b0.json')\n", (5738, 5784), False, 'from neuralcorefres.parsedata.parse_clusters import ParseClusters\n'), ((5808, 5895), 'neuralcorefres.model.coreference_network.CoreferenceNetwork.custom_cluster_to_nn_input', 'CoreferenceNetwork.custom_cluster_to_nn_input', (['sents[:samples]', 'clusters[:samples]'], {}), '(sents[:samples], clusters[:\n samples])\n', (5853, 5895), False, 'from neuralcorefres.model.coreference_network import CoreferenceNetwork\n'), ((6370, 6390), 'neuralcorefres.model.coreference_network.CoreferenceNetwork', 'CoreferenceNetwork', ([], {}), '()\n', (6388, 6390), False, 'from neuralcorefres.model.coreference_network import CoreferenceNetwork\n'), ((6458, 6478), 'pprint.pprint', 'pprint.pprint', (['preds'], {}), '(preds)\n', (6471, 6478), False, 'import pprint\n'), ((6521, 6546), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6544, 6546), False, 'import argparse\n'), ((1709, 1735), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (1724, 1735), False, 'from nltk.corpus import stopwords\n'), ((1850, 1874), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sent'], {}), '(sent)\n', (1868, 1874), False, 'import nltk\n'), ((2853, 2872), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (2866, 2872), False, 'from nltk.tokenize import word_tokenize\n'), ((4341, 4388), 'neuralcorefres.parsedata.preco_parser.PreCoParser.get_preco_data', 'PreCoParser.get_preco_data', (['PreCoDataType.TRAIN'], {}), '(PreCoDataType.TRAIN)\n', (4367, 4388), False, 'from neuralcorefres.parsedata.preco_parser import PreCoDataType, PreCoParser\n'), ((4962, 5065), 'neuralcorefres.parsedata.parse_clusters.ParseClusters.write_custom_to_file', 'ParseClusters.write_custom_to_file', (['reductions', 
'f"""../data/PreCo_1.0/custom_dps/train_b{i + 4}.json"""'], {}), "(reductions,\n f'../data/PreCo_1.0/custom_dps/train_b{i + 4}.json')\n", (4996, 5065), False, 'from neuralcorefres.parsedata.parse_clusters import ParseClusters\n'), ((496, 521), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (511, 521), False, 'import os\n'), ((5314, 5390), 'neuralcorefres.parsedata.parse_clusters.ParseClusters.get_from_file', 'ParseClusters.get_from_file', (['f"""../data/PreCo_1.0/custom_dps/train_b{i}.json"""'], {}), "(f'../data/PreCo_1.0/custom_dps/train_b{i}.json')\n", (5341, 5390), False, 'from neuralcorefres.parsedata.parse_clusters import ParseClusters\n'), ((5430, 5542), 'neuralcorefres.model.coreference_network.CoreferenceNetwork.custom_cluster_to_nn_input', 'CoreferenceNetwork.custom_cluster_to_nn_input', (['sents[(j - 1) * 100:j * 100]', 'clusters[(j - 1) * 100:j * 100]'], {}), '(sents[(j - 1) * 100:j * 100],\n clusters[(j - 1) * 100:j * 100])\n', (5475, 5542), False, 'from neuralcorefres.model.coreference_network import CoreferenceNetwork\n')] |
import numpy as np
import sys
import random
import os
import time
import argparse
import glob
import matplotlib.pyplot as plt
from functools import partial
try:
from mayavi import mlab as mayalab
except:
pass
np.random.seed(2)
# from contact_point_dataset_torch_multi_label import MyDataset
from hang_dataset import MyDataset
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
UTILS_DIR = os.path.abspath(os.path.join(BASE_DIR, '..', 'utils'))
sys.path.append(UTILS_DIR)
from data_helper import *
from coord_helper import *
from rotation_lib import *
from bullet_helper import *
from s2_utils import *
from simple_dataset import MyDataset
import pybullet as p
def print_helper(a):
    """Format summary statistics of array *a* as 'mean max min std'.

    Fix: the previous format string had only three placeholders for four
    arguments, so the standard deviation was silently dropped.
    """
    return '{} {} {} {}'.format(np.mean(a), np.max(a), np.min(a), np.std(a))
def cem_transform_pc_batch(pc_o, rotation_center_o, transl, aa):
    """Rotate/translate a batch of point clouds about a given rotation center.

    The center is broadcast up to shape (B, 1, 3) before re-centering.
    """
    # TODO visualize
    center = rotation_center_o
    if center.ndim == 1:
        center = center[np.newaxis, np.newaxis, :]
    elif center.ndim == 2:
        center = np.expand_dims(center, axis=1)
    # Transform in the center's frame, then shift back to world coordinates.
    return transform_pc_batch(pc_o - center, transl, aa) + center
import s3_bullet_checker_eval as bullet_checker_eval
import s3_bullet_checker as bullet_checker
def bullet_check(bi, bullet_check_one_pose, transl, aa, p_list, result_file_name, hook_urdf, object_urdf, fcl_hook_model=None, fcl_object_model=None, gui=False):
quat_tmp = quaternion_from_angle_axis(aa[bi])
transl_tmp = transl[bi]
hook_world_pos = np.array([0.7, 0., 1])
p_tmp = p_list[bi]
if not p_tmp.isConnected():
p_list[bi] = p_reset_multithread(bi, p_list, gui=gui)
p_enable_physics(p_tmp)
hook_bullet_id_tmp = p_tmp.loadURDF(hook_urdf[bi], basePosition=[0, 0, 0], baseOrientation=[0, 0, 0, 1], useFixedBase=True)
object_bullet_id_tmp = p_tmp.loadURDF(object_urdf[bi], basePosition=[0, 0, 0], baseOrientation=[0, 0, 0, 1], useFixedBase=False)
p_tmp.changeDynamics(hook_bullet_id_tmp, -1, contactStiffness=1.0, contactDamping=0.01)
p_tmp.changeDynamics(hook_bullet_id_tmp, 0, contactStiffness=0.5, contactDamping=0.01)
p_tmp.changeDynamics(object_bullet_id_tmp, -1, contactStiffness=0.05, contactDamping=0.01)
p_tmp.resetBasePositionAndOrientation(object_bullet_id_tmp, transl_tmp + hook_world_pos, quat_tmp)
p_tmp.resetBaseVelocity(object_bullet_id_tmp, linearVelocity=[0, 0, 0], angularVelocity=[0, 0, 0])
tmp = input('rw')
if tmp == 's':
p_tmp.removeBody(hook_bullet_id_tmp)
p_tmp.removeBody(object_bullet_id_tmp)
return None, None, None
flag, final_pose = bullet_check_one_pose(
p_tmp,
hook_world_pos,
hook_bullet_id_tmp,
object_bullet_id_tmp,
transl_tmp,
quat_tmp,
hook_urdf[bi],
object_urdf[bi],
fcl_hook_model[bi],
fcl_object_model[bi])
print('{} done {}'.format(bi, flag))
p_tmp.removeBody(hook_bullet_id_tmp)
p_tmp.removeBody(object_bullet_id_tmp)
return flag, final_pose[:3], final_pose[3:]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
parser.add_argument("--exp_name", default="")
parser.add_argument("--eval_epoch", type=int, default=-1)
parser.add_argument("--eval_ct", type=int, default=-1)
parser.add_argument('--test_list', default='train_list')
parser.add_argument('--train_list', default='test_list')
parser.add_argument('--n_gt_sample', type=int, default=128)
parser.add_argument('--overfit', action='store_true')
parser.add_argument('--use_bullet_checker', action='store_true')
parser.add_argument('--restrict_object_cat', default='')
args = parser.parse_args()
assert (args.eval_ct != -1) or (args.eval_epoch != -1)
cp_result_folder_dir= os.path.join(args.home_dir_data,'dataset_cp')
train_list_dir = os.path.join(cp_result_folder_dir,'labels','train_list.txt')
test_list_dir = os.path.join(cp_result_folder_dir,'labels','test_list.txt')
runs_dir = 'runs/exp_s3'
p_env = p_Env(args.home_dir_data, gui=True, physics=False)
train_set = MyDataset(args.home_dir_data, train_list_dir, use_fcl=False, args=args)
test_set = MyDataset(args.home_dir_data, test_list_dir, use_fcl=False, args=args)
hook_world_pos = np.array([0.7, 0., 1])
total_ct = 0
for i, run_folder_dir in enumerate(glob.glob('{}/*{}'.format(runs_dir, args.exp_name))):
# assert i == 0, run_folder_dir
result_folder = run_folder_dir
if args.eval_ct != -1:
eval_file_dir_arr = glob.glob('{}/eval/*_ct_{}.json'.format(run_folder_dir, args.eval_ct))
elif args.eval_epoch != -1:
eval_file_dir_arr = glob.glob('{}/eval/*eval_epoch_{}_test.json'.format(run_folder_dir, args.eval_epoch))
assert len(eval_file_dir_arr) == 1, eval_file_dir_arr
eval_file_dir = eval_file_dir_arr[0]
eval_result_dict = load_json(eval_file_dir)
for j, result_file_name in enumerate(eval_result_dict):
# if result_file_name != 'hook_wall_1_headphone_5':
# if result_file_name != 'hook_wall_60_mug_146':
# continue
# if j < 15:
# continue
if result_file_name in train_set.all_result_file_names:
dataset = train_set
else:
dataset = test_set
hook_name, object_name = split_result_file_name(result_file_name)
for eval_i, one_result_tmp in enumerate(eval_result_dict[result_file_name]):
total_ct += 1
# if total_ct % 10 == 0:
# p_env.p = p_reset(p_env.p, gui=True)
one_result = {}
for tmp in one_result_tmp:
if type(one_result_tmp[tmp]) == list:
one_result[tmp] = np.array(one_result_tmp[tmp])
if 'idx' in tmp:
one_result[tmp] = one_result[tmp].astype(int)
else:
one_result[tmp] = one_result_tmp[tmp]
pc_o = np.load(dataset.partial_pc_dir[result_file_name]['object'])[:, :3]
pc_h = np.load(dataset.partial_pc_dir[result_file_name]['hook'])[:, :3]
s1_transl = one_result['s1_transl']
s1_aa = one_result['s1_aa']
cem_init_transl = one_result['cem_init_transl']
cem_init_aa = one_result['cem_init_aa']
final_pred_transl = one_result['final_pred_transl']
final_pred_aa = one_result['final_pred_aa']
cem_out_transl = one_result['cem_out_transl']
cem_out_aa = one_result['cem_out_aa']
cem_rotation_center_o = one_result['cem_rotation_center_o']
corr_idx_top_k_o = one_result['corr_idx_top_k_o']
corr_idx_top_k_h = one_result['corr_idx_top_k_h']
cem_elite_pose = one_result['cem_elite_pose']
cem_elite_pose_scores = one_result['cem_elite_pose_scores']
print('stage 1, 2', result_file_name)
# p_env.load_pair_w_pose(result_file_name, s1_transl, s1_aa, aa=True)
# flag = input('rw')
# if flag == 's':
# break
# p_env.load_pair_w_pose(result_file_name, cem_init_transl, cem_init_aa, aa=True)
# rotation_center_bullet_id = p_draw_ball(p_env.p, cem_rotation_center_o + hook_world_pos, radius=0.005)
# bb_radius = 0.005
# bb = [cem_init_transl - bb_radius + hook_world_pos, cem_init_transl + bb_radius + hook_world_pos]
# drawAABB(bb, p_env.p)
# input('cem init pose')
# # pc_o_s1_transform = transform_pc(pc_o, s1_transl, s1_aa)
# # plot_pc(pc_o_s1_transform)
# # plot_pc(pc_o_s1_transform[corr_idx_top_k_o], color=[1, 0, 0], scale=0.002)
# # plot_pc(pc_h)
# # plot_pc(pc_h[corr_idx_top_k_h], color=[0, 1, 0], scale=0.002)
# # plot_pc(cem_rotation_center_o[np.newaxis, :], color=[0, 0, 1], scale=0.002)
# # mayalab.show()
# # print('stage 3', result_file_name, one_result['succ'])
# pc_o_cem_init = transform_pc(pc_o, cem_init_transl, cem_init_aa)
# # # assert np.allclose( pc_h[corr_idx_top_k_h])
# # print('object mean', np.mean(pc_o_cem_init[corr_idx_top_k_o], axis=0))
# # print('hook mean', np.mean(pc_h[corr_idx_top_k_h], axis=0))
# # p_env.load_pair_w_pose(result_file_name, s1_transl, s1_aa, aa=True)
# # input('rw')
# # p_env.load_pair_w_pose(result_file_name, cem_init_transl, cem_init_aa, aa=True)
# # plot_pc(pc_h)
# # plot_pc(pc_h[corr_idx_top_k_h], color=[0, 1, 0], scale=0.002)
# # plot_pc(cem_rotation_center_o[np.newaxis, :], color=[0, 0, 1], scale=0.002)
# # plot_pc(pc_o_cem_init)
# # input('rw')
# # print('cem')
# # print(np.max(pc_h, axis=0) - np.min(pc_h, axis=0))
# for ii in range(cem_elite_pose.shape[0]):
# pose_tmp = cem_elite_pose[ii]
# # pose_tmp[:3] = [0, 0, 0]
# pc_o_tmp = cem_transform_pc_batch(pc_o_cem_init[np.newaxis, :, :], cem_rotation_center_o, pose_tmp[:3][np.newaxis, :], pose_tmp[3:][np.newaxis, :])
# pose_transl, pose_aa = best_fit_transform(pc_o, pc_o_tmp[0])
# p_env.load_pair_w_pose(result_file_name, pose_transl, pose_aa, aa=True)
# # bb_center =
# print('score', cem_elite_pose_scores[ii], one_result['succ'])
# print('cem pose', pose_tmp[:3], pose_tmp[3:])
# input('rw')
# # print(pose_tmp)
# # plot_pc(pc_o_tmp[0])
# # plot_pc(cem_rotation_center_o[np.newaxis, :], color=[0, 0, 1], scale=0.002)
# # plot_pc(pc_h)
# # plot_pc(cem_rotation_center_o[np.newaxis, :], color=[0, 0, 1])
# # mayalab.show()
if one_result['succ'] != 1:
continue
print('succ', one_result['succ'])
# run bullet check
# bullet_check_one_pose = bullet_checker_eval.check_one_pose_simple if args.use_bullet_checker else bullet_checker.check_one_pose_simple
# # bullet check
# bullet_check_func = partial(
# bullet_check,
# bullet_check_one_pose=bullet_check_one_pose,
# transl=[final_pred_transl],
# aa=[final_pred_aa],
# p_list=[p_env.p],
# result_file_name=[result_file_name],
# hook_urdf=[p_env.hook_dict[hook_name]['urdf']],
# object_urdf=[p_env.object_dict[object_name]['urdf']],
# fcl_hook_model=[None],
# fcl_object_model=[None],
# gui=True,
# )
# flag, _, _ = bullet_check_func(0)
# print(final_pred_transl, final_pred_aa)
p_env.load_pair_w_pose(result_file_name, final_pred_transl, final_pred_aa, aa=True)
print(cem_elite_pose_scores)
print('score', cem_elite_pose_scores[-1], one_result['succ'])
input('rw')
# p_env.p.removeBody(rotation_center_bullet_id)
p_env.p.removeAllUserDebugItems() | [
"numpy.mean",
"argparse.ArgumentParser",
"numpy.std",
"os.path.join",
"simple_dataset.MyDataset",
"numpy.max",
"numpy.array",
"numpy.random.seed",
"numpy.expand_dims",
"numpy.min",
"os.path.abspath",
"numpy.load",
"sys.path.append"
] | [((213, 230), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (227, 230), True, 'import numpy as np\n'), ((455, 481), 'sys.path.append', 'sys.path.append', (['UTILS_DIR'], {}), '(UTILS_DIR)\n', (470, 481), False, 'import sys\n'), ((361, 386), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (376, 386), False, 'import os\n'), ((416, 453), 'os.path.join', 'os.path.join', (['BASE_DIR', '""".."""', '"""utils"""'], {}), "(BASE_DIR, '..', 'utils')\n", (428, 453), False, 'import os\n'), ((1505, 1528), 'numpy.array', 'np.array', (['[0.7, 0.0, 1]'], {}), '([0.7, 0.0, 1])\n', (1513, 1528), True, 'import numpy as np\n'), ((2955, 2980), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2978, 2980), False, 'import argparse\n'), ((3670, 3716), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""dataset_cp"""'], {}), "(args.home_dir_data, 'dataset_cp')\n", (3682, 3716), False, 'import os\n'), ((3735, 3797), 'os.path.join', 'os.path.join', (['cp_result_folder_dir', '"""labels"""', '"""train_list.txt"""'], {}), "(cp_result_folder_dir, 'labels', 'train_list.txt')\n", (3747, 3797), False, 'import os\n'), ((3813, 3874), 'os.path.join', 'os.path.join', (['cp_result_folder_dir', '"""labels"""', '"""test_list.txt"""'], {}), "(cp_result_folder_dir, 'labels', 'test_list.txt')\n", (3825, 3874), False, 'import os\n'), ((3976, 4047), 'simple_dataset.MyDataset', 'MyDataset', (['args.home_dir_data', 'train_list_dir'], {'use_fcl': '(False)', 'args': 'args'}), '(args.home_dir_data, train_list_dir, use_fcl=False, args=args)\n', (3985, 4047), False, 'from simple_dataset import MyDataset\n'), ((4060, 4130), 'simple_dataset.MyDataset', 'MyDataset', (['args.home_dir_data', 'test_list_dir'], {'use_fcl': '(False)', 'args': 'args'}), '(args.home_dir_data, test_list_dir, use_fcl=False, args=args)\n', (4069, 4130), False, 'from simple_dataset import MyDataset\n'), ((4149, 4172), 'numpy.array', 'np.array', (['[0.7, 0.0, 1]'], {}), 
'([0.7, 0.0, 1])\n', (4157, 4172), True, 'import numpy as np\n'), ((721, 731), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (728, 731), True, 'import numpy as np\n'), ((733, 742), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (739, 742), True, 'import numpy as np\n'), ((744, 753), 'numpy.min', 'np.min', (['a'], {}), '(a)\n', (750, 753), True, 'import numpy as np\n'), ((755, 764), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (761, 764), True, 'import numpy as np\n'), ((1017, 1058), 'numpy.expand_dims', 'np.expand_dims', (['rotation_center_o'], {'axis': '(1)'}), '(rotation_center_o, axis=1)\n', (1031, 1058), True, 'import numpy as np\n'), ((5607, 5666), 'numpy.load', 'np.load', (["dataset.partial_pc_dir[result_file_name]['object']"], {}), "(dataset.partial_pc_dir[result_file_name]['object'])\n", (5614, 5666), True, 'import numpy as np\n'), ((5685, 5742), 'numpy.load', 'np.load', (["dataset.partial_pc_dir[result_file_name]['hook']"], {}), "(dataset.partial_pc_dir[result_file_name]['hook'])\n", (5692, 5742), True, 'import numpy as np\n'), ((5434, 5463), 'numpy.array', 'np.array', (['one_result_tmp[tmp]'], {}), '(one_result_tmp[tmp])\n', (5442, 5463), True, 'import numpy as np\n')] |
"""Main script for controlling the calculation of the IS spectrum.
Calculate spectra from specified parameters as shown in the examples given in the class
methods, create a new set-up with the `Reproduce` abstract base class in `reproduce.py` or
use one of the pre-defined classes from `reproduce.py`.
"""
# The start method of the multiprocessing module was changed from python3.7 to python3.8
# (macOS). Instead of using 'fork', 'spawn' is the new default. To be able to use global
# variables across all parallel processes, the start method must be reset to 'fork'. See
# https://tinyurl.com/yyxxfxst for more info.
import multiprocessing as mp
mp.set_start_method("fork")
import matplotlib # pylint: disable=C0413
import matplotlib.pyplot as plt # pylint: disable=C0413
import numpy as np # pylint: disable=C0413
import isr_spectrum.inputs.config as cf
from isr_spectrum.plotting import hello_kitty as hk
from isr_spectrum.plotting import reproduce
from isr_spectrum.plotting.plot_class import PlotClass
# Customize matplotlib
matplotlib.rcParams.update(
{
"text.usetex": True,
"font.family": "serif",
"axes.unicode_minus": False,
"pgf.texsystem": "pdflatex",
}
)
class Simulation:
def __init__(self):
self.from_file = False
self.f = np.ndarray([])
self.data = []
self.meta_data = []
self.legend_txt = []
self.ridge_txt = []
self.plot = PlotClass()
# self.r = reproduce.PlotNumerical(self.plot) # $\label{lst:spectra}$
# self.r = reproduce.PlotTestDebye(self.plot)
self.r = reproduce.PlotSpectra(self.plot)
# self.r = reproduce.PlotIonLine(self.plot)
# self.r = reproduce.PlotPlasmaLine(self.plot)
# self.r = reproduce.PlotTemperature(self.plot)
# self.r = reproduce.PlotHKExtremes(self.plot)
def create_data(self):
"""Create IS spectra.
The spectra should be appended to the `self.data` list, giving a
list of spectra that are themselves `np.ndarrays`, or into a list
of such lists as the aforementioned.
A list of spectra can be plotted in `plot_normal`, while a list of
lists can be plotted by `plot_ridge`. When using `plot_ridge`, it is
assumed that all the lists in the outer list is of equal length.
The list `self.ridge_txt` should be the same length as the length
of the outer list when plotting with `plot_ridge`, since this text
will go on the left of every ridge. The list `self.legend_txt` should
be the same length as the length of the inner lists, and will give
the legend for the spectra given in the inner lists.
Notes:
::
Possible items in the sys_set dictionary include:
K_RADAR -- Radar wavenumber
(= -4pi(radar frequency)/(speed of light)) [m^(-1)]
B -- Magnetic field strength [T]
MI -- Ion mass in atomic mass units [u]
NE -- Electron number density [m^(-3)]
NU_E -- Electron collision frequency [Hz]
NU_I -- Ion collision frequency [Hz]
T_E -- Electron temperature [K]
T_I -- Ion temperature [K]
T_ES -- Temperature of suprathermal electrons in the
gauss_shell VDF [K]
THETA -- Aspect angle [1]
Z -- Height of real data [100, 599] [km]
mat_file -- Important when using real data and decides
the time of day
pitch_angle -- list of integers that determine which slices
of the pitch angles are used. 'all' uses all
Examples:
::
```
TEMPS = [2000, 5000]
methods = ['maxwell', 'kappa']
sys_set = {'B': 5e-4, 'MI': 16, 'NE': 2e11, 'NU_E': 0, 'NU_I': 0,
'T_E': 5000, 'T_I': 2000, 'T_ES': 90000,
'THETA': 40 * np.pi / 180, 'Z': 599,
'mat_file': 'fe_zmuE-01.mat'}
params = {'kappa': 3, 'vdf': 'kappa', 'area': False}
for T in TEMPS:
ridge = []
sys_set['T_E'] = T
self.ridge_txt.append(f'$T_e = {T}$ K')
for m in methods:
self.f, s, meta_data = isr.isr_spectrum(m, sys_set, **params)
self.meta_data.append(meta_data)
ridge.append(s)
self.data.append(ridge)
# For a nicer legend, this is added manually
self.legend_txt.append('Maxwellian')
self.legend_txt.append('Kappa')
```
"""
# self.from_file = True
if self.from_file is False:
print(
f"Parallelising integral using the {'numba' if cf.NJIT else 'multiprocessing'} module."
)
self.r.create_it("../figures/temp_ridge.npz", from_file=self.from_file)
self.f = self.r.f
self.data = self.r.data
self.legend_txt = self.r.legend_txt
self.ridge_txt = self.r.ridge_txt
self.meta_data = self.r.meta_data
def plot_data(self):
"""Plot the created data from `self.data`.
If you want to only plot the plasma line, set
```
self.plot.plasma = True
```
`self.plot.plot_normal()` accepts a list of `np.ndarray`s and
`self.plot.plot_ridge()` accepts a list of lists of `np.ndarray`s,
i.e. a list of the type you send to `self.plot.plot_normal()`.
Examples:
::
```
# Given the example in self.create_data()
# self.plot.plasma = True
self.plot.plot_normal(self.f, self.data[0], 'plot',
self.legend_txt)
self.plot.plot_normal(self.f, self.data[0], 'semilogy',
self.legend_txt)
self.plot.plot_ridge(self.f, self.data, 'plot', self.legend_txt,
self.ridge_txt)
self.plot.plot_ridge(self.f, self.data, 'semilogy',
self.legend_txt, self.ridge_txt)
```
"""
self.r.plot_it()
def save_handle(self, mode):
if mode == "setUp":
if self.plot.save in ["y", "yes"]:
self.plot.save_it(
self.f, self.data, self.legend_txt, self.ridge_txt, self.meta_data
)
elif mode == "tearDown":
if self.plot.save in ["y", "yes"]:
self.plot.pdffig.close()
plt.show()
def run(self):
self.create_data()
self.save_handle("setUp")
self.plot_data()
self.save_handle("tearDown")
def main():
Simulation().run()
if False:
hk.HelloKitty(1).run() # $\label{lst:hk}$
if __name__ == "__main__":
main()
| [
"isr_spectrum.plotting.reproduce.PlotSpectra",
"matplotlib.rcParams.update",
"isr_spectrum.plotting.hello_kitty.HelloKitty",
"isr_spectrum.plotting.plot_class.PlotClass",
"numpy.ndarray",
"multiprocessing.set_start_method",
"matplotlib.pyplot.show"
] | [((652, 679), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""fork"""'], {}), "('fork')\n", (671, 679), True, 'import multiprocessing as mp\n'), ((1041, 1176), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'text.usetex': True, 'font.family': 'serif', 'axes.unicode_minus': False,\n 'pgf.texsystem': 'pdflatex'}"], {}), "({'text.usetex': True, 'font.family': 'serif',\n 'axes.unicode_minus': False, 'pgf.texsystem': 'pdflatex'})\n", (1067, 1176), False, 'import matplotlib\n'), ((1310, 1324), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (1320, 1324), True, 'import numpy as np\n'), ((1453, 1464), 'isr_spectrum.plotting.plot_class.PlotClass', 'PlotClass', ([], {}), '()\n', (1462, 1464), False, 'from isr_spectrum.plotting.plot_class import PlotClass\n'), ((1615, 1647), 'isr_spectrum.plotting.reproduce.PlotSpectra', 'reproduce.PlotSpectra', (['self.plot'], {}), '(self.plot)\n', (1636, 1647), False, 'from isr_spectrum.plotting import reproduce\n'), ((6628, 6638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6636, 6638), True, 'import matplotlib.pyplot as plt\n'), ((6841, 6857), 'isr_spectrum.plotting.hello_kitty.HelloKitty', 'hk.HelloKitty', (['(1)'], {}), '(1)\n', (6854, 6857), True, 'from isr_spectrum.plotting import hello_kitty as hk\n')] |
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, IndepVarComp, Group
from openmdao.utils.assert_utils import assert_check_partials
from CADRE.orbit_dymos.ori_comp import ORIComp
class TestOrbitEOM(unittest.TestCase):
@classmethod
def setUpClass(cls):
nn = 4
cls.p = Problem(model=Group())
ivc = cls.p.model.add_subsystem('ivc', IndepVarComp(), promotes_outputs=['*'])
ivc.add_output('r_e2b_I', val=np.ones((nn, 3)))
ivc.add_output('v_e2b_I', val=np.ones((nn, 3)))
# ivc.add_output('hunit_e2b_I', val=np.ones((nn, 3)))
cls.p.model.add_subsystem('ori_comp', ORIComp(num_nodes=nn),
promotes_inputs=['*'], promotes_outputs=['*'])
cls.p.setup(check=True, force_alloc_complex=True)
cls.p['r_e2b_I'] = np.random.rand(nn, 3)*10000
cls.p['v_e2b_I'] = np.random.rand(nn, 3)*10
# cls.p['hunit_e2b_I'] = np.random.rand(nn, 3)
cls.p.run_model()
def test_results(self):
pass
def test_partials(self):
np.set_printoptions(linewidth=1024, edgeitems=1000)
cpd = self.p.check_partials(method='fd')
assert_check_partials(cpd)
| [
"CADRE.orbit_dymos.ori_comp.ORIComp",
"numpy.random.rand",
"numpy.ones",
"openmdao.utils.assert_utils.assert_check_partials",
"openmdao.api.IndepVarComp",
"openmdao.api.Group",
"numpy.set_printoptions"
] | [((1121, 1172), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(1024)', 'edgeitems': '(1000)'}), '(linewidth=1024, edgeitems=1000)\n', (1140, 1172), True, 'import numpy as np\n'), ((1230, 1256), 'openmdao.utils.assert_utils.assert_check_partials', 'assert_check_partials', (['cpd'], {}), '(cpd)\n', (1251, 1256), False, 'from openmdao.utils.assert_utils import assert_check_partials\n'), ((426, 440), 'openmdao.api.IndepVarComp', 'IndepVarComp', ([], {}), '()\n', (438, 440), False, 'from openmdao.api import Problem, IndepVarComp, Group\n'), ((687, 708), 'CADRE.orbit_dymos.ori_comp.ORIComp', 'ORIComp', ([], {'num_nodes': 'nn'}), '(num_nodes=nn)\n', (694, 708), False, 'from CADRE.orbit_dymos.ori_comp import ORIComp\n'), ((878, 899), 'numpy.random.rand', 'np.random.rand', (['nn', '(3)'], {}), '(nn, 3)\n', (892, 899), True, 'import numpy as np\n'), ((933, 954), 'numpy.random.rand', 'np.random.rand', (['nn', '(3)'], {}), '(nn, 3)\n', (947, 954), True, 'import numpy as np\n'), ((369, 376), 'openmdao.api.Group', 'Group', ([], {}), '()\n', (374, 376), False, 'from openmdao.api import Problem, IndepVarComp, Group\n'), ((504, 520), 'numpy.ones', 'np.ones', (['(nn, 3)'], {}), '((nn, 3))\n', (511, 520), True, 'import numpy as np\n'), ((560, 576), 'numpy.ones', 'np.ones', (['(nn, 3)'], {}), '((nn, 3))\n', (567, 576), True, 'import numpy as np\n')] |
# Slightly modified from original Lucid library
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods for displaying images from Numpy arrays."""
from __future__ import absolute_import, division, print_function
# from io import BytesIO
import base64
from string import Template
import numpy as np
import IPython.display
from lucent.misc.io.serialize_array import serialize_array, array_to_jsbuffer
from lucent.misc.io.collapse_channels import collapse_channels
# create logger with module name, e.g. lucid.misc.io.showing
# log = logging.getLogger(__name__)
def _display_html(html_str):
IPython.display.display(IPython.display.HTML(html_str))
def _image_url(array, fmt='png', mode="data", quality=90, domain=None):
"""Create a data URL representing an image from a PIL.Image.
Args:
image: a numpy array
mode: presently only supports "data" for data URL
Returns:
URL representing image
"""
supported_modes = ("data")
if mode not in supported_modes:
message = "Unsupported mode '%s', should be one of '%s'."
raise ValueError(message, mode, supported_modes)
image_data = serialize_array(array, fmt=fmt, quality=quality, domain=domain)
base64_byte_string = base64.b64encode(image_data).decode('ascii')
return "data:image/" + fmt.upper() + ";base64," + base64_byte_string
# public functions
def _image_html(array, width=None, domain=None, fmt='png'):
url = _image_url(array, domain=domain, fmt=fmt)
style = "image-rendering: pixelated;"
if width is not None:
style += "width: {width}px;".format(width=width)
return """<img src="{url}" style="{style}">""".format(url=url, style=style)
def image(array, domain=None, width=None, fmt='png'):
"""Display an image.
Args:
array: NumPy array representing the image
fmt: Image format e.g. png, jpeg
domain: Domain of pixel values, inferred from min & max values if None
width: width of output image, scaled using nearest neighbor interpolation.
size unchanged if None
"""
_display_html(
_image_html(array, width=width, domain=domain, fmt=fmt)
)
def images(arrays, labels=None, domain=None, width=None):
"""Display a list of images with optional labels.
Args:
arrays: A list of NumPy arrays representing images
labels: A list of strings to label each image.
Defaults to show index if None
domain: Domain of pixel values, inferred from min & max values if None
width: width of output image, scaled using nearest neighbor interpolation.
size unchanged if None
"""
string = '<div style="display: flex; flex-direction: row;">'
for i, array in enumerate(arrays):
label = labels[i] if labels is not None else i
img_html = _image_html(array, width=width, domain=domain)
string += """<div style="margin-right:10px; margin-top: 4px;">
{label} <br/>
{img_html}
</div>""".format(label=label, img_html=img_html)
string += "</div>"
_display_html(string)
def show(thing, domain=(0, 1), **kwargs):
"""Display a numpy array without having to specify what it represents.
This module will attempt to infer how to display your tensor based on its
rank, shape and dtype. rank 4 tensors will be displayed as image grids, rank
2 and 3 tensors as images.
For tensors of rank 3 or 4, the innermost dimension is interpreted as channel.
Depending on the size of that dimension, different types of images will be
generated:
shp[-1]
= 1 -- Black and white image.
= 2 -- See >4
= 3 -- RGB image.
= 4 -- RGBA image.
> 4 -- Collapse into an RGB image.
If all positive: each dimension gets an evenly spaced hue.
If pos and neg: each dimension gets two hues
(180 degrees apart) for positive and negative.
Common optional arguments:
domain: range values can be between, for displaying normal images
None = infer domain with heuristics
(a,b) = clip values to be between a (min) and b (max).
w: width of displayed images
None = display 1 pixel per value
int = display n pixels per value (often used for small images)
labels: if displaying multiple objects, label for each object.
None = label with index
[] = no labels
[...] = label with corresponding list item
"""
def collapse_if_needed(arr):
channels = arr.shape[-1]
if channels not in [1, 3, 4]:
# log.debug("Collapsing %s channels into 3 RGB channels." % K)
return collapse_channels(arr)
return arr
if isinstance(thing, np.ndarray):
rank = len(thing.shape)
if rank in [3, 4]:
thing = collapse_if_needed(thing)
if rank == 4:
# log.debug("Show is assuming rank 4 tensor to be a list of images.")
images(thing, domain=domain, **kwargs)
elif rank in (2, 3):
# log.debug("Show is assuming rank 2 or 3 tensor to be an image.")
image(thing, domain=domain, **kwargs)
else:
# log.warning("Show only supports numpy arrays of rank 2-4. Using repr().")
print(repr(thing))
elif isinstance(thing, (list, tuple)):
# log.debug("Show is assuming list or tuple to be a collection of images.")
if isinstance(thing[0], np.ndarray) and len(thing[0].shape) == 3:
thing = [collapse_if_needed(t) for t in thing]
images(thing, domain=domain, **kwargs)
else:
# log.warning("Show only supports numpy arrays so far. Using repr().")
print(repr(thing))
def textured_mesh(mesh, texture, background='0xffffff'):
texture_data_url = _image_url(texture, fmt='jpeg', quality=90)
code = Template('''
<input id="unfoldBox" type="checkbox" class="control">Unfold</input>
<input id="shadeBox" type="checkbox" class="control">Shade</input>
<script src="https://cdn.rawgit.com/mrdoob/three.js/r89/build/three.min.js"></script>
<script src="https://cdn.rawgit.com/mrdoob/three.js/r89/examples/js/controls/OrbitControls.js"></script>
<script type="x-shader/x-vertex" id="vertexShader">
uniform float viewAspect;
uniform float unfolding_perc;
uniform float shadeFlag;
varying vec2 text_coord;
varying float shading;
void main () {
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
vec4 plane_position = vec4((uv.x*2.0-1.0)/viewAspect, (uv.y*2.0-1.0), 0, 1);
gl_Position = mix(gl_Position, plane_position, unfolding_perc);
//not normalized on purpose to simulate the rotation
shading = 1.0;
if (shadeFlag > 0.5) {
vec3 light_vector = mix(normalize(cameraPosition-position), normal, unfolding_perc);
shading = dot(normal, light_vector);
}
text_coord = uv;
}
</script>
<script type="x-shader/x-fragment" id="fragmentShader">
uniform float unfolding_perc;
varying vec2 text_coord;
varying float shading;
uniform sampler2D texture;
void main() {
gl_FragColor = texture2D(texture, text_coord);
gl_FragColor.rgb *= shading;
}
</script>
<script>
"use strict";
const el = id => document.getElementById(id);
const unfoldDuration = 1000.0;
var camera, scene, renderer, controls, material;
var unfolded = false;
var unfoldStart = -unfoldDuration*10.0;
init();
animate(0.0);
function init() {
var width = 800, height = 600;
scene = new THREE.Scene();
camera = new THREE.PerspectiveCamera(42, width / height, 0.1, 100);
camera.position.z = 3.3;
scene.add(camera);
controls = new THREE.OrbitControls( camera );
var geometry = new THREE.BufferGeometry();
geometry.addAttribute( 'position', new THREE.BufferAttribute($verts, 3 ) );
geometry.addAttribute( 'uv', new THREE.BufferAttribute($uvs, 2) );
geometry.setIndex(new THREE.BufferAttribute($faces, 1 ));
geometry.computeVertexNormals();
var texture = new THREE.TextureLoader().load('$tex_data_url', update);
material = new THREE.ShaderMaterial( {
uniforms: {
viewAspect: {value: width/height},
unfolding_perc: { value: 0.0 },
shadeFlag: { value: 0.0 },
texture: { type: 't', value: texture },
},
side: THREE.DoubleSide,
vertexShader: el( 'vertexShader' ).textContent,
fragmentShader: el( 'fragmentShader' ).textContent
});
var mesh = new THREE.Mesh(geometry, material);
scene.add(mesh);
scene.background = new THREE.Color( $background );
renderer = new THREE.WebGLRenderer({antialias: true});
renderer.setSize(width, height);
document.body.appendChild(renderer.domElement);
// render on change only
controls.addEventListener('change', function() {
// fold mesh back if user wants to interact
el('unfoldBox').checked = false;
update();
});
document.querySelectorAll('.control').forEach(e=>{
e.addEventListener('change', update);
});
}
function update() {
requestAnimationFrame(animate);
}
function ease(x) {
x = Math.min(Math.max(x, 0.0), 1.0);
return x*x*(3.0 - 2.0*x);
}
function animate(time) {
var unfoldFlag = el('unfoldBox').checked;
if (unfolded != unfoldFlag) {
unfolded = unfoldFlag;
unfoldStart = time - Math.max(unfoldStart+unfoldDuration-time, 0.0);
}
var unfoldTime = (time-unfoldStart) / unfoldDuration;
if (unfoldTime < 1.0) {
update();
}
var unfoldVal = ease(unfoldTime);
unfoldVal = unfolded ? unfoldVal : 1.0 - unfoldVal;
material.uniforms.unfolding_perc.value = unfoldVal;
material.uniforms.shadeFlag.value = el('shadeBox').checked ? 1.0 : 0.0;
controls.update();
renderer.render(scene, camera);
}
</script>
''').substitute(
verts=array_to_jsbuffer(mesh['position'].ravel()),
uvs=array_to_jsbuffer(mesh['uv'].ravel()),
faces=array_to_jsbuffer(np.uint32(mesh['face'].ravel())),
tex_data_url=texture_data_url,
background=background,
)
_display_html(code)
def animate_sequence(sequence, domain=(0, 1), fmt='png'):
steps, height, width, _ = sequence.shape
sequence = np.concatenate(sequence, 1)
code = Template('''
<style>
#animation {
width: ${width}px;
height: ${height}px;
background: url('$image_url') left center;
animation: play 1s steps($steps) infinite alternate;
}
@keyframes play {
100% { background-position: -${sequence_width}px; }
}
</style><div id='animation'></div>
''').substitute(
image_url=_image_url(sequence, domain=domain, fmt=fmt),
sequence_width=width*steps,
width=width,
height=height,
steps=steps,
)
_display_html(code)
| [
"string.Template",
"base64.b64encode",
"lucent.misc.io.collapse_channels.collapse_channels",
"numpy.concatenate",
"lucent.misc.io.serialize_array.serialize_array"
] | [((1795, 1858), 'lucent.misc.io.serialize_array.serialize_array', 'serialize_array', (['array'], {'fmt': 'fmt', 'quality': 'quality', 'domain': 'domain'}), '(array, fmt=fmt, quality=quality, domain=domain)\n', (1810, 1858), False, 'from lucent.misc.io.serialize_array import serialize_array, array_to_jsbuffer\n'), ((11642, 11669), 'numpy.concatenate', 'np.concatenate', (['sequence', '(1)'], {}), '(sequence, 1)\n', (11656, 11669), True, 'import numpy as np\n'), ((1884, 1912), 'base64.b64encode', 'base64.b64encode', (['image_data'], {}), '(image_data)\n', (1900, 1912), False, 'import base64\n'), ((5527, 5549), 'lucent.misc.io.collapse_channels.collapse_channels', 'collapse_channels', (['arr'], {}), '(arr)\n', (5544, 5549), False, 'from lucent.misc.io.collapse_channels import collapse_channels\n'), ((6724, 11243), 'string.Template', 'Template', (['"""\n <input id="unfoldBox" type="checkbox" class="control">Unfold</input>\n <input id="shadeBox" type="checkbox" class="control">Shade</input>\n\n <script src="https://cdn.rawgit.com/mrdoob/three.js/r89/build/three.min.js"></script>\n <script src="https://cdn.rawgit.com/mrdoob/three.js/r89/examples/js/controls/OrbitControls.js"></script>\n\n <script type="x-shader/x-vertex" id="vertexShader">\n uniform float viewAspect;\n uniform float unfolding_perc;\n uniform float shadeFlag;\n varying vec2 text_coord;\n varying float shading;\n void main () {\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n vec4 plane_position = vec4((uv.x*2.0-1.0)/viewAspect, (uv.y*2.0-1.0), 0, 1);\n gl_Position = mix(gl_Position, plane_position, unfolding_perc);\n\n //not normalized on purpose to simulate the rotation\n shading = 1.0;\n if (shadeFlag > 0.5) {\n vec3 light_vector = mix(normalize(cameraPosition-position), normal, unfolding_perc);\n shading = dot(normal, light_vector);\n }\n\n text_coord = uv;\n }\n </script>\n\n <script type="x-shader/x-fragment" id="fragmentShader">\n uniform float unfolding_perc;\n varying 
vec2 text_coord;\n varying float shading;\n uniform sampler2D texture;\n\n void main() {\n gl_FragColor = texture2D(texture, text_coord);\n gl_FragColor.rgb *= shading;\n }\n </script>\n\n <script>\n "use strict";\n\n const el = id => document.getElementById(id);\n\n const unfoldDuration = 1000.0;\n var camera, scene, renderer, controls, material;\n var unfolded = false;\n var unfoldStart = -unfoldDuration*10.0;\n\n init();\n animate(0.0);\n\n function init() {\n var width = 800, height = 600;\n\n scene = new THREE.Scene();\n\n camera = new THREE.PerspectiveCamera(42, width / height, 0.1, 100);\n camera.position.z = 3.3;\n scene.add(camera);\n\n controls = new THREE.OrbitControls( camera );\n\n var geometry = new THREE.BufferGeometry();\n geometry.addAttribute( \'position\', new THREE.BufferAttribute($verts, 3 ) );\n geometry.addAttribute( \'uv\', new THREE.BufferAttribute($uvs, 2) );\n geometry.setIndex(new THREE.BufferAttribute($faces, 1 ));\n geometry.computeVertexNormals();\n\n var texture = new THREE.TextureLoader().load(\'$tex_data_url\', update);\n material = new THREE.ShaderMaterial( {\n uniforms: {\n viewAspect: {value: width/height},\n unfolding_perc: { value: 0.0 },\n shadeFlag: { value: 0.0 },\n texture: { type: \'t\', value: texture },\n },\n side: THREE.DoubleSide,\n vertexShader: el( \'vertexShader\' ).textContent,\n fragmentShader: el( \'fragmentShader\' ).textContent\n });\n\n var mesh = new THREE.Mesh(geometry, material);\n scene.add(mesh);\n scene.background = new THREE.Color( $background );\n\n renderer = new THREE.WebGLRenderer({antialias: true});\n renderer.setSize(width, height);\n document.body.appendChild(renderer.domElement);\n\n // render on change only\n controls.addEventListener(\'change\', function() {\n // fold mesh back if user wants to interact\n el(\'unfoldBox\').checked = false;\n update();\n });\n document.querySelectorAll(\'.control\').forEach(e=>{\n e.addEventListener(\'change\', update);\n });\n }\n\n function update() {\n 
requestAnimationFrame(animate);\n }\n\n function ease(x) {\n x = Math.min(Math.max(x, 0.0), 1.0);\n return x*x*(3.0 - 2.0*x);\n }\n\n function animate(time) {\n var unfoldFlag = el(\'unfoldBox\').checked;\n if (unfolded != unfoldFlag) {\n unfolded = unfoldFlag;\n unfoldStart = time - Math.max(unfoldStart+unfoldDuration-time, 0.0);\n }\n var unfoldTime = (time-unfoldStart) / unfoldDuration;\n if (unfoldTime < 1.0) {\n update();\n }\n var unfoldVal = ease(unfoldTime);\n unfoldVal = unfolded ? unfoldVal : 1.0 - unfoldVal;\n material.uniforms.unfolding_perc.value = unfoldVal;\n\n material.uniforms.shadeFlag.value = el(\'shadeBox\').checked ? 1.0 : 0.0;\n controls.update();\n renderer.render(scene, camera);\n }\n </script>\n """'], {}), '(\n """\n <input id="unfoldBox" type="checkbox" class="control">Unfold</input>\n <input id="shadeBox" type="checkbox" class="control">Shade</input>\n\n <script src="https://cdn.rawgit.com/mrdoob/three.js/r89/build/three.min.js"></script>\n <script src="https://cdn.rawgit.com/mrdoob/three.js/r89/examples/js/controls/OrbitControls.js"></script>\n\n <script type="x-shader/x-vertex" id="vertexShader">\n uniform float viewAspect;\n uniform float unfolding_perc;\n uniform float shadeFlag;\n varying vec2 text_coord;\n varying float shading;\n void main () {\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n vec4 plane_position = vec4((uv.x*2.0-1.0)/viewAspect, (uv.y*2.0-1.0), 0, 1);\n gl_Position = mix(gl_Position, plane_position, unfolding_perc);\n\n //not normalized on purpose to simulate the rotation\n shading = 1.0;\n if (shadeFlag > 0.5) {\n vec3 light_vector = mix(normalize(cameraPosition-position), normal, unfolding_perc);\n shading = dot(normal, light_vector);\n }\n\n text_coord = uv;\n }\n </script>\n\n <script type="x-shader/x-fragment" id="fragmentShader">\n uniform float unfolding_perc;\n varying vec2 text_coord;\n varying float shading;\n uniform sampler2D texture;\n\n void main() {\n gl_FragColor = 
texture2D(texture, text_coord);\n gl_FragColor.rgb *= shading;\n }\n </script>\n\n <script>\n "use strict";\n\n const el = id => document.getElementById(id);\n\n const unfoldDuration = 1000.0;\n var camera, scene, renderer, controls, material;\n var unfolded = false;\n var unfoldStart = -unfoldDuration*10.0;\n\n init();\n animate(0.0);\n\n function init() {\n var width = 800, height = 600;\n\n scene = new THREE.Scene();\n\n camera = new THREE.PerspectiveCamera(42, width / height, 0.1, 100);\n camera.position.z = 3.3;\n scene.add(camera);\n\n controls = new THREE.OrbitControls( camera );\n\n var geometry = new THREE.BufferGeometry();\n geometry.addAttribute( \'position\', new THREE.BufferAttribute($verts, 3 ) );\n geometry.addAttribute( \'uv\', new THREE.BufferAttribute($uvs, 2) );\n geometry.setIndex(new THREE.BufferAttribute($faces, 1 ));\n geometry.computeVertexNormals();\n\n var texture = new THREE.TextureLoader().load(\'$tex_data_url\', update);\n material = new THREE.ShaderMaterial( {\n uniforms: {\n viewAspect: {value: width/height},\n unfolding_perc: { value: 0.0 },\n shadeFlag: { value: 0.0 },\n texture: { type: \'t\', value: texture },\n },\n side: THREE.DoubleSide,\n vertexShader: el( \'vertexShader\' ).textContent,\n fragmentShader: el( \'fragmentShader\' ).textContent\n });\n\n var mesh = new THREE.Mesh(geometry, material);\n scene.add(mesh);\n scene.background = new THREE.Color( $background );\n\n renderer = new THREE.WebGLRenderer({antialias: true});\n renderer.setSize(width, height);\n document.body.appendChild(renderer.domElement);\n\n // render on change only\n controls.addEventListener(\'change\', function() {\n // fold mesh back if user wants to interact\n el(\'unfoldBox\').checked = false;\n update();\n });\n document.querySelectorAll(\'.control\').forEach(e=>{\n e.addEventListener(\'change\', update);\n });\n }\n\n function update() {\n requestAnimationFrame(animate);\n }\n\n function ease(x) {\n x = Math.min(Math.max(x, 0.0), 1.0);\n return 
x*x*(3.0 - 2.0*x);\n }\n\n function animate(time) {\n var unfoldFlag = el(\'unfoldBox\').checked;\n if (unfolded != unfoldFlag) {\n unfolded = unfoldFlag;\n unfoldStart = time - Math.max(unfoldStart+unfoldDuration-time, 0.0);\n }\n var unfoldTime = (time-unfoldStart) / unfoldDuration;\n if (unfoldTime < 1.0) {\n update();\n }\n var unfoldVal = ease(unfoldTime);\n unfoldVal = unfolded ? unfoldVal : 1.0 - unfoldVal;\n material.uniforms.unfolding_perc.value = unfoldVal;\n\n material.uniforms.shadeFlag.value = el(\'shadeBox\').checked ? 1.0 : 0.0;\n controls.update();\n renderer.render(scene, camera);\n }\n </script>\n """\n )\n', (6732, 11243), False, 'from string import Template\n'), ((11681, 12079), 'string.Template', 'Template', (['"""\n <style> \n #animation {\n width: ${width}px;\n height: ${height}px;\n background: url(\'$image_url\') left center;\n animation: play 1s steps($steps) infinite alternate;\n }\n @keyframes play {\n 100% { background-position: -${sequence_width}px; }\n }\n </style><div id=\'animation\'></div>\n """'], {}), '(\n """\n <style> \n #animation {\n width: ${width}px;\n height: ${height}px;\n background: url(\'$image_url\') left center;\n animation: play 1s steps($steps) infinite alternate;\n }\n @keyframes play {\n 100% { background-position: -${sequence_width}px; }\n }\n </style><div id=\'animation\'></div>\n """\n )\n', (11689, 12079), False, 'from string import Template\n')] |
from keras.utils import Sequence
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import json
from multiprocessing import Pool
class DataGenerator(Sequence):
    """Keras ``Sequence`` that streams padded (X, y) batches from JSON files.

    Each file is expected to contain a dict with an ``"ngrammed_sequence"``
    list (the input sequence) and a ``"COG"`` list whose first element is a
    key of ``encoded_labels`` — TODO confirm against the data producer.
    """

    def __init__(self, filepaths: str, encoded_labels: dict, max_length: int, batch_size: int=32, shuffle: bool=True, mp: bool=True):
        # Note: removed a leftover debug print of the full filepath array.
        self.filepaths = np.array(filepaths)
        self.encoded_labels = encoded_labels
        self.max_length = max_length
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.mp = mp  # parse files with a multiprocessing pool when True
        self.on_epoch_end()

    def __len__(self) -> int:
        # Number of complete batches; a trailing partial batch is dropped.
        return len(self.filepaths) // self.batch_size

    def on_epoch_end(self):
        """Reset (and optionally reshuffle) the index order for a new epoch."""
        self.indexes = np.arange(len(self.filepaths))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __getitem__(self, index):
        """Return batch ``index`` as a (padded X, label array) pair."""
        indexes = self.indexes[index * self.batch_size:self.batch_size * (index + 1)]
        file_paths_temp = [self.filepaths[k] for k in indexes]
        X, y = self.__data_generation(file_paths_temp)
        return X, y

    def _process_data(self, filepath):
        """Load one JSON file and return its (sequence, encoded label) pair."""
        with open(filepath, "r") as infile:
            data = json.load(infile)
        seq = np.array(data["ngrammed_sequence"])
        con = self.encoded_labels[data["COG"][0]]
        return seq, con

    def __data_generation(self, file_paths_temp):
        """Parse the batch's files and pad the sequences to ``max_length``."""
        if self.mp:
            # FIX: use the pool as a context manager so worker processes are
            # closed and joined; the original leaked a fresh Pool per batch.
            with Pool(None) as p:
                d = p.map(self._process_data, file_paths_temp)
        else:
            d = [self._process_data(x) for x in file_paths_temp]
        X, y = [x[0] for x in d], [x[1] for x in d]
        X = pad_sequences(np.array(X), self.max_length)
        return X, np.array(y)
| [
"json.load",
"numpy.array",
"multiprocessing.Pool",
"numpy.random.shuffle"
] | [((345, 364), 'numpy.array', 'np.array', (['filepaths'], {}), '(filepaths)\n', (353, 364), True, 'import numpy as np\n'), ((824, 855), 'numpy.random.shuffle', 'np.random.shuffle', (['self.indexes'], {}), '(self.indexes)\n', (841, 855), True, 'import numpy as np\n'), ((1224, 1241), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (1233, 1241), False, 'import json\n'), ((1260, 1295), 'numpy.array', 'np.array', (["data['ngrammed_sequence']"], {}), "(data['ngrammed_sequence'])\n", (1268, 1295), True, 'import numpy as np\n'), ((1463, 1473), 'multiprocessing.Pool', 'Pool', (['None'], {}), '(None)\n', (1467, 1473), False, 'from multiprocessing import Pool\n'), ((1709, 1720), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1717, 1720), True, 'import numpy as np\n'), ((1758, 1769), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1766, 1769), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import scipy.stats as stats
import itertools
import matplotlib
from matplotlib import cm
from matplotlib.ticker import FuncFormatter
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import sklearn as sk
import sklearn.linear_model
from volcanic.helpers import bround
from volcanic.tof import calc_tof, calc_es, calc_s_es
from volcanic.exceptions import MissingDataError
def get_reg_targets(idx1, idx2, d, tags, coeff, regress, mode="k"):
    """Separate regression targets and regressor variables.

    Returns the two descriptor columns (flattened), their labels, the labels
    and data of the regressed columns, the non-regressed columns, and the
    coefficient mask restricted to the regressed columns.  In mode "t" the
    coefficient-flagged columns are additionally dropped from the targets.
    """
    descriptor_tag_1 = tags[idx1]
    descriptor_tag_2 = tags[idx2]
    kept_tags = tags[regress]
    X1 = np.ravel(d[:, idx1])
    X2 = np.ravel(d[:, idx2])
    targets = d[:, regress]
    leftover = d[:, ~regress]
    kept_coeff = coeff[regress]
    if mode == "t":
        # Thermodynamic mode: discard the coefficient-flagged columns.
        targets = targets[:, ~kept_coeff]
        kept_tags = kept_tags[~kept_coeff]
    return X1, X2, descriptor_tag_1, descriptor_tag_2, kept_tags, targets, leftover, kept_coeff
def plot_ci_manual(t, s_err, n, x, x2, y2, ax=None):
    """Shade a confidence band of half-width t * s_err * sqrt(1/n + ...) around
    the line (x2, y2) on ``ax`` (current axes if None) and return the axes."""
    if ax is None:
        ax = plt.gca()
    x_bar = np.mean(x)
    spread = np.sum((x - x_bar) ** 2)
    half_width = t * s_err * np.sqrt(1 / n + (x2 - x_bar) ** 2 / spread)
    ax.fill_between(x2, y2 + half_width, y2 - half_width, color="#b9cfe7", alpha=0.6)
    return ax
def plot_3d_lsfer(
    idx1,
    idx2,
    d,
    tags,
    coeff,
    regress,
    cb="white",
    ms="o",
    lmargin=5,
    rmargin=5,
    npoints=100,
    plotmode=1,
    verb=0,
):
    """Fit and plot two-descriptor linear scaling relationships (LSFER).

    For every intermediate energy column (all regressed columns except the
    first and last), fit a linear model on the two descriptor columns,
    back-fill missing entries from the model where possible, and save a
    parity-style plot with a confidence band per column.

    Parameters
    ----------
    idx1, idx2 : int
        Indices of the two descriptor columns.
    d : np.ndarray
        Energy data matrix; may contain NaN for missing entries.
    tags : np.ndarray
        Column labels for ``d``.
    coeff, regress : np.ndarray
        Boolean masks consumed by ``get_reg_targets``.
    cb, ms : str or np.ndarray
        Per-point colors/markers (arrays) or a single value.
    lmargin, rmargin : float
        Margins added below/above the data range of each plot.
    npoints : int
        Unused here; kept for signature parity with the volcano plotters.
    plotmode, verb : int
        Plot style selector (unused here) and verbosity.

    Returns
    -------
    np.ndarray
        ``d`` with refilled values hstacked with the non-regressed columns.

    Raises
    ------
    MissingDataError
        If both descriptors and the regression target are missing for a row.
    """
    x1base = 20
    x2base = 20
    X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
        idx1, idx2, d, tags, coeff, regress, mode="k"
    )
    # Copy every defined entry; NaN slots may be refilled from the fits below.
    d_refill = np.zeros_like(d)
    d_refill[~np.isnan(d)] = d[~np.isnan(d)]
    lnsteps = range(d.shape[1])
    mape = 100
    for j in lnsteps[1:-1]:
        if verb > 0:
            print(f"Plotting regression of {tags[j]}.")
        XY = np.vstack([X1, X2, d[:, j]]).T
        # Restrict per-point colors/markers to rows with complete data.
        if isinstance(cb, np.ndarray):
            cbi = np.array(cb)[~np.isnan(XY).any(axis=1)]
        else:
            cbi = cb
        if isinstance(ms, np.ndarray):
            msi = np.array(ms)[~np.isnan(XY).any(axis=1)]
        else:
            msi = ms
        XYm = XY[np.isnan(XY).any(axis=1)]  # rows with any missing value
        XY = XY[~np.isnan(XY).any(axis=1)]  # complete rows used for the fit
        Xm = XYm[:, :2]
        Ym = XYm[:, 2]
        X = XY[:, :2]
        Y = XY[:, 2]
        xmax = bround(Y.max() + rmargin, x1base)
        xmin = bround(Y.min() - lmargin, x1base)
        xint = np.sort(Y)
        reg = sk.linear_model.LinearRegression().fit(X, Y)
        if verb > 2:
            print(
                f"Linear model has coefficients : {reg.coef_} \n and intercept {reg.intercept_}"
            )
        Y_pred = reg.predict(X)
        p = reg.coef_
        currmape = sk.metrics.mean_absolute_percentage_error(Y, Y_pred)
        for k, y in enumerate(Ym):
            if not np.isnan(Xm[k, 0]) and not np.isnan(Xm[k, 1]) and np.isnan(Ym[k]):
                # Both descriptors known, target missing: predict the target.
                # FIX: sklearn's predict() requires a 2-D sample array.
                Ym[k] = reg.predict(Xm[k].reshape(1, -1))[0]
                # NOTE(review): this chained fancy-indexing assigns into a
                # temporary copy, so d_refill is not actually updated here.
                # Kept as in the original pending a deliberate fix — TODO.
                d_refill[np.isnan(d).any(axis=1)][:, j][k] = Ym[k]
            elif not np.isnan(Ym[k]) and not np.isnan(Xm[k, 0]):
                if currmape < mape:
                    # Target known, descriptor 2 missing: invert the model.
                    # FIX: the fitted attribute is reg.coef_, not reg.coeff_
                    # (the original raised AttributeError on this path).
                    Xm[k, 1] = (
                        Ym[k] - reg.intercept_ - reg.coef_[0] * X[k][0]
                    ) / reg.coef_[1]
                    d_refill[np.isnan(d).any(axis=1)][:, idx2][k] = Xm[k, 1]
                    mape = currmape
            elif not np.isnan(Ym[k]) and not np.isnan(Xm[k, 1]):
                if currmape < mape:
                    # Target known, descriptor 1 missing: invert the model.
                    Xm[k, 0] = (
                        Ym[k] - reg.intercept_ - reg.coef_[1] * X[k][1]
                    ) / reg.coef_[0]
                    d_refill[np.isnan(d).any(axis=1)][:, idx1][k] = Xm[k, 0]
                    mape = currmape
            else:
                raise MissingDataError(
                    "Both descriptor and regression target are undefined. This should have been fixed before this point. Exiting."
                )
        n = Y.size
        m = p.size
        dof = n - m
        t = stats.t.ppf(0.95, dof)
        resid = Y - Y_pred
        chi2 = np.sum((resid / Y_pred) ** 2)
        s_err = np.sqrt(np.sum(resid ** 2) / dof)
        fig, ax = plt.subplots(
            frameon=False, figsize=[3, 3], dpi=300, constrained_layout=True
        )
        yint = np.sort(Y_pred)
        plot_ci_manual(t, s_err, n, X, xint, yint, ax=ax)
        # Prediction interval: computed for parity with the CI but not drawn.
        pi = (
            t
            * s_err
            * np.sqrt(
                1 + 1 / n + (xint - np.mean(X)) ** 2 / np.sum((X - np.mean(X)) ** 2)
            )
        )
        ax.plot(xint, yint, "-", linewidth=1, color="#000a75", alpha=0.85)
        for i in range(len(X)):
            ax.scatter(
                Y_pred[i],
                Y[i],
                s=12.5,
                c=cbi[i],
                marker=msi[i],
                linewidths=0.15,
                edgecolors="black",
            )
        # Consistency: reuse the shared axis-styling helper instead of the
        # duplicated border/tick boilerplate used elsewhere in this module.
        ax = beautify_ax(ax)
        # Labels and key
        plt.xlabel(f"Function of {tag1} and {tag2}")
        plt.ylabel(f"{tags[j]} [kcal/mol]")
        plt.xlim(xmin, xmax)
        plt.savefig(f"{tags[j]}.png")
    return np.hstack((d_refill, d2))
def plot_3d_t_volcano(
    idx1,
    idx2,
    d,
    tags,
    coeff,
    regress,
    dgr,
    cb="white",
    ms="o",
    lmargin=15,
    rmargin=15,
    npoints=200,
    plotmode=1,
    verb=0,
):
    """Build and plot a thermodynamic volcano (-ΔG(pds)) over two descriptors.

    Fits one linear model per (thermodynamic-mode) energy column against the
    two descriptor columns, evaluates each model on an npoints x npoints grid,
    and computes the pds value at every grid point via ``calc_s_es``.
    Returns (xint, yint, grid, px, py): the grid axes, the volcano surface,
    and the descriptor coordinates of the input points.
    """
    x1base = 25
    x2base = 20
    X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
        idx1, idx2, d, tags, coeff, regress, mode="t"
    )
    lnsteps = range(d.shape[1])
    # Plot window rounded outward to the descriptor bases.
    x1max = bround(X1.max() + rmargin, x1base)
    x1min = bround(X1.min() - lmargin, x1base)
    x2max = bround(X2.max() + rmargin, x2base)
    x2min = bround(X2.min() - lmargin, x2base)
    if verb > 1:
        print(
            f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
        )
    xint = np.linspace(x1min, x1max, npoints)
    yint = np.linspace(x2min, x2max, npoints)
    grids = []
    # One fitted surface per energy column, evaluated point-by-point.
    for i, j in enumerate(lnsteps):
        XY = np.vstack([X1, X2, d[:, j]]).T
        X = XY[:, :2]
        Y = XY[:, 2]
        reg = sk.linear_model.LinearRegression().fit(X, Y)
        Y_pred = reg.predict(X)
        gridj = np.zeros((npoints, npoints))
        for k, x1 in enumerate(xint):
            for l, x2 in enumerate(yint):
                x1x2 = np.vstack([x1, x2]).reshape(1, -1)
                gridj[k, l] = reg.predict(x1x2)
        grids.append(gridj)
    grid = np.zeros_like(gridj)
    ridmax = np.zeros_like(gridj, dtype=int)
    ridmin = np.zeros_like(gridj, dtype=int)
    rb = np.zeros_like(gridj, dtype=int)
    # The last interpolated surface is the reaction energy; the rest form the
    # profile handed to calc_s_es at each grid point (dgr parameter shadowed).
    for k, x1 in enumerate(xint):
        for l, x2 in enumerate(yint):
            profile = [gridj[k, l] for gridj in grids][:-1]
            dgr = [gridj[k, l] for gridj in grids][-1]
            grid[k, l], ridmax[k, l], ridmin[k, l], diff = calc_s_es(
                profile, dgr, esp=True
            )
    # NOTE(review): rid/rb are built but not used below — TODO confirm intent.
    rid = np.hstack([ridmin, ridmax])
    if verb > 0:
        pass
    ymin = grid.min()
    ymax = grid.max()
    # Descriptor coordinates of the original data points, for overplotting.
    px = np.zeros_like(d[:, 0])
    py = np.zeros_like(d[:, 0])
    for i in range(d.shape[0]):
        profile = d[i, :-1]
        dgr = d[i][-1]
        px[i] = X1[i]
        py[i] = X2[i]
    x1label = f"{tag1} [kcal/mol]"
    x2label = f"{tag2} [kcal/mol]"
    ylabel = "-ΔG(pds) [kcal/mol]"
    filename = f"t_volcano_{tag1}_{tag2}.png"
    if verb > 0:
        # Dump (descriptor1, descriptor2, value) triples to CSV.
        csvname = f"t_volcano_{tag1}_{tag2}.csv"
        print(f"Saving volcano data to file {csvname}")
        x = np.zeros_like(grid.reshape(-1))
        y = np.zeros_like(grid.reshape(-1))
        for i, xy in enumerate(itertools.product(xint, yint)):
            x[i] = xy[0]
            y[i] = xy[1]
        zdata = list(zip(x, y, grid.reshape(-1)))
        np.savetxt(
            csvname,
            zdata,
            fmt="%.4e",
            delimiter=",",
            header="Descriptor 1, Descriptor 2, -\D_pds",
        )
    if plotmode == 2:
        plot_3d_contour(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    else:
        plot_3d_scatter(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    return xint, yint, grid, px, py
def plot_3d_k_volcano(
    idx1,
    idx2,
    d,
    tags,
    coeff,
    regress,
    dgr,
    cb="white",
    ms="o",
    lmargin=15,
    rmargin=15,
    npoints=200,
    plotmode=1,
    verb=0,
):
    """Build and plot a kinetic volcano (-ΔG(kds)) over two descriptors.

    Same pipeline as ``plot_3d_t_volcano`` but with mode "k" column selection:
    fit one linear model per energy column, interpolate each on an
    npoints x npoints descriptor grid, and evaluate ``calc_s_es`` per point.
    Returns (xint, yint, grid, px, py).
    """
    x1base = 25
    x2base = 20
    X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
        idx1, idx2, d, tags, coeff, regress, mode="k"
    )
    lnsteps = range(d.shape[1])
    # Plot window rounded outward to the descriptor bases.
    x1max = bround(X1.max() + rmargin, x1base)
    x1min = bround(X1.min() - lmargin, x1base)
    x2max = bround(X2.max() + rmargin, x2base)
    x2min = bround(X2.min() - lmargin, x2base)
    if verb > 1:
        print(
            f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
        )
    xint = np.linspace(x1min, x1max, npoints)
    yint = np.linspace(x2min, x2max, npoints)
    grids = []
    # One fitted surface per energy column, evaluated point-by-point.
    for i, j in enumerate(lnsteps):
        XY = np.vstack([X1, X2, d[:, j]]).T
        X = XY[:, :2]
        Y = XY[:, 2]
        reg = sk.linear_model.LinearRegression().fit(X, Y)
        Y_pred = reg.predict(X)
        gridj = np.zeros((npoints, npoints))
        for k, x1 in enumerate(xint):
            for l, x2 in enumerate(yint):
                x1x2 = np.vstack([x1, x2]).reshape(1, -1)
                gridj[k, l] = reg.predict(x1x2)
        grids.append(gridj)
    grid = np.zeros_like(gridj)
    ridmax = np.zeros_like(gridj, dtype=int)
    ridmin = np.zeros_like(gridj, dtype=int)
    rb = np.zeros_like(gridj, dtype=int)
    # Last surface is the reaction energy; the rest form the profile handed
    # to calc_s_es at each grid point (the dgr parameter is shadowed here).
    for k, x1 in enumerate(xint):
        for l, x2 in enumerate(yint):
            profile = [gridj[k, l] for gridj in grids][:-1]
            dgr = [gridj[k, l] for gridj in grids][-1]
            grid[k, l], ridmax[k, l], ridmin[k, l], diff = calc_s_es(
                profile, dgr, esp=True
            )
    # NOTE(review): rid/rb are built but not used below — TODO confirm intent.
    rid = np.hstack([ridmin, ridmax])
    if verb > 0:
        pass
    ymin = grid.min()
    ymax = grid.max()
    # Descriptor coordinates of the original data points, for overplotting.
    px = np.zeros_like(d[:, 0])
    py = np.zeros_like(d[:, 0])
    for i in range(d.shape[0]):
        profile = d[i, :-1]
        px[i] = X1[i]
        py[i] = X2[i]
    x1label = f"{tag1} [kcal/mol]"
    x2label = f"{tag2} [kcal/mol]"
    ylabel = "-ΔG(kds) [kcal/mol]"
    filename = f"k_volcano_{tag1}_{tag2}.png"
    if verb > 0:
        # Dump (descriptor1, descriptor2, value) triples to CSV.
        csvname = f"k_volcano_{tag1}_{tag2}.csv"
        print(f"Saving volcano data to file {csvname}")
        x = np.zeros_like(grid.reshape(-1))
        y = np.zeros_like(grid.reshape(-1))
        for i, xy in enumerate(itertools.product(xint, yint)):
            x[i] = xy[0]
            y[i] = xy[1]
        zdata = list(zip(x, y, grid.reshape(-1)))
        np.savetxt(
            csvname,
            zdata,
            fmt="%.4e",
            delimiter=",",
            header="Descriptor 1, Descriptor 2, -\D_kds",
        )
    if plotmode == 2:
        plot_3d_contour(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    else:
        plot_3d_scatter(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    return xint, yint, grid, px, py
def plot_3d_es_volcano(
    idx1,
    idx2,
    d,
    tags,
    coeff,
    regress,
    dgr,
    cb="white",
    ms="o",
    lmargin=15,
    rmargin=15,
    npoints=200,
    plotmode=1,
    verb=0,
):
    """Build and plot an energy-span volcano (-δE) over two descriptors.

    Same pipeline as the other volcano plotters, but the per-grid-point value
    comes from ``calc_es`` (energy span) instead of ``calc_s_es``.
    Returns (xint, yint, grid, px, py).
    """
    x1base = 25
    x2base = 20
    X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
        idx1, idx2, d, tags, coeff, regress, mode="k"
    )
    lnsteps = range(d.shape[1])
    # Plot window rounded outward to the descriptor bases.
    x1max = bround(X1.max() + rmargin, x1base)
    x1min = bround(X1.min() - lmargin, x1base)
    x2max = bround(X2.max() + rmargin, x2base)
    x2min = bround(X2.min() - lmargin, x2base)
    if verb > 1:
        print(
            f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
        )
    xint = np.linspace(x1min, x1max, npoints)
    yint = np.linspace(x2min, x2max, npoints)
    grids = []
    # One fitted surface per energy column, evaluated point-by-point.
    for i, j in enumerate(lnsteps):
        XY = np.vstack([X1, X2, d[:, j]]).T
        X = XY[:, :2]
        Y = XY[:, 2]
        reg = sk.linear_model.LinearRegression().fit(X, Y)
        Y_pred = reg.predict(X)
        gridj = np.zeros((npoints, npoints))
        for k, x1 in enumerate(xint):
            for l, x2 in enumerate(yint):
                x1x2 = np.vstack([x1, x2]).reshape(1, -1)
                gridj[k, l] = reg.predict(x1x2)
        grids.append(gridj)
    grid = np.zeros_like(gridj)
    ridmax = np.zeros_like(gridj, dtype=int)
    ridmin = np.zeros_like(gridj, dtype=int)
    rb = np.zeros_like(gridj, dtype=int)
    # Last surface is the reaction energy; the rest form the profile handed
    # to calc_es at each grid point (the dgr parameter is shadowed here).
    for k, x1 in enumerate(xint):
        for l, x2 in enumerate(yint):
            profile = [gridj[k, l] for gridj in grids][:-1]
            dgr = [gridj[k, l] for gridj in grids][-1]
            grid[k, l], ridmax[k, l], ridmin[k, l], diff = calc_es(
                profile, dgr, esp=True
            )
    # NOTE(review): rid/rb are built but not used below — TODO confirm intent.
    rid = np.hstack([ridmin, ridmax])
    if verb > 0:
        pass
    ymin = grid.min()
    ymax = grid.max()
    # Descriptor coordinates of the original data points, for overplotting.
    px = np.zeros_like(d[:, 0])
    py = np.zeros_like(d[:, 0])
    for i in range(d.shape[0]):
        profile = d[i, :-1]
        px[i] = X1[i]
        py[i] = X2[i]
    x1label = f"{tag1} [kcal/mol]"
    x2label = f"{tag2} [kcal/mol]"
    ylabel = r"-δ$E$ [kcal/mol]"
    filename = f"es_volcano_{tag1}_{tag2}.png"
    if verb > 0:
        # Dump (descriptor1, descriptor2, value) triples to CSV.
        csvname = f"es_volcano_{tag1}_{tag2}.csv"
        print(f"Saving volcano data to file {csvname}")
        x = np.zeros_like(grid.reshape(-1))
        y = np.zeros_like(grid.reshape(-1))
        for i, xy in enumerate(itertools.product(xint, yint)):
            x[i] = xy[0]
            y[i] = xy[1]
        zdata = list(zip(x, y, grid.reshape(-1)))
        np.savetxt(
            csvname,
            zdata,
            fmt="%.4e",
            delimiter=",",
            header="Descriptor 1, Descriptor 2, -\d_Ges",
        )
    if plotmode == 2:
        plot_3d_contour(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    else:
        plot_3d_scatter(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    return xint, yint, grid, px, py
def plot_3d_tof_volcano(
    idx1,
    idx2,
    d,
    tags,
    coeff,
    regress,
    dgr,
    T=298.15,
    cb="white",
    ms="o",
    lmargin=15,
    rmargin=15,
    npoints=200,
    plotmode=1,
    verb=0,
):
    """Build and plot a log10(TOF) volcano over two descriptors.

    Same interpolation pipeline as the other volcano plotters; each grid
    point's value is log10 of the turnover frequency from ``calc_tof``
    (exact mode) at temperature ``T``.  Returns (xint, yint, grid, px, py).
    """
    x1base = 25
    x2base = 20
    X1, X2, tag1, tag2, tags, d, d2, coeff = get_reg_targets(
        idx1, idx2, d, tags, coeff, regress, mode="k"
    )
    lnsteps = range(d.shape[1])
    # Plot window rounded outward to the descriptor bases.
    x1max = bround(X1.max() + rmargin, x1base)
    x1min = bround(X1.min() - lmargin, x1base)
    x2max = bround(X2.max() + rmargin, x2base)
    x2min = bround(X2.min() - lmargin, x2base)
    if verb > 1:
        print(
            f"Range of descriptors set to [ {x1min} , {x1max} ] and [ {x2min} , {x2max} ]"
        )
    xint = np.linspace(x1min, x1max, npoints)
    yint = np.linspace(x2min, x2max, npoints)
    grids = []
    # One fitted surface per energy column, evaluated point-by-point.
    for i, j in enumerate(lnsteps):
        XY = np.vstack([X1, X2, d[:, j]]).T
        X = XY[:, :2]
        Y = XY[:, 2]
        reg = sk.linear_model.LinearRegression().fit(X, Y)
        Y_pred = reg.predict(X)
        gridj = np.zeros((npoints, npoints))
        for k, x1 in enumerate(xint):
            for l, x2 in enumerate(yint):
                x1x2 = np.vstack([x1, x2]).reshape(1, -1)
                gridj[k, l] = reg.predict(x1x2)
        grids.append(gridj)
    grid = np.zeros_like(gridj)
    rb = np.zeros_like(gridj, dtype=int)
    # Unlike the ES plotters, the full profile (including the last surface)
    # is handed to calc_tof; the dgr parameter is shadowed per grid point.
    for k, x1 in enumerate(xint):
        for l, x2 in enumerate(yint):
            profile = [gridj[k, l] for gridj in grids]
            dgr = [gridj[k, l] for gridj in grids][-1]
            grid[k, l] = np.log10(calc_tof(profile, dgr, T, coeff, exact=True)[0])
    ymin = grid.min()
    ymax = grid.max()
    # Descriptor coordinates of the original data points, for overplotting.
    px = np.zeros_like(d[:, 0])
    py = np.zeros_like(d[:, 0])
    for i in range(d.shape[0]):
        profile = d[i, :-1]
        px[i] = X1[i]
        py[i] = X2[i]
    x1label = f"{tag1} [kcal/mol]"
    x2label = f"{tag2} [kcal/mol]"
    ylabel = "log(TOF) [1/s]"
    filename = f"tof_volcano_{tag1}_{tag2}.png"
    if verb > 0:
        # Dump (descriptor1, descriptor2, value) triples to CSV.
        csvname = f"tof_volcano_{tag1}_{tag2}.csv"
        print(f"Saving TOF volcano data to file {csvname}")
        x = np.zeros_like(grid.reshape(-1))
        y = np.zeros_like(grid.reshape(-1))
        for i, xy in enumerate(itertools.product(xint, yint)):
            x[i] = xy[0]
            y[i] = xy[1]
        zdata = list(zip(x, y, grid.reshape(-1)))
        np.savetxt(
            csvname,
            zdata,
            fmt="%.4e",
            delimiter=",",
            header="Descriptor 1, Descriptor 2, log10(TOF)",
        )
    if plotmode == 2:
        plot_3d_contour(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    else:
        plot_3d_scatter(
            xint,
            yint,
            grid.T,
            px,
            py,
            ymin,
            ymax,
            x1min,
            x1max,
            x2min,
            x2max,
            x1base,
            x2base,
            x1label=x1label,
            x2label=x2label,
            ylabel=ylabel,
            filename=filename,
            cb=cb,
            ms=ms,
            plotmode=plotmode,
        )
    return xint, yint, grid, px, py
def beautify_ax(ax):
    """Apply the module's standard axis styling in place and return the axis:
    black border on all four sides, outward ticks on the bottom/left only."""
    for side in ("top", "bottom", "left", "right"):
        ax.spines[side].set_color("black")
    ax.get_xaxis().set_tick_params(direction="out")
    ax.get_yaxis().set_tick_params(direction="out")
    ax.xaxis.tick_bottom()
    ax.yaxis.tick_left()
    return ax
def plot_3d_contour(
    xint,
    yint,
    grid,
    px,
    py,
    ymin,
    ymax,
    x1min,
    x1max,
    x2min,
    x2max,
    x1base,
    x2base,
    x1label="X1-axis",
    x2label="X2-axis",
    ylabel="Y-axis",
    filename="plot.png",
    cb="white",
    ms="o",
    plotmode=2,
):
    """Render a filled-contour volcano surface with scattered data points
    and a colorbar, then save it to ``filename``.

    ``grid`` is clipped to [ymin, ymax]; ticks are spaced by x1base/x2base.
    ``cb``/``ms`` are indexed per point, so sequences of colors/markers are
    expected here — assumed prepared by the caller, TODO confirm.
    """
    fig, ax = plt.subplots(
        frameon=False, figsize=[4.2, 3], dpi=300, constrained_layout=True
    )
    grid = np.clip(grid, ymin, ymax)
    norm = cm.colors.Normalize(vmax=ymax, vmin=ymin)
    # Contour levels every 2.5 kcal/mol, padded past the data range.
    levels = np.arange(ymin - 5, ymax + 5, 2.5)
    ax = beautify_ax(ax)
    cset = ax.contourf(
        xint,
        yint,
        grid,
        levels=levels,
        norm=norm,
        cmap=cm.get_cmap("jet", len(levels)),
    )
    # Labels and key
    plt.xlabel(x1label)
    plt.ylabel(x2label)
    plt.xlim(x1min, x1max)
    # Note: y-limits are deliberately given as (max, min), inverting the axis.
    plt.ylim(x2max, x2min)
    plt.xticks(np.arange(x1min, x1max + 0.1, x1base))
    plt.yticks(np.arange(x2min, x2max + 0.1, x2base))
    # Thin black isolines on top of the filled contours.
    ax.contour(xint, yint, grid, cset.levels, colors="black", linewidths=0.3)
    fmt = lambda x, pos: "%.0f" % x
    cbar = fig.colorbar(cset, format=FuncFormatter(fmt))
    cbar.set_label(ylabel, labelpad=15, rotation=270)
    for i in range(len(px)):
        ax.scatter(
            px[i],
            py[i],
            s=12.5,
            c=cb[i],
            marker=ms[i],
            linewidths=0.15,
            edgecolors="black",
        )
    plt.savefig(filename)
def plot_3d_scatter(
    xint,
    yint,
    grid,
    px,
    py,
    ymin,
    ymax,
    x1min,
    x1max,
    x2min,
    x2max,
    x1base,
    x2base,
    x1label="X1-axis",
    x2label="X2-axis",
    ylabel="Y-axis",
    filename="plot.png",
    cb="white",
    ms="o",
    plotmode=0,
):
    """Render the volcano surface as a smooth image (``imshow``) with a
    colorbar, optionally overlaying the data points, and save to ``filename``.

    ``grid`` is clipped to [ymin, ymax].  Data points are drawn only when
    ``plotmode == 1``; ``cb``/``ms`` are then indexed per point.
    """
    fig, ax = plt.subplots(
        frameon=False, figsize=[4.2, 3], dpi=300, constrained_layout=True
    )
    grid = np.clip(grid, ymin, ymax)
    norm = cm.colors.Normalize(vmax=ymax, vmin=ymin)
    ax = beautify_ax(ax)
    cset = ax.imshow(
        grid,
        interpolation="antialiased",
        extent=[x1min, x1max, x2min, x2max],
        origin="lower",
        cmap=cm.jet,
        aspect="auto",
    )
    # Labels and key
    plt.xlabel(x1label)
    plt.ylabel(x2label)
    plt.xlim(x1min, x1max)
    # Note: y-limits are deliberately given as (max, min), inverting the axis.
    plt.ylim(x2max, x2min)
    plt.xticks(np.arange(x1min, x1max + 0.1, x1base))
    plt.yticks(np.arange(x2min, x2max + 0.1, x2base))
    fmt = lambda x, pos: "%.0f" % x
    cbar = fig.colorbar(cset, format=FuncFormatter(fmt))
    cbar.set_label(ylabel, labelpad=15, rotation=270)
    if plotmode == 1:
        for i in range(len(px)):
            ax.scatter(
                px[i],
                py[i],
                s=12.5,
                c=cb[i],
                marker=ms[i],
                linewidths=0.15,
                edgecolors="black",
            )
    plt.savefig(filename)
| [
"numpy.clip",
"matplotlib.cm.colors.Normalize",
"volcanic.tof.calc_tof",
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.arange",
"numpy.mean",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.xlabel",
"numpy.sort",
"itertools.product",
"numpy.linspace",
"numpy.vstac... | [((176, 197), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (190, 197), False, 'import matplotlib\n'), ((1549, 1565), 'numpy.zeros_like', 'np.zeros_like', (['d'], {}), '(d)\n', (1562, 1565), True, 'import numpy as np\n'), ((5379, 5404), 'numpy.hstack', 'np.hstack', (['(d_refill, d2)'], {}), '((d_refill, d2))\n', (5388, 5404), True, 'import numpy as np\n'), ((6126, 6160), 'numpy.linspace', 'np.linspace', (['x1min', 'x1max', 'npoints'], {}), '(x1min, x1max, npoints)\n', (6137, 6160), True, 'import numpy as np\n'), ((6172, 6206), 'numpy.linspace', 'np.linspace', (['x2min', 'x2max', 'npoints'], {}), '(x2min, x2max, npoints)\n', (6183, 6206), True, 'import numpy as np\n'), ((6706, 6726), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {}), '(gridj)\n', (6719, 6726), True, 'import numpy as np\n'), ((6740, 6771), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (6753, 6771), True, 'import numpy as np\n'), ((6785, 6816), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (6798, 6816), True, 'import numpy as np\n'), ((6826, 6857), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (6839, 6857), True, 'import numpy as np\n'), ((7178, 7205), 'numpy.hstack', 'np.hstack', (['[ridmin, ridmax]'], {}), '([ridmin, ridmax])\n', (7187, 7205), True, 'import numpy as np\n'), ((7289, 7311), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (7302, 7311), True, 'import numpy as np\n'), ((7321, 7343), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (7334, 7343), True, 'import numpy as np\n'), ((9883, 9917), 'numpy.linspace', 'np.linspace', (['x1min', 'x1max', 'npoints'], {}), '(x1min, x1max, npoints)\n', (9894, 9917), True, 'import numpy as np\n'), ((9929, 9963), 'numpy.linspace', 'np.linspace', (['x2min', 'x2max', 'npoints'], {}), '(x2min, x2max, npoints)\n', (9940, 9963), True, 
'import numpy as np\n'), ((10463, 10483), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {}), '(gridj)\n', (10476, 10483), True, 'import numpy as np\n'), ((10497, 10528), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (10510, 10528), True, 'import numpy as np\n'), ((10542, 10573), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (10555, 10573), True, 'import numpy as np\n'), ((10583, 10614), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (10596, 10614), True, 'import numpy as np\n'), ((10935, 10962), 'numpy.hstack', 'np.hstack', (['[ridmin, ridmax]'], {}), '([ridmin, ridmax])\n', (10944, 10962), True, 'import numpy as np\n'), ((11046, 11068), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (11059, 11068), True, 'import numpy as np\n'), ((11078, 11100), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (11091, 11100), True, 'import numpy as np\n'), ((13618, 13652), 'numpy.linspace', 'np.linspace', (['x1min', 'x1max', 'npoints'], {}), '(x1min, x1max, npoints)\n', (13629, 13652), True, 'import numpy as np\n'), ((13664, 13698), 'numpy.linspace', 'np.linspace', (['x2min', 'x2max', 'npoints'], {}), '(x2min, x2max, npoints)\n', (13675, 13698), True, 'import numpy as np\n'), ((14198, 14218), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {}), '(gridj)\n', (14211, 14218), True, 'import numpy as np\n'), ((14232, 14263), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (14245, 14263), True, 'import numpy as np\n'), ((14277, 14308), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (14290, 14308), True, 'import numpy as np\n'), ((14318, 14349), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (14331, 14349), True, 'import numpy as np\n'), ((14668, 14695), 'numpy.hstack', 
'np.hstack', (['[ridmin, ridmax]'], {}), '([ridmin, ridmax])\n', (14677, 14695), True, 'import numpy as np\n'), ((14779, 14801), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (14792, 14801), True, 'import numpy as np\n'), ((14811, 14833), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (14824, 14833), True, 'import numpy as np\n'), ((17366, 17400), 'numpy.linspace', 'np.linspace', (['x1min', 'x1max', 'npoints'], {}), '(x1min, x1max, npoints)\n', (17377, 17400), True, 'import numpy as np\n'), ((17412, 17446), 'numpy.linspace', 'np.linspace', (['x2min', 'x2max', 'npoints'], {}), '(x2min, x2max, npoints)\n', (17423, 17446), True, 'import numpy as np\n'), ((17946, 17966), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {}), '(gridj)\n', (17959, 17966), True, 'import numpy as np\n'), ((17976, 18007), 'numpy.zeros_like', 'np.zeros_like', (['gridj'], {'dtype': 'int'}), '(gridj, dtype=int)\n', (17989, 18007), True, 'import numpy as np\n'), ((18326, 18348), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (18339, 18348), True, 'import numpy as np\n'), ((18358, 18380), 'numpy.zeros_like', 'np.zeros_like', (['d[:, 0]'], {}), '(d[:, 0])\n', (18371, 18380), True, 'import numpy as np\n'), ((20864, 20943), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'frameon': '(False)', 'figsize': '[4.2, 3]', 'dpi': '(300)', 'constrained_layout': '(True)'}), '(frameon=False, figsize=[4.2, 3], dpi=300, constrained_layout=True)\n', (20876, 20943), True, 'import matplotlib.pyplot as plt\n'), ((20969, 20994), 'numpy.clip', 'np.clip', (['grid', 'ymin', 'ymax'], {}), '(grid, ymin, ymax)\n', (20976, 20994), True, 'import numpy as np\n'), ((21006, 21047), 'matplotlib.cm.colors.Normalize', 'cm.colors.Normalize', ([], {'vmax': 'ymax', 'vmin': 'ymin'}), '(vmax=ymax, vmin=ymin)\n', (21025, 21047), False, 'from matplotlib import cm\n'), ((21061, 21095), 'numpy.arange', 'np.arange', (['(ymin - 5)', '(ymax + 5)', '(2.5)'], {}), 
'(ymin - 5, ymax + 5, 2.5)\n', (21070, 21095), True, 'import numpy as np\n'), ((21306, 21325), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x1label'], {}), '(x1label)\n', (21316, 21325), True, 'import matplotlib.pyplot as plt\n'), ((21330, 21349), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['x2label'], {}), '(x2label)\n', (21340, 21349), True, 'import matplotlib.pyplot as plt\n'), ((21354, 21376), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x1min', 'x1max'], {}), '(x1min, x1max)\n', (21362, 21376), True, 'import matplotlib.pyplot as plt\n'), ((21381, 21403), 'matplotlib.pyplot.ylim', 'plt.ylim', (['x2max', 'x2min'], {}), '(x2max, x2min)\n', (21389, 21403), True, 'import matplotlib.pyplot as plt\n'), ((21966, 21987), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (21977, 21987), True, 'import matplotlib.pyplot as plt\n'), ((22298, 22377), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'frameon': '(False)', 'figsize': '[4.2, 3]', 'dpi': '(300)', 'constrained_layout': '(True)'}), '(frameon=False, figsize=[4.2, 3], dpi=300, constrained_layout=True)\n', (22310, 22377), True, 'import matplotlib.pyplot as plt\n'), ((22403, 22428), 'numpy.clip', 'np.clip', (['grid', 'ymin', 'ymax'], {}), '(grid, ymin, ymax)\n', (22410, 22428), True, 'import numpy as np\n'), ((22440, 22481), 'matplotlib.cm.colors.Normalize', 'cm.colors.Normalize', ([], {'vmax': 'ymax', 'vmin': 'ymin'}), '(vmax=ymax, vmin=ymin)\n', (22459, 22481), False, 'from matplotlib import cm\n'), ((22724, 22743), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x1label'], {}), '(x1label)\n', (22734, 22743), True, 'import matplotlib.pyplot as plt\n'), ((22748, 22767), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['x2label'], {}), '(x2label)\n', (22758, 22767), True, 'import matplotlib.pyplot as plt\n'), ((22772, 22794), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x1min', 'x1max'], {}), '(x1min, x1max)\n', (22780, 22794), True, 'import matplotlib.pyplot as plt\n'), ((22799, 22821), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['x2max', 'x2min'], {}), '(x2max, x2min)\n', (22807, 22821), True, 'import matplotlib.pyplot as plt\n'), ((23368, 23389), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (23379, 23389), True, 'import matplotlib.pyplot as plt\n'), ((971, 980), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (978, 980), True, 'import matplotlib.pyplot as plt\n'), ((2360, 2370), 'numpy.sort', 'np.sort', (['Y'], {}), '(Y)\n', (2367, 2370), True, 'import numpy as np\n'), ((2654, 2706), 'sklearn.metrics.mean_absolute_percentage_error', 'sk.metrics.mean_absolute_percentage_error', (['Y', 'Y_pred'], {}), '(Y, Y_pred)\n', (2695, 2706), True, 'import sklearn as sk\n'), ((3931, 3953), 'scipy.stats.t.ppf', 'stats.t.ppf', (['(0.95)', 'dof'], {}), '(0.95, dof)\n', (3942, 3953), True, 'import scipy.stats as stats\n'), ((3996, 4025), 'numpy.sum', 'np.sum', (['((resid / Y_pred) ** 2)'], {}), '((resid / Y_pred) ** 2)\n', (4002, 4025), True, 'import numpy as np\n'), ((4094, 4171), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'frameon': '(False)', 'figsize': '[3, 3]', 'dpi': '(300)', 'constrained_layout': '(True)'}), '(frameon=False, figsize=[3, 3], dpi=300, constrained_layout=True)\n', (4106, 4171), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4224), 'numpy.sort', 'np.sort', (['Y_pred'], {}), '(Y_pred)\n', (4216, 4224), True, 'import numpy as np\n'), ((5212, 5256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""Function of {tag1} and {tag2}"""'], {}), "(f'Function of {tag1} and {tag2}')\n", (5222, 5256), True, 'import matplotlib.pyplot as plt\n'), ((5265, 5300), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""{tags[j]} [kcal/mol]"""'], {}), "(f'{tags[j]} [kcal/mol]')\n", (5275, 5300), True, 'import matplotlib.pyplot as plt\n'), ((5309, 5329), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (5317, 5329), True, 'import matplotlib.pyplot as plt\n'), ((5338, 5367), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{tags[j]}.png"""'], {}), "(f'{tags[j]}.png')\n", (5349, 5367), True, 'import matplotlib.pyplot as plt\n'), ((6452, 6480), 'numpy.zeros', 'np.zeros', (['(npoints, npoints)'], {}), '((npoints, npoints))\n', (6460, 6480), True, 'import numpy as np\n'), ((8003, 8108), 'numpy.savetxt', 'np.savetxt', (['csvname', 'zdata'], {'fmt': '"""%.4e"""', 'delimiter': '""","""', 'header': '"""Descriptor 1, Descriptor 2, -\\\\D_pds"""'}), "(csvname, zdata, fmt='%.4e', delimiter=',', header=\n 'Descriptor 1, Descriptor 2, -\\\\D_pds')\n", (8013, 8108), True, 'import numpy as np\n'), ((10209, 10237), 'numpy.zeros', 'np.zeros', (['(npoints, npoints)'], {}), '((npoints, npoints))\n', (10217, 10237), True, 'import numpy as np\n'), ((11737, 11842), 'numpy.savetxt', 'np.savetxt', (['csvname', 'zdata'], {'fmt': '"""%.4e"""', 'delimiter': '""","""', 'header': '"""Descriptor 1, Descriptor 2, -\\\\D_kds"""'}), "(csvname, zdata, fmt='%.4e', delimiter=',', header=\n 'Descriptor 1, Descriptor 2, -\\\\D_kds')\n", (11747, 11842), True, 'import numpy as np\n'), ((13944, 13972), 'numpy.zeros', 'np.zeros', (['(npoints, npoints)'], {}), '((npoints, npoints))\n', (13952, 13972), True, 'import numpy as np\n'), ((15470, 15575), 'numpy.savetxt', 'np.savetxt', (['csvname', 'zdata'], {'fmt': '"""%.4e"""', 'delimiter': '""","""', 'header': '"""Descriptor 1, Descriptor 2, -\\\\d_Ges"""'}), "(csvname, zdata, fmt='%.4e', delimiter=',', header=\n 'Descriptor 1, Descriptor 2, -\\\\d_Ges')\n", (15480, 15575), True, 'import numpy as np\n'), ((17692, 17720), 'numpy.zeros', 'np.zeros', (['(npoints, npoints)'], {}), '((npoints, npoints))\n', (17700, 17720), True, 'import numpy as np\n'), ((19020, 19127), 'numpy.savetxt', 'np.savetxt', (['csvname', 'zdata'], {'fmt': '"""%.4e"""', 'delimiter': '""","""', 'header': '"""Descriptor 1, Descriptor 2, log10(TOF)"""'}), "(csvname, zdata, fmt='%.4e', delimiter=',', header=\n 'Descriptor 1, Descriptor 2, log10(TOF)')\n", (19030, 
19127), True, 'import numpy as np\n'), ((21419, 21456), 'numpy.arange', 'np.arange', (['x1min', '(x1max + 0.1)', 'x1base'], {}), '(x1min, x1max + 0.1, x1base)\n', (21428, 21456), True, 'import numpy as np\n'), ((21473, 21510), 'numpy.arange', 'np.arange', (['x2min', '(x2max + 0.1)', 'x2base'], {}), '(x2min, x2max + 0.1, x2base)\n', (21482, 21510), True, 'import numpy as np\n'), ((22837, 22874), 'numpy.arange', 'np.arange', (['x1min', '(x1max + 0.1)', 'x1base'], {}), '(x1min, x1max + 0.1, x1base)\n', (22846, 22874), True, 'import numpy as np\n'), ((22891, 22928), 'numpy.arange', 'np.arange', (['x2min', '(x2max + 0.1)', 'x2base'], {}), '(x2min, x2max + 0.1, x2base)\n', (22900, 22928), True, 'import numpy as np\n'), ((1580, 1591), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (1588, 1591), True, 'import numpy as np\n'), ((1598, 1609), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (1606, 1609), True, 'import numpy as np\n'), ((1776, 1804), 'numpy.vstack', 'np.vstack', (['[X1, X2, d[:, j]]'], {}), '([X1, X2, d[:, j]])\n', (1785, 1804), True, 'import numpy as np\n'), ((6271, 6299), 'numpy.vstack', 'np.vstack', (['[X1, X2, d[:, j]]'], {}), '([X1, X2, d[:, j]])\n', (6280, 6299), True, 'import numpy as np\n'), ((7104, 7137), 'volcanic.tof.calc_s_es', 'calc_s_es', (['profile', 'dgr'], {'esp': '(True)'}), '(profile, dgr, esp=True)\n', (7113, 7137), False, 'from volcanic.tof import calc_tof, calc_es, calc_s_es\n'), ((7863, 7892), 'itertools.product', 'itertools.product', (['xint', 'yint'], {}), '(xint, yint)\n', (7880, 7892), False, 'import itertools\n'), ((10028, 10056), 'numpy.vstack', 'np.vstack', (['[X1, X2, d[:, j]]'], {}), '([X1, X2, d[:, j]])\n', (10037, 10056), True, 'import numpy as np\n'), ((10861, 10894), 'volcanic.tof.calc_s_es', 'calc_s_es', (['profile', 'dgr'], {'esp': '(True)'}), '(profile, dgr, esp=True)\n', (10870, 10894), False, 'from volcanic.tof import calc_tof, calc_es, calc_s_es\n'), ((11597, 11626), 'itertools.product', 'itertools.product', 
(['xint', 'yint'], {}), '(xint, yint)\n', (11614, 11626), False, 'import itertools\n'), ((13763, 13791), 'numpy.vstack', 'np.vstack', (['[X1, X2, d[:, j]]'], {}), '([X1, X2, d[:, j]])\n', (13772, 13791), True, 'import numpy as np\n'), ((14596, 14627), 'volcanic.tof.calc_es', 'calc_es', (['profile', 'dgr'], {'esp': '(True)'}), '(profile, dgr, esp=True)\n', (14603, 14627), False, 'from volcanic.tof import calc_tof, calc_es, calc_s_es\n'), ((15330, 15359), 'itertools.product', 'itertools.product', (['xint', 'yint'], {}), '(xint, yint)\n', (15347, 15359), False, 'import itertools\n'), ((17511, 17539), 'numpy.vstack', 'np.vstack', (['[X1, X2, d[:, j]]'], {}), '([X1, X2, d[:, j]])\n', (17520, 17539), True, 'import numpy as np\n'), ((18880, 18909), 'itertools.product', 'itertools.product', (['xint', 'yint'], {}), '(xint, yint)\n', (18897, 18909), False, 'import itertools\n'), ((21663, 21681), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['fmt'], {}), '(fmt)\n', (21676, 21681), False, 'from matplotlib.ticker import FuncFormatter\n'), ((23003, 23021), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['fmt'], {}), '(fmt)\n', (23016, 23021), False, 'from matplotlib.ticker import FuncFormatter\n'), ((1864, 1876), 'numpy.array', 'np.array', (['cb'], {}), '(cb)\n', (1872, 1876), True, 'import numpy as np\n'), ((1996, 2008), 'numpy.array', 'np.array', (['ms'], {}), '(ms)\n', (2004, 2008), True, 'import numpy as np\n'), ((2385, 2419), 'sklearn.linear_model.LinearRegression', 'sk.linear_model.LinearRegression', ([], {}), '()\n', (2417, 2419), True, 'import sklearn as sk\n'), ((2811, 2826), 'numpy.isnan', 'np.isnan', (['Ym[k]'], {}), '(Ym[k])\n', (2819, 2826), True, 'import numpy as np\n'), ((4050, 4068), 'numpy.sum', 'np.sum', (['(resid ** 2)'], {}), '(resid ** 2)\n', (4056, 4068), True, 'import numpy as np\n'), ((6359, 6393), 'sklearn.linear_model.LinearRegression', 'sk.linear_model.LinearRegression', ([], {}), '()\n', (6391, 6393), True, 'import sklearn as sk\n'), 
((10116, 10150), 'sklearn.linear_model.LinearRegression', 'sk.linear_model.LinearRegression', ([], {}), '()\n', (10148, 10150), True, 'import sklearn as sk\n'), ((13851, 13885), 'sklearn.linear_model.LinearRegression', 'sk.linear_model.LinearRegression', ([], {}), '()\n', (13883, 13885), True, 'import sklearn as sk\n'), ((17599, 17633), 'sklearn.linear_model.LinearRegression', 'sk.linear_model.LinearRegression', ([], {}), '()\n', (17631, 17633), True, 'import sklearn as sk\n'), ((2088, 2100), 'numpy.isnan', 'np.isnan', (['XY'], {}), '(XY)\n', (2096, 2100), True, 'import numpy as np\n'), ((2761, 2779), 'numpy.isnan', 'np.isnan', (['Xm[k, 0]'], {}), '(Xm[k, 0])\n', (2769, 2779), True, 'import numpy as np\n'), ((2788, 2806), 'numpy.isnan', 'np.isnan', (['Xm[k, 1]'], {}), '(Xm[k, 1])\n', (2796, 2806), True, 'import numpy as np\n'), ((18224, 18268), 'volcanic.tof.calc_tof', 'calc_tof', (['profile', 'dgr', 'T', 'coeff'], {'exact': '(True)'}), '(profile, dgr, T, coeff, exact=True)\n', (18232, 18268), False, 'from volcanic.tof import calc_tof, calc_es, calc_s_es\n'), ((2131, 2143), 'numpy.isnan', 'np.isnan', (['XY'], {}), '(XY)\n', (2139, 2143), True, 'import numpy as np\n'), ((2959, 2974), 'numpy.isnan', 'np.isnan', (['Ym[k]'], {}), '(Ym[k])\n', (2967, 2974), True, 'import numpy as np\n'), ((2983, 3001), 'numpy.isnan', 'np.isnan', (['Xm[k, 0]'], {}), '(Xm[k, 0])\n', (2991, 3001), True, 'import numpy as np\n'), ((3694, 3832), 'volcanic.exceptions.MissingDataError', 'MissingDataError', (['"""Both descriptor and regression target are undefined. This should have been fixed before this point. Exiting."""'], {}), "(\n 'Both descriptor and regression target are undefined. This should have been fixed before this point. 
Exiting.'\n )\n", (3710, 3832), False, 'from volcanic.exceptions import MissingDataError\n'), ((6584, 6603), 'numpy.vstack', 'np.vstack', (['[x1, x2]'], {}), '([x1, x2])\n', (6593, 6603), True, 'import numpy as np\n'), ((10341, 10360), 'numpy.vstack', 'np.vstack', (['[x1, x2]'], {}), '([x1, x2])\n', (10350, 10360), True, 'import numpy as np\n'), ((14076, 14095), 'numpy.vstack', 'np.vstack', (['[x1, x2]'], {}), '([x1, x2])\n', (14085, 14095), True, 'import numpy as np\n'), ((17824, 17843), 'numpy.vstack', 'np.vstack', (['[x1, x2]'], {}), '([x1, x2])\n', (17833, 17843), True, 'import numpy as np\n'), ((1051, 1061), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1058, 1061), True, 'import numpy as np\n'), ((1878, 1890), 'numpy.isnan', 'np.isnan', (['XY'], {}), '(XY)\n', (1886, 1890), True, 'import numpy as np\n'), ((2010, 2022), 'numpy.isnan', 'np.isnan', (['XY'], {}), '(XY)\n', (2018, 2022), True, 'import numpy as np\n'), ((3317, 3332), 'numpy.isnan', 'np.isnan', (['Ym[k]'], {}), '(Ym[k])\n', (3325, 3332), True, 'import numpy as np\n'), ((3341, 3359), 'numpy.isnan', 'np.isnan', (['Xm[k, 1]'], {}), '(Xm[k, 1])\n', (3349, 3359), True, 'import numpy as np\n'), ((1082, 1092), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1089, 1092), True, 'import numpy as np\n'), ((4391, 4401), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (4398, 4401), True, 'import numpy as np\n'), ((2896, 2907), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (2904, 2907), True, 'import numpy as np\n'), ((4422, 4432), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (4429, 4432), True, 'import numpy as np\n'), ((3212, 3223), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (3220, 3223), True, 'import numpy as np\n'), ((3570, 3581), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (3578, 3581), True, 'import numpy as np\n')] |
import numpy as np
from dnnv.properties.expressions import *
from dnnv.properties.visitors import DetailsInference
def test_Image_symbolic():
inference = DetailsInference()
expr = Image(Symbol("path"))
inference.visit(expr)
assert not inference.shapes[expr].is_concrete
assert not inference.types[expr].is_concrete
def test_Image_concrete(tmp_path):
inference = DetailsInference()
arr = np.random.rand(3, 32, 32)
np.save(tmp_path / "test.npy", arr)
expr = Image(Constant(tmp_path / "test.npy"))
inference.visit(expr)
assert inference.shapes[expr].is_concrete
assert inference.shapes[expr].value == arr.shape
assert inference.types[expr].is_concrete
assert inference.types[expr].value == arr.dtype
| [
"numpy.random.rand",
"numpy.save",
"dnnv.properties.visitors.DetailsInference"
] | [((161, 179), 'dnnv.properties.visitors.DetailsInference', 'DetailsInference', ([], {}), '()\n', (177, 179), False, 'from dnnv.properties.visitors import DetailsInference\n'), ((393, 411), 'dnnv.properties.visitors.DetailsInference', 'DetailsInference', ([], {}), '()\n', (409, 411), False, 'from dnnv.properties.visitors import DetailsInference\n'), ((423, 448), 'numpy.random.rand', 'np.random.rand', (['(3)', '(32)', '(32)'], {}), '(3, 32, 32)\n', (437, 448), True, 'import numpy as np\n'), ((453, 488), 'numpy.save', 'np.save', (["(tmp_path / 'test.npy')", 'arr'], {}), "(tmp_path / 'test.npy', arr)\n", (460, 488), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import scipy.io
# layers = [
# 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
# 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
# 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2',
# 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
# 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2',
# 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
# 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2',
# 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4'
# ]
def build_vgg(vgg_mat_path, layers_list,input_data):
vgg_data = scipy.io.loadmat(vgg_mat_path)
layers_data = vgg_data['layers'][0]
net = {}
# 这样用placeholder,就建立一个vgg就行了,run的时候改变输入就行
current = input_data
for i, name in enumerate(layers_list):
kind = name[:4]
with tf.variable_scope(name):
if kind == 'conv':
weights, bias = layers_data[i][0][0][0][0]
weights = np.transpose(weights, (1, 0, 2, 3))
bias = bias.reshape(-1)
# 让vgg的参数不更新
weights_t = tf.Variable(initial_value=weights, trainable=False, name='weights_t')
bias_t = tf.Variable(initial_value=bias, trainable=False, name='bias_t')
current = tf.nn.conv2d(current, weights_t, strides=[1, 1, 1, 1], padding='SAME')
current = tf.nn.bias_add(current, bias_t)
if kind == 'relu':
current = tf.nn.relu(current)
if kind == 'pool':
current = tf.nn.max_pool(current, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
net[name] = current
assert len(net) == len(layers_list)
return net
def preprocess(input_image):
mean = np.array([123.68, 116.779, 103.939])
return input_image - mean
def depreprocess(output_image):
mean = np.array([123.68, 116.779, 103.939])
return input_image + mean
| [
"tensorflow.nn.conv2d",
"tensorflow.nn.max_pool",
"tensorflow.variable_scope",
"tensorflow.nn.relu",
"tensorflow.Variable",
"numpy.array",
"numpy.transpose",
"tensorflow.nn.bias_add"
] | [((1803, 1839), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (1811, 1839), True, 'import numpy as np\n'), ((1915, 1951), 'numpy.array', 'np.array', (['[123.68, 116.779, 103.939]'], {}), '([123.68, 116.779, 103.939])\n', (1923, 1951), True, 'import numpy as np\n'), ((857, 880), 'tensorflow.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (874, 880), True, 'import tensorflow as tf\n'), ((998, 1033), 'numpy.transpose', 'np.transpose', (['weights', '(1, 0, 2, 3)'], {}), '(weights, (1, 0, 2, 3))\n', (1010, 1033), True, 'import numpy as np\n'), ((1148, 1217), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'weights', 'trainable': '(False)', 'name': '"""weights_t"""'}), "(initial_value=weights, trainable=False, name='weights_t')\n", (1159, 1217), True, 'import tensorflow as tf\n'), ((1243, 1306), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'bias', 'trainable': '(False)', 'name': '"""bias_t"""'}), "(initial_value=bias, trainable=False, name='bias_t')\n", (1254, 1306), True, 'import tensorflow as tf\n'), ((1333, 1403), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['current', 'weights_t'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(current, weights_t, strides=[1, 1, 1, 1], padding='SAME')\n", (1345, 1403), True, 'import tensorflow as tf\n'), ((1430, 1461), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['current', 'bias_t'], {}), '(current, bias_t)\n', (1444, 1461), True, 'import tensorflow as tf\n'), ((1519, 1538), 'tensorflow.nn.relu', 'tf.nn.relu', (['current'], {}), '(current)\n', (1529, 1538), True, 'import tensorflow as tf\n'), ((1596, 1682), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['current'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(current, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\n 'SAME')\n", (1610, 1682), True, 'import tensorflow as tf\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
import pickle
import warnings
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import hydra
import gtimer as gt
import numpy as np
import omegaconf
import torch
import mbrl.models.util as model_util
import mbrl.types
import mbrl.util.math
from mbrl.util.lifelong_learning import separate_observations_and_task_ids
from .model import Ensemble, LossOutput, Model, UpdateOutput
from mbrl.types import ModelInput
from torch.nn import functional as F
MODEL_LOG_FORMAT = [
("train_iteration", "I", "int"),
("epoch", "E", "int"),
("train_dataset_size", "TD", "int"),
("val_dataset_size", "VD", "int"),
("model_loss", "MLOSS", "float"),
("model_score", "MSCORE", "float"),
("model_val_score", "MVSCORE", "float"),
("model_best_val_score", "MBVSCORE", "float"),
]
class LifelongLearningModel(): # Model):
"""Wrapper class for 1-D dynamics models.
"""
_MODEL_FNAME = "model.pth"
_ELITE_FNAME = "elite_models.pkl"
def __init__(
self,
model: Model,
num_tasks: int,
obs_shape: Tuple[int, ...],
act_shape: Tuple[int, ...],
cfg: omegaconf.DictConfig,
observe_task_id: bool = False,
forward_postprocess_fn: Callable[[
torch.Tensor, torch.Tensor, torch.Tensor, torch.nn.parameter.
Parameter
], Tuple[torch.Tensor, torch.Tensor]] = lambda inputs, mean, logvar:
(mean, logvar),
):
super().__init__()
self._model = model
self._num_tasks = num_tasks
self._obs_shape = obs_shape
self._act_shape = act_shape
self._cfg = cfg
self._observe_task_id = observe_task_id
self._forward_postprocess_fn = forward_postprocess_fn
self.device = model.device
self._original_forward = self._model.model.forward
self._model.model.forward = self.forward
# Make the dimensions of the task ID not delta.
# Extend the no_delta_list with -1, -2, -3, ..., -self._num_tasks.
print('self._model.no_delta_list: ', self._model.no_delta_list)
self._model.no_delta_list.extend(
list(range(-1, -self._num_tasks - 1, -1)))
@gt.wrap
def forward(self, x: torch.Tensor, **kwargs) -> Tuple[torch.Tensor, ...]:
"""Calls forward method of base model with the given input and args."""
original_inputs = x
# if not self._observe_task_id:
# x contains the observations and the actions
observations = x[..., :-np.prod(self._act_shape)]
# if self._num_tasks > 1:
observations, task_ids = separate_observations_and_task_ids(
observations, self._num_tasks)
if self._num_tasks > 1:
assert task_ids.min(
) == 0., f'task_ids.min(): {task_ids.min()}\ntask_ids: {task_ids}'
assert task_ids.max() == 1., f'task_ids.max(): {task_ids.max()}'
assert torch.all(
torch.logical_or(
task_ids.eq(torch.ones_like(task_ids).to(self.device)),
task_ids.eq(torch.zeros_like(task_ids).to(self.device))))
if self._observe_task_id:
observations = torch.cat([observations, task_ids], dim=-1)
x = torch.cat([observations, x[..., -np.prod(self._act_shape):]],
dim=-1)
# x[..., :-np.prod(self._act_shape)] = observations
gt.stamp('forward_preprocessing')
mean, logvar = self._original_forward(x, **kwargs)
gt.stamp('original_forward')
# if self._num_tasks > 1:
if not self._observe_task_id:
task_ids_for_concatenation = torch.broadcast_to(
task_ids, mean.shape[:-1] + (task_ids.shape[-1], ))
if not self._cfg.overrides.learned_rewards:
mean = torch.cat([mean, task_ids_for_concatenation], dim=-1)
logvar = torch.cat([logvar, task_ids_for_concatenation],
dim=-1)
else:
mean = torch.cat([
mean[..., :-1], task_ids_for_concatenation,
mean[..., -1][..., None]
],
dim=-1)
logvar = torch.cat([
logvar[..., :-1], task_ids_for_concatenation,
logvar[..., -1][..., None]
],
dim=-1)
if not self._cfg.overrides.learned_rewards:
mean[..., -self._num_tasks:] = task_ids.detach()
logvar[..., -self._num_tasks:] = (torch.ones_like(task_ids) *
-float('inf')).detach()
else:
mean[..., -(self._num_tasks + 1):-1] = task_ids.detach()
logvar[...,
-(self._num_tasks + 1):-1] = (torch.ones_like(task_ids) *
-float('inf')).detach()
if self._forward_postprocess_fn is not None:
mean, logvar = self._forward_postprocess_fn(
original_inputs, mean, logvar, self._model.model.min_logvar)
gt.stamp('forward_postprocessing')
return mean, logvar
def __len__(self, *args, **kwargs):
return self._model.__len__(*args, **kwargs)
# def loss(self, *args, **kwargs):
# return self._model.loss(*args, **kwargs)
def eval_score(
self,
batch: mbrl.types.TransitionBatch,
target: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, Dict[str, Any]]:
assert target is None
with torch.no_grad():
model_in, target = self._get_model_input_and_target_from_batch(
batch)
assert model_in.ndim == 2 and target.ndim == 2
pred_mean, _ = self.forward(model_in, use_propagation=False)
target = target.repeat((self._model.model.num_members, 1, 1))
return F.mse_loss(pred_mean, target, reduction="none"), {}
# def update_normalizer(self, *args, **kwargs):
# return self._model.update_normalizer(*args, **kwargs)
def update(
self,
batch: mbrl.types.TransitionBatch,
optimizer: torch.optim.Optimizer,
target: Optional[torch.Tensor] = None,
):
assert target is None
model_in, target = self._get_model_input_and_target_from_batch(batch)
return self.model.update(model_in, optimizer, target=target)
# def get_output_and_targets(self, *args, **kwargs):
# return self._model.get_output_and_targets(*args, **kwargs)
# def sample(self, *args, **kwargs):
# return self._model.sample(*args, **kwargs)
# def reset(self, *args, **kwargs):
# return self._model.reset(*args, **kwargs)
# def save(self, *args, **kwargs):
def __getattr__(self, attr_name):
return getattr(self._model, attr_name)
| [
"numpy.prod",
"torch.ones_like",
"mbrl.util.lifelong_learning.separate_observations_and_task_ids",
"torch.nn.functional.mse_loss",
"torch.broadcast_to",
"torch.no_grad",
"torch.zeros_like",
"gtimer.stamp",
"torch.cat"
] | [((2833, 2898), 'mbrl.util.lifelong_learning.separate_observations_and_task_ids', 'separate_observations_and_task_ids', (['observations', 'self._num_tasks'], {}), '(observations, self._num_tasks)\n', (2867, 2898), False, 'from mbrl.util.lifelong_learning import separate_observations_and_task_ids\n'), ((3612, 3645), 'gtimer.stamp', 'gt.stamp', (['"""forward_preprocessing"""'], {}), "('forward_preprocessing')\n", (3620, 3645), True, 'import gtimer as gt\n'), ((3713, 3741), 'gtimer.stamp', 'gt.stamp', (['"""original_forward"""'], {}), "('original_forward')\n", (3721, 3741), True, 'import gtimer as gt\n'), ((5335, 5369), 'gtimer.stamp', 'gt.stamp', (['"""forward_postprocessing"""'], {}), "('forward_postprocessing')\n", (5343, 5369), True, 'import gtimer as gt\n'), ((3396, 3439), 'torch.cat', 'torch.cat', (['[observations, task_ids]'], {'dim': '(-1)'}), '([observations, task_ids], dim=-1)\n', (3405, 3439), False, 'import torch\n'), ((3855, 3924), 'torch.broadcast_to', 'torch.broadcast_to', (['task_ids', '(mean.shape[:-1] + (task_ids.shape[-1],))'], {}), '(task_ids, mean.shape[:-1] + (task_ids.shape[-1],))\n', (3873, 3924), False, 'import torch\n'), ((5796, 5811), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5809, 5811), False, 'import torch\n'), ((4022, 4075), 'torch.cat', 'torch.cat', (['[mean, task_ids_for_concatenation]'], {'dim': '(-1)'}), '([mean, task_ids_for_concatenation], dim=-1)\n', (4031, 4075), False, 'import torch\n'), ((4101, 4156), 'torch.cat', 'torch.cat', (['[logvar, task_ids_for_concatenation]'], {'dim': '(-1)'}), '([logvar, task_ids_for_concatenation], dim=-1)\n', (4110, 4156), False, 'import torch\n'), ((4233, 4326), 'torch.cat', 'torch.cat', (['[mean[..., :-1], task_ids_for_concatenation, mean[..., -1][..., None]]'], {'dim': '(-1)'}), '([mean[..., :-1], task_ids_for_concatenation, mean[..., -1][...,\n None]], dim=-1)\n', (4242, 4326), False, 'import torch\n'), ((4439, 4537), 'torch.cat', 'torch.cat', (['[logvar[..., :-1], 
task_ids_for_concatenation, logvar[..., -1][..., None]]'], {'dim': '(-1)'}), '([logvar[..., :-1], task_ids_for_concatenation, logvar[..., -1][\n ..., None]], dim=-1)\n', (4448, 4537), False, 'import torch\n'), ((6137, 6184), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['pred_mean', 'target'], {'reduction': '"""none"""'}), "(pred_mean, target, reduction='none')\n", (6147, 6184), True, 'from torch.nn import functional as F\n'), ((2740, 2764), 'numpy.prod', 'np.prod', (['self._act_shape'], {}), '(self._act_shape)\n', (2747, 2764), True, 'import numpy as np\n'), ((4785, 4810), 'torch.ones_like', 'torch.ones_like', (['task_ids'], {}), '(task_ids)\n', (4800, 4810), False, 'import torch\n'), ((5039, 5064), 'torch.ones_like', 'torch.ones_like', (['task_ids'], {}), '(task_ids)\n', (5054, 5064), False, 'import torch\n'), ((3217, 3242), 'torch.ones_like', 'torch.ones_like', (['task_ids'], {}), '(task_ids)\n', (3232, 3242), False, 'import torch\n'), ((3289, 3315), 'torch.zeros_like', 'torch.zeros_like', (['task_ids'], {}), '(task_ids)\n', (3305, 3315), False, 'import torch\n'), ((3485, 3509), 'numpy.prod', 'np.prod', (['self._act_shape'], {}), '(self._act_shape)\n', (3492, 3509), True, 'import numpy as np\n')] |
import numpy as np
import cv2
mydata = {}
def detectShape(c):
shape = 'unknown'
# calculate perimeter using
peri = cv2.arcLength(c, True)
# apply contour approximation and store the result in vertices
vertices = cv2.approxPolyDP(c, 0.04 * peri, True)
# If the shape it triangle, it will have 3 vertices
if len(vertices) == 3:
shape = 'triangle'
# i/f/ the shape has 4 vertices, it is either a square or
# a rectangle
elif len(vertices) == 4:
# using the boundingRect method calculate the width and height
# of enclosing rectange and then calculte aspect ratio
x, y, width, height = cv2.boundingRect(vertices)
aspectRatio = float(width) / height
# a square will have an aspect ratio that is approximately
# equal to one, otherwise, the shape is a rectangle
if aspectRatio >= 0.95 and aspectRatio <= 1.05:
shape = "square"
# print("X-sq-axis", x)
# print("Y-sq-axis", y)
else:
shape = "rectangle"
# print("X-rec-axis", x)
# print("Y-rec-axis", y)
# if the shape is a pentagon, it will have 5 vertices
elif len(vertices) == 5:
shape = "pentagon"
# otherwise, we assume the shape is a circle
else:
shape = "circle"
centers, radius = cv2.minEnclosingCircle(c)
print(centers,radius)
# return the name of the shape
if shape == "square":
return shape, x, y, width, height
elif shape == "rectangle":
return shape, x, y, width, height
elif shape == "circle":
return shape, centers,radius
else :
return shape
def detect_myshapes(img_url):
myshapes =[]
image = cv2.imread(img_url)
grayScale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
sigma = 0.33
v = np.median(grayScale)
low = int(max(0, (1.0 - sigma) * v))
high = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(grayScale, low, high)
(_, cnts, _) = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
# cox1 = mpute the moment of contour
M = cv2.moments(c)
# print(M)
# From moment we can calculte area, centroid etc
# The center or centroid can be calculated as follows
cX = int(M['m10'] / M['m00'])
cY = int(M['m01'] / M['m00'])
# call detectShape for contour c
shape = detectShape(c)
# Outline the contours
# x2 = x + width
# y2 = y + height
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
# Write the name of shape on the center of shapes
# cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
# 0.5, (255, 255, 255), 2)
# cv2.imshow('frame', image)
myshapes.append(shape)
return myshapes
sh = detect_myshapes('n7.jpg')
print(sh)
| [
"numpy.median",
"cv2.drawContours",
"cv2.arcLength",
"cv2.minEnclosingCircle",
"cv2.approxPolyDP",
"cv2.cvtColor",
"cv2.moments",
"cv2.findContours",
"cv2.Canny",
"cv2.imread",
"cv2.boundingRect"
] | [((141, 163), 'cv2.arcLength', 'cv2.arcLength', (['c', '(True)'], {}), '(c, True)\n', (154, 163), False, 'import cv2\n'), ((248, 286), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['c', '(0.04 * peri)', '(True)'], {}), '(c, 0.04 * peri, True)\n', (264, 286), False, 'import cv2\n'), ((1840, 1859), 'cv2.imread', 'cv2.imread', (['img_url'], {}), '(img_url)\n', (1850, 1859), False, 'import cv2\n'), ((1877, 1916), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1889, 1916), False, 'import cv2\n'), ((1944, 1964), 'numpy.median', 'np.median', (['grayScale'], {}), '(grayScale)\n', (1953, 1964), True, 'import numpy as np\n'), ((2067, 2098), 'cv2.Canny', 'cv2.Canny', (['grayScale', 'low', 'high'], {}), '(grayScale, low, high)\n', (2076, 2098), False, 'import cv2\n'), ((2119, 2186), 'cv2.findContours', 'cv2.findContours', (['edged', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (2135, 2186), False, 'import cv2\n'), ((2270, 2284), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (2281, 2284), False, 'import cv2\n'), ((2678, 2726), 'cv2.drawContours', 'cv2.drawContours', (['image', '[c]', '(-1)', '(0, 255, 0)', '(2)'], {}), '(image, [c], -1, (0, 255, 0), 2)\n', (2694, 2726), False, 'import cv2\n'), ((684, 710), 'cv2.boundingRect', 'cv2.boundingRect', (['vertices'], {}), '(vertices)\n', (700, 710), False, 'import cv2\n'), ((1429, 1454), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (1451, 1454), False, 'import cv2\n')] |
import warnings
import argparse
import torch
import numpy as np
from rich import print
from constants import DPAC_ATT_CAT_COUNT
from dataloader import load_data
from models.base import MultiOutputModel
# from loss import MultiTaskLoss_DPAC
from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score
def _net_output_to_predictions(output):
_, predicted_age = output['age'].cpu().max(1)
_, predicted_gender = output['gender'].cpu().max(1)
_, predicted_emotion = output['emotion'].cpu().max(1)
return predicted_age.numpy().tolist(), predicted_gender.numpy().tolist(), predicted_emotion.numpy().tolist()
def _model_checkpoint_load(model, name):
print('Restoring checkpoint: {}'.format(name))
model.load_state_dict(torch.load(name, map_location='cpu'))
def _calculate_metrics(target, output):
predicted_age = output['age']
gt_age = target['age']
predicted_gender = output['gender']
gt_gender = target['gender']
predicted_emotion = output['emotion']
gt_emotion = target['emotion']
with warnings.catch_warnings():
warnings.simplefilter("ignore")
recall_age = recall_score(
y_true=gt_age, y_pred=predicted_age, average='weighted')
precision_age = precision_score(
y_true=gt_age, y_pred=predicted_age, average='weighted')
f1_age = f1_score(y_true=gt_age, y_pred=predicted_age,
average='weighted')
accuracy_age = accuracy_score(y_true=gt_age, y_pred=predicted_age)
recall_gender = recall_score(
y_true=gt_gender, y_pred=predicted_gender, average='weighted')
precision_gender = precision_score(
y_true=gt_gender, y_pred=predicted_gender, average='weighted')
f1_gender = f1_score(
y_true=gt_gender, y_pred=predicted_gender, average='weighted')
accuracy_gender = accuracy_score(
y_true=gt_gender, y_pred=predicted_gender)
recall_emotion = recall_score(
y_true=gt_emotion, y_pred=predicted_emotion, average='weighted')
precision_emotion = precision_score(
y_true=gt_emotion, y_pred=predicted_emotion, average='weighted')
f1_emotion = f1_score(
y_true=gt_emotion, y_pred=predicted_emotion, average='weighted')
accuracy_emotion = accuracy_score(
y_true=gt_emotion, y_pred=predicted_emotion)
print("Accuracy Age: {:.4f}, Gender: {:.4f}, Emotion: {:.4f}".format(
accuracy_age, accuracy_gender, accuracy_emotion))
print("Precision Age: {:.4f}, Gender: {:.4f}, Emotion: {:.4f}".format(
precision_age, precision_gender, precision_emotion))
print("Recall Age: {:.4f}, Gender: {:.4f}, Emotion: {:.4f}".format(
recall_age, recall_gender, recall_emotion))
print("F1 Age: {:.4f}, Gender: {:.4f}, Emotion: {:.4f}".format(
f1_age, f1_gender, f1_emotion))
def test(checkpoint=None, gpu_device=0):
device = torch.device("cuda:" + str(gpu_device)
if torch.cuda.is_available() else "cpu")
# loss = MultiTaskLoss_DPAC()
model = MultiOutputModel(device, n_age_cat=DPAC_ATT_CAT_COUNT['age'],
n_gender_cat=DPAC_ATT_CAT_COUNT['gender'], n_emotion_cat=DPAC_ATT_CAT_COUNT['emotion'])
model.to(device)
test_dataloader = load_data(batch_size=16, datatype='test')
if checkpoint is not None:
_model_checkpoint_load(model, checkpoint)
model.eval()
age_predictions = []
gender_predictions = []
emotion_predictions = []
age_labels = []
gender_labels = []
emotion_labels = []
with torch.no_grad():
for batch in test_dataloader:
target_pose = batch['pose'].to(device)
target = batch['target'].to(device)
context = batch['context'].to(device)
target_att_labels = batch['labels']
target_att_labels = {t: target_att_labels[t].to(
device) for t in target_att_labels}
output = model(context, target, target_pose)
# _train, val_train_losses = loss(output, target_labels)
(batch_age_predictions,
batch_gender_predictions, batch_emotion_predictions) = _net_output_to_predictions(output)
emotion_labels.extend(
target_att_labels['emotion'].cpu().numpy().tolist())
age_labels.extend(target_att_labels['age'].cpu().numpy().tolist())
gender_labels.extend(
target_att_labels['gender'].cpu().numpy().tolist())
age_predictions.extend(batch_age_predictions)
gender_predictions.extend(batch_gender_predictions)
emotion_predictions.extend(batch_emotion_predictions)
target_dict = {"age": np.asarray(age_labels), "gender": np.asarray(
gender_labels), "emotion": np.asarray(emotion_labels), }
output_dict = {"age": np.asarray(age_predictions), "gender": np.asarray(
gender_predictions), "emotion": np.asarray(emotion_predictions)}
_calculate_metrics(target_dict, output_dict)
# model.train()
if __name__ == "__main__":
# print("torch.cuda.device_count() ", torch.cuda.device_count())
# print("cuda is_available:", torch.cuda.is_available())
# print("cuda current_device", torch.cuda.current_device())
# print("cuda get_device_name:", torch.cuda.get_device_name())
# print("cuda memory_allocated:", torch.cuda.memory_allocated())
# print("cuda memory_reserved:", torch.cuda.memory_reserved())
parser = argparse.ArgumentParser()
parser.add_argument('--cp_path', type=str, default="cp_dpac_face_T/29.pth",
help='Checkpoint to test on', required=True)
# gpu device to use
parser.add_argument('--gpu_device', type=int,
help='GPU device to train the model on')
args = parser.parse_args()
test(args.cp_path, args.gpu_device)
| [
"sklearn.metrics.f1_score",
"argparse.ArgumentParser",
"models.base.MultiOutputModel",
"torch.load",
"warnings.catch_warnings",
"numpy.asarray",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"torch.cuda.is_available",
"dataloader.load_data",
"warnings.simplefilter",
"torch... | [((3127, 3281), 'models.base.MultiOutputModel', 'MultiOutputModel', (['device'], {'n_age_cat': "DPAC_ATT_CAT_COUNT['age']", 'n_gender_cat': "DPAC_ATT_CAT_COUNT['gender']", 'n_emotion_cat': "DPAC_ATT_CAT_COUNT['emotion']"}), "(device, n_age_cat=DPAC_ATT_CAT_COUNT['age'], n_gender_cat=\n DPAC_ATT_CAT_COUNT['gender'], n_emotion_cat=DPAC_ATT_CAT_COUNT['emotion'])\n", (3143, 3281), False, 'from models.base import MultiOutputModel\n'), ((3350, 3391), 'dataloader.load_data', 'load_data', ([], {'batch_size': '(16)', 'datatype': '"""test"""'}), "(batch_size=16, datatype='test')\n", (3359, 3391), False, 'from dataloader import load_data\n'), ((5558, 5583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5581, 5583), False, 'import argparse\n'), ((765, 801), 'torch.load', 'torch.load', (['name'], {'map_location': '"""cpu"""'}), "(name, map_location='cpu')\n", (775, 801), False, 'import torch\n'), ((1068, 1093), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1091, 1093), False, 'import warnings\n'), ((1103, 1134), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1124, 1134), False, 'import warnings\n'), ((1157, 1226), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'gt_age', 'y_pred': 'predicted_age', 'average': '"""weighted"""'}), "(y_true=gt_age, y_pred=predicted_age, average='weighted')\n", (1169, 1226), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1264, 1336), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'gt_age', 'y_pred': 'predicted_age', 'average': '"""weighted"""'}), "(y_true=gt_age, y_pred=predicted_age, average='weighted')\n", (1279, 1336), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1367, 1432), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'gt_age', 'y_pred': 'predicted_age', 'average': 
'"""weighted"""'}), "(y_true=gt_age, y_pred=predicted_age, average='weighted')\n", (1375, 1432), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1482, 1533), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'gt_age', 'y_pred': 'predicted_age'}), '(y_true=gt_age, y_pred=predicted_age)\n', (1496, 1533), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1559, 1634), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'gt_gender', 'y_pred': 'predicted_gender', 'average': '"""weighted"""'}), "(y_true=gt_gender, y_pred=predicted_gender, average='weighted')\n", (1571, 1634), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1675, 1753), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'gt_gender', 'y_pred': 'predicted_gender', 'average': '"""weighted"""'}), "(y_true=gt_gender, y_pred=predicted_gender, average='weighted')\n", (1690, 1753), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1787, 1858), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'gt_gender', 'y_pred': 'predicted_gender', 'average': '"""weighted"""'}), "(y_true=gt_gender, y_pred=predicted_gender, average='weighted')\n", (1795, 1858), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1898, 1955), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'gt_gender', 'y_pred': 'predicted_gender'}), '(y_true=gt_gender, y_pred=predicted_gender)\n', (1912, 1955), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((1995, 2072), 'sklearn.metrics.recall_score', 'recall_score', ([], {'y_true': 'gt_emotion', 'y_pred': 'predicted_emotion', 'average': '"""weighted"""'}), "(y_true=gt_emotion, y_pred=predicted_emotion, average='weighted')\n", (2007, 2072), 
False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((2114, 2199), 'sklearn.metrics.precision_score', 'precision_score', ([], {'y_true': 'gt_emotion', 'y_pred': 'predicted_emotion', 'average': '"""weighted"""'}), "(y_true=gt_emotion, y_pred=predicted_emotion, average='weighted'\n )\n", (2129, 2199), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((2229, 2302), 'sklearn.metrics.f1_score', 'f1_score', ([], {'y_true': 'gt_emotion', 'y_pred': 'predicted_emotion', 'average': '"""weighted"""'}), "(y_true=gt_emotion, y_pred=predicted_emotion, average='weighted')\n", (2237, 2302), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((2343, 2402), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'gt_emotion', 'y_pred': 'predicted_emotion'}), '(y_true=gt_emotion, y_pred=predicted_emotion)\n', (2357, 2402), False, 'from sklearn.metrics import precision_score, f1_score, recall_score, accuracy_score\n'), ((3653, 3668), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3666, 3668), False, 'import torch\n'), ((4786, 4808), 'numpy.asarray', 'np.asarray', (['age_labels'], {}), '(age_labels)\n', (4796, 4808), True, 'import numpy as np\n'), ((4820, 4845), 'numpy.asarray', 'np.asarray', (['gender_labels'], {}), '(gender_labels)\n', (4830, 4845), True, 'import numpy as np\n'), ((4867, 4893), 'numpy.asarray', 'np.asarray', (['emotion_labels'], {}), '(emotion_labels)\n', (4877, 4893), True, 'import numpy as np\n'), ((4923, 4950), 'numpy.asarray', 'np.asarray', (['age_predictions'], {}), '(age_predictions)\n', (4933, 4950), True, 'import numpy as np\n'), ((4962, 4992), 'numpy.asarray', 'np.asarray', (['gender_predictions'], {}), '(gender_predictions)\n', (4972, 4992), True, 'import numpy as np\n'), ((5014, 5045), 'numpy.asarray', 'np.asarray', (['emotion_predictions'], {}), '(emotion_predictions)\n', (5024, 5045), True, 'import 
numpy as np\n'), ((3042, 3067), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3065, 3067), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import ga, optimization, numpy, struct
def binary(num):
return ''.join(bin(ord(c)).replace('0b', '').rjust(8, '0') for c in struct.pack('!f', num))
class RastriginFloatIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.RastriginFloatIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x,y] values are uniformly distributed over -5.0 and 5.0."""
genotype = numpy.random.uniform(low=-5.0, high=5.0, size=2)
fitness_evaluator = optimization.RastriginFloatFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.uniform(low=-5.0, high=5.0)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(RastriginFloatIndividualFactory)
class RastriginBinaryIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.RastriginBinaryIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x,y] values are represented by 32 bits."""
genotype = map(binary, numpy.random.uniform(low=-5.0, high=5.0, size=2))
genotype = numpy.array(list("".join(genotype)), dtype=int)
fitness_evaluator = optimization.RastriginBinaryFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.randint(2)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(RastriginBinaryIndividualFactory)
class XSquareFloatIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.XSquareFloatIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x1,x2,...,x30] values are uniformly distributed over -100.0 and 100.0."""
genotype = numpy.random.uniform(low=-100.0, high=100.0, size=30)
fitness_evaluator = optimization.XSquareFloatFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.uniform(low=-100.0, high=100.0)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(XSquareFloatIndividualFactory)
class XSquareBinaryIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.XSquareBinaryIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x,y] values are represented by 32 bits."""
genotype = map(binary, numpy.random.uniform(low=-100.0, high=100.0, size=30))
genotype = numpy.array(list("".join(genotype)), dtype=int)
fitness_evaluator = optimization.XSquareBinaryFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.randint(2)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(XSquareBinaryIndividualFactory)
class XAbsoluteSquareFloatIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.XAbsoluteSquareFloatIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x1,x2,...,x30] values are uniformly distributed over -100.0 and 100.0."""
genotype = numpy.random.uniform(low=-100.0, high=100.0, size=30)
fitness_evaluator = optimization.XAbsoluteSquareFloatFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.uniform(low=-100.0, high=100.0)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(XAbsoluteSquareFloatIndividualFactory)
class XAbsoluteSquareBinaryIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.XAbsoluteSquareBinaryIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x,y] values are represented by 32 bits."""
genotype = map(binary, numpy.random.uniform(low=-100.0, high=100.0, size=30))
genotype = numpy.array(list("".join(genotype)), dtype=int)
fitness_evaluator = optimization.XAbsoluteSquareBinaryFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.randint(2)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(XAbsoluteSquareBinaryIndividualFactory)
class SineXSquareRootFloatIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.SineXSquareRootFloatIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x1,x2,...,x30] values are uniformly distributed over -500.0 and 500.0."""
genotype = numpy.random.uniform(low=-500.0, high=500.0, size=30)
fitness_evaluator = optimization.SineXSquareRootFloatFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.uniform(low=-500.0, high=500.0)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(SineXSquareRootFloatIndividualFactory)
class SineXSquareRootBinaryIndividualFactory(ga.IndividualFactory):
def __init__(self, crossover_method='one_point', mutation_method='permutation'):
super(optimization.SineXSquareRootBinaryIndividualFactory, self).__init__()
self.crossover_method = crossover_method
if mutation_method == 'basic_mutation':
self.mutation_method = self.basic_mutation
else:
self.mutation_method = mutation_method
def create(self):
"""Creates individuals which [x,y] values are represented by 32 bits."""
genotype = map(binary, numpy.random.uniform(low=-500.0, high=500.0, size=30))
genotype = numpy.array(list("".join(genotype)), dtype=int)
fitness_evaluator = optimization.SineXSquareRootBinaryFitnessEvaluator()
return optimization.Individual(genotype, fitness_evaluator, self.crossover_method, self.mutation_method)
def basic_mutation(self_individual, individual):
"""Performs a basic mutation where one value in the chromosome is replaced by another valid value."""
idx = numpy.random.randint(0, len(individual.genotype))
value = numpy.random.randint(2)
numpy.put(individual.genotype, [idx], [value])
individual.fitness = individual.fitness_evaluator.evaluate(individual)
return individual
ga.IndividualFactory.register(SineXSquareRootBinaryIndividualFactory) | [
"optimization.XSquareBinaryFitnessEvaluator",
"numpy.put",
"optimization.RastriginBinaryFitnessEvaluator",
"optimization.XSquareFloatFitnessEvaluator",
"optimization.XAbsoluteSquareFloatFitnessEvaluator",
"optimization.SineXSquareRootFloatFitnessEvaluator",
"optimization.SineXSquareRootBinaryFitnessEval... | [((1508, 1570), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['RastriginFloatIndividualFactory'], {}), '(RastriginFloatIndividualFactory)\n', (1537, 1570), False, 'import ga, optimization, numpy, struct\n'), ((2892, 2955), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['RastriginBinaryIndividualFactory'], {}), '(RastriginBinaryIndividualFactory)\n', (2921, 2955), False, 'import ga, optimization, numpy, struct\n'), ((4236, 4296), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['XSquareFloatIndividualFactory'], {}), '(XSquareFloatIndividualFactory)\n', (4265, 4296), False, 'import ga, optimization, numpy, struct\n'), ((5617, 5678), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['XSquareBinaryIndividualFactory'], {}), '(XSquareBinaryIndividualFactory)\n', (5646, 5678), False, 'import ga, optimization, numpy, struct\n'), ((6983, 7051), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['XAbsoluteSquareFloatIndividualFactory'], {}), '(XAbsoluteSquareFloatIndividualFactory)\n', (7012, 7051), False, 'import ga, optimization, numpy, struct\n'), ((8396, 8465), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['XAbsoluteSquareBinaryIndividualFactory'], {}), '(XAbsoluteSquareBinaryIndividualFactory)\n', (8425, 8465), False, 'import ga, optimization, numpy, struct\n'), ((9770, 9838), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['SineXSquareRootFloatIndividualFactory'], {}), '(SineXSquareRootFloatIndividualFactory)\n', (9799, 9838), False, 'import ga, optimization, numpy, struct\n'), ((11183, 11252), 'ga.IndividualFactory.register', 'ga.IndividualFactory.register', (['SineXSquareRootBinaryIndividualFactory'], {}), '(SineXSquareRootBinaryIndividualFactory)\n', (11212, 11252), False, 'import ga, optimization, numpy, struct\n'), ((59, 80), 'os.path.abspath', 'os.path.abspath', 
(['""".."""'], {}), "('..')\n", (74, 80), False, 'import sys, os\n'), ((826, 874), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(2)'}), '(low=-5.0, high=5.0, size=2)\n', (846, 874), False, 'import ga, optimization, numpy, struct\n'), ((903, 948), 'optimization.RastriginFloatFitnessEvaluator', 'optimization.RastriginFloatFitnessEvaluator', ([], {}), '()\n', (946, 948), False, 'import ga, optimization, numpy, struct\n'), ((964, 1065), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (987, 1065), False, 'import ga, optimization, numpy, struct\n'), ((1306, 1346), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)'}), '(low=-5.0, high=5.0)\n', (1326, 1346), False, 'import ga, optimization, numpy, struct\n'), ((1355, 1401), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, [idx], [value])\n', (1364, 1401), False, 'import ga, optimization, numpy, struct\n'), ((2303, 2349), 'optimization.RastriginBinaryFitnessEvaluator', 'optimization.RastriginBinaryFitnessEvaluator', ([], {}), '()\n', (2347, 2349), False, 'import ga, optimization, numpy, struct\n'), ((2365, 2466), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (2388, 2466), False, 'import ga, optimization, numpy, struct\n'), ((2707, 2730), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {}), '(2)\n', (2727, 2730), False, 'import ga, optimization, numpy, struct\n'), ((2739, 2785), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, [idx], [value])\n', (2748, 2785), False, 'import 
ga, optimization, numpy, struct\n'), ((3547, 3600), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(30)'}), '(low=-100.0, high=100.0, size=30)\n', (3567, 3600), False, 'import ga, optimization, numpy, struct\n'), ((3629, 3672), 'optimization.XSquareFloatFitnessEvaluator', 'optimization.XSquareFloatFitnessEvaluator', ([], {}), '()\n', (3670, 3672), False, 'import ga, optimization, numpy, struct\n'), ((3688, 3789), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (3711, 3789), False, 'import ga, optimization, numpy, struct\n'), ((4030, 4074), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)'}), '(low=-100.0, high=100.0)\n', (4050, 4074), False, 'import ga, optimization, numpy, struct\n'), ((4083, 4129), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, [idx], [value])\n', (4092, 4129), False, 'import ga, optimization, numpy, struct\n'), ((5030, 5074), 'optimization.XSquareBinaryFitnessEvaluator', 'optimization.XSquareBinaryFitnessEvaluator', ([], {}), '()\n', (5072, 5074), False, 'import ga, optimization, numpy, struct\n'), ((5090, 5191), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (5113, 5191), False, 'import ga, optimization, numpy, struct\n'), ((5432, 5455), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {}), '(2)\n', (5452, 5455), False, 'import ga, optimization, numpy, struct\n'), ((5464, 5510), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, [idx], [value])\n', (5473, 5510), False, 'import ga, 
optimization, numpy, struct\n'), ((6286, 6339), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(30)'}), '(low=-100.0, high=100.0, size=30)\n', (6306, 6339), False, 'import ga, optimization, numpy, struct\n'), ((6368, 6419), 'optimization.XAbsoluteSquareFloatFitnessEvaluator', 'optimization.XAbsoluteSquareFloatFitnessEvaluator', ([], {}), '()\n', (6417, 6419), False, 'import ga, optimization, numpy, struct\n'), ((6435, 6536), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (6458, 6536), False, 'import ga, optimization, numpy, struct\n'), ((6777, 6821), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)'}), '(low=-100.0, high=100.0)\n', (6797, 6821), False, 'import ga, optimization, numpy, struct\n'), ((6830, 6876), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, [idx], [value])\n', (6839, 6876), False, 'import ga, optimization, numpy, struct\n'), ((7801, 7853), 'optimization.XAbsoluteSquareBinaryFitnessEvaluator', 'optimization.XAbsoluteSquareBinaryFitnessEvaluator', ([], {}), '()\n', (7851, 7853), False, 'import ga, optimization, numpy, struct\n'), ((7869, 7970), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (7892, 7970), False, 'import ga, optimization, numpy, struct\n'), ((8211, 8234), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {}), '(2)\n', (8231, 8234), False, 'import ga, optimization, numpy, struct\n'), ((8243, 8289), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, [idx], [value])\n', (8252, 8289), 
False, 'import ga, optimization, numpy, struct\n'), ((9073, 9126), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-500.0)', 'high': '(500.0)', 'size': '(30)'}), '(low=-500.0, high=500.0, size=30)\n', (9093, 9126), False, 'import ga, optimization, numpy, struct\n'), ((9155, 9206), 'optimization.SineXSquareRootFloatFitnessEvaluator', 'optimization.SineXSquareRootFloatFitnessEvaluator', ([], {}), '()\n', (9204, 9206), False, 'import ga, optimization, numpy, struct\n'), ((9222, 9323), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (9245, 9323), False, 'import ga, optimization, numpy, struct\n'), ((9564, 9608), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-500.0)', 'high': '(500.0)'}), '(low=-500.0, high=500.0)\n', (9584, 9608), False, 'import ga, optimization, numpy, struct\n'), ((9617, 9663), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, [idx], [value])\n', (9626, 9663), False, 'import ga, optimization, numpy, struct\n'), ((10588, 10640), 'optimization.SineXSquareRootBinaryFitnessEvaluator', 'optimization.SineXSquareRootBinaryFitnessEvaluator', ([], {}), '()\n', (10638, 10640), False, 'import ga, optimization, numpy, struct\n'), ((10656, 10757), 'optimization.Individual', 'optimization.Individual', (['genotype', 'fitness_evaluator', 'self.crossover_method', 'self.mutation_method'], {}), '(genotype, fitness_evaluator, self.crossover_method,\n self.mutation_method)\n', (10679, 10757), False, 'import ga, optimization, numpy, struct\n'), ((10998, 11021), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {}), '(2)\n', (11018, 11021), False, 'import ga, optimization, numpy, struct\n'), ((11030, 11076), 'numpy.put', 'numpy.put', (['individual.genotype', '[idx]', '[value]'], {}), '(individual.genotype, 
[idx], [value])\n', (11039, 11076), False, 'import ga, optimization, numpy, struct\n'), ((2158, 2206), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(2)'}), '(low=-5.0, high=5.0, size=2)\n', (2178, 2206), False, 'import ga, optimization, numpy, struct\n'), ((4880, 4933), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(30)'}), '(low=-100.0, high=100.0, size=30)\n', (4900, 4933), False, 'import ga, optimization, numpy, struct\n'), ((7651, 7704), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(30)'}), '(low=-100.0, high=100.0, size=30)\n', (7671, 7704), False, 'import ga, optimization, numpy, struct\n'), ((10438, 10491), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(-500.0)', 'high': '(500.0)', 'size': '(30)'}), '(low=-500.0, high=500.0, size=30)\n', (10458, 10491), False, 'import ga, optimization, numpy, struct\n'), ((212, 234), 'struct.pack', 'struct.pack', (['"""!f"""', 'num'], {}), "('!f', num)\n", (223, 234), False, 'import ga, optimization, numpy, struct\n')] |
from pathlib import Path
import numpy as np
import pytest
def pytest_addoption(parser):
    """Register the ``--integration`` command-line flag with pytest."""
    parser.addoption(
        '--integration',
        action='store_true',
        default=False,
        dest='integration',
        help='enable integration tests',
    )
def pytest_collection_modifyitems(config, items):
    """Skip tests marked ``integration`` unless ``--integration`` was passed."""
    if config.getoption('--integration'):
        return
    skip_marker = pytest.mark.skip(reason='Integration tests not requested; skipping.')
    for item in items:
        if 'integration' in item.keywords:
            item.add_marker(skip_marker)
@pytest.fixture(scope='session')
def raster_tiles():
    """Session fixture: masked EM tiles from the bundled .npz, as log10(tiles) + 30."""
    archive = np.load(Path(__file__).parent / 'data' / 'em_tiles.npz')
    masked = np.ma.MaskedArray(archive['tiles'], mask=archive['mask'])
    return np.log10(masked) + 30
@pytest.fixture(scope='session')
def thresholds():
    """Session fixture: the 'thresholds' array from data/em_thresholds.npz."""
    archive = np.load(Path(__file__).parent / 'data' / 'em_thresholds.npz')
    return archive['thresholds']
@pytest.fixture(scope='session')
def hand_candidates():
    """Session fixture: the 'hand_candidates' array from data/hand_candidates.npz."""
    archive = np.load(Path(__file__).parent / 'data' / 'hand_candidates.npz')
    return archive['hand_candidates']
| [
"numpy.log10",
"pathlib.Path",
"pytest.mark.skip",
"numpy.ma.MaskedArray",
"pytest.fixture",
"numpy.load"
] | [((561, 592), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (575, 592), False, 'import pytest\n'), ((823, 854), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (837, 854), False, 'import pytest\n'), ((1039, 1070), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1053, 1070), False, 'import pytest\n'), ((694, 713), 'numpy.load', 'np.load', (['tiles_file'], {}), '(tiles_file)\n', (701, 713), True, 'import numpy as np\n'), ((726, 787), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (["tile_data['tiles']"], {'mask': "tile_data['mask']"}), "(tile_data['tiles'], mask=tile_data['mask'])\n", (743, 787), True, 'import numpy as np\n'), ((970, 994), 'numpy.load', 'np.load', (['thresholds_file'], {}), '(thresholds_file)\n', (977, 994), True, 'import numpy as np\n'), ((1181, 1199), 'numpy.load', 'np.load', (['hand_file'], {}), '(hand_file)\n', (1188, 1199), True, 'import numpy as np\n'), ((364, 433), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""Integration tests not requested; skipping."""'}), "(reason='Integration tests not requested; skipping.')\n", (380, 433), False, 'import pytest\n'), ((799, 814), 'numpy.log10', 'np.log10', (['tiles'], {}), '(tiles)\n', (807, 814), True, 'import numpy as np\n'), ((630, 644), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (634, 644), False, 'from pathlib import Path\n'), ((895, 909), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (899, 909), False, 'from pathlib import Path\n'), ((1110, 1124), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1114, 1124), False, 'from pathlib import Path\n')] |
"""
polarAWB_noGT.py
Copyright (c) 2022 Sony Group Corporation
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
import json
from pathlib import Path
import shutil
import numpy as np
from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image, rgb_to_srgb
import myutils.polarutils as plutil
import myutils.weighturils as weutil
import myutils.wbutils as wbutil
def main():
    """Run polarization-guided auto white balance over every *_imean.png scene.

    Reads parameters.json, loads the four polarization-angle captures for each
    mean image under images/<input_folder>, estimates the illuminant with
    `wbutil.polarAWB`, and writes white-balanced linear and sRGB outputs into
    results/<input_folder>.
    """
    config = json.load(open("parameters.json", "r"))
    src_dir = Path("images").joinpath(config["input_folder"])
    out_dir = Path("results").joinpath(src_dir.name)
    out_dir.mkdir(parents=True, exist_ok=True)
    # Keep a copy of the parameters next to the results for reproducibility.
    shutil.copy("parameters.json", out_dir)

    for imean_path in src_dir.glob("*_imean.png"):
        def _angle_path(tag):
            # Sibling capture for one polarizer angle (e.g. "i045").
            return Path(str(imean_path).replace("imean", tag))

        # Normalize all captures from 16-bit integers to [0, 1] floats.
        imean = my_read_image(imean_path) / MAX_16BIT
        i000 = my_read_image(_angle_path("i000")) / MAX_16BIT
        i045 = my_read_image(_angle_path("i045")) / MAX_16BIT
        i090 = my_read_image(_angle_path("i090")) / MAX_16BIT
        i135 = my_read_image(_angle_path("i135")) / MAX_16BIT

        # Stokes parameters, degree and angle of linear polarization.
        s0, s1, s2 = plutil.calc_s0s1s2_from_fourPolar(i000, i045, i090, i135)
        dolp = plutil.calc_dolp_from_s0s1s2(s0, s1, s2)
        aolp = plutil.calc_aolp_from_s1s2(s1, s2)

        # Per-pixel weights combined below into achromatic/chromatic masks.
        w_valid = weutil.valid_weight_fourPolar(i000, i045, i090, i135, th=config["valid_th"])
        w_dolp = weutil.sigmoid(
            np.mean(dolp, axis=2), alpha=config["w_dolp_a"], center=config["w_dolp_b"])
        w_dolp_ach = weutil.rg_bg_sigmoid_weight_achromatic(
            dolp, alpha=config["w_dolp_ach_a"], center=config["w_dolp_ach_b"], normalize=True)
        w_aolp_ach = weutil.rg_bg_sigmoid_weight_achromatic_phase(
            aolp, alpha=config["w_aolp_ach_a"], center=config["w_aolp_ach_b"])
        w_dolp_ch = weutil.rg_bg_sigmoid_weight_chromatic(
            dolp, alpha=config["w_dolp_ch_a"], center=config["w_dolp_ch_b"], normalize=True)
        w_aolp_ch = weutil.rg_bg_sigmoid_weight_achromatic_phase(
            aolp, alpha=config["w_aolp_ch_a"], center=config["w_aolp_ch_b"])
        weight_achromatic = w_valid * w_dolp * w_dolp_ach * w_aolp_ach
        weight_chromatic = w_valid * w_dolp * w_dolp_ch * w_aolp_ch

        # Illuminant estimation (white balance).
        illum_est = wbutil.polarAWB(dolp, imean, weight_achromatic, weight_chromatic, config["alpha"])

        # Apply the gains to R and B (G is the reference channel) and save.
        imean[..., 0] /= illum_est[..., 0]
        imean[..., 2] /= illum_est[..., 2]
        imean = np.clip(imean, 0, 1)
        imean_sRGB = np.clip(rgb_to_srgb(imean), 0, 1)
        scene_name = str(imean_path.name).replace("_imean", "")
        my_write_image(out_dir.joinpath("{}.png".format(scene_name)), imean * MAX_16BIT)
        my_write_image(out_dir.joinpath("{}_sRGB.png".format(scene_name)), imean_sRGB * MAX_16BIT)


if __name__ == "__main__":
    main()
| [
"numpy.clip",
"numpy.mean",
"myutils.weighturils.rg_bg_sigmoid_weight_achromatic",
"myutils.weighturils.rg_bg_sigmoid_weight_achromatic_phase",
"pathlib.Path",
"myutils.wbutils.polarAWB",
"myutils.weighturils.valid_weight_fourPolar",
"myutils.polarutils.calc_dolp_from_s0s1s2",
"myutils.weighturils.r... | [((691, 734), 'shutil.copy', 'shutil.copy', (['"""parameters.json"""', 'result_path'], {}), "('parameters.json', result_path)\n", (702, 734), False, 'import shutil\n'), ((1374, 1431), 'myutils.polarutils.calc_s0s1s2_from_fourPolar', 'plutil.calc_s0s1s2_from_fourPolar', (['i000', 'i045', 'i090', 'i135'], {}), '(i000, i045, i090, i135)\n', (1407, 1431), True, 'import myutils.polarutils as plutil\n'), ((1447, 1487), 'myutils.polarutils.calc_dolp_from_s0s1s2', 'plutil.calc_dolp_from_s0s1s2', (['s0', 's1', 's2'], {}), '(s0, s1, s2)\n', (1475, 1487), True, 'import myutils.polarutils as plutil\n'), ((1503, 1537), 'myutils.polarutils.calc_aolp_from_s1s2', 'plutil.calc_aolp_from_s1s2', (['s1', 's2'], {}), '(s1, s2)\n', (1529, 1537), True, 'import myutils.polarutils as plutil\n'), ((1575, 1651), 'myutils.weighturils.valid_weight_fourPolar', 'weutil.valid_weight_fourPolar', (['i000', 'i045', 'i090', 'i135'], {'th': "params['valid_th']"}), "(i000, i045, i090, i135, th=params['valid_th'])\n", (1604, 1651), True, 'import myutils.weighturils as weutil\n'), ((1794, 1919), 'myutils.weighturils.rg_bg_sigmoid_weight_achromatic', 'weutil.rg_bg_sigmoid_weight_achromatic', (['dolp'], {'alpha': "params['w_dolp_ach_a']", 'center': "params['w_dolp_ach_b']", 'normalize': '(True)'}), "(dolp, alpha=params['w_dolp_ach_a'],\n center=params['w_dolp_ach_b'], normalize=True)\n", (1832, 1919), True, 'import myutils.weighturils as weutil\n'), ((1950, 2066), 'myutils.weighturils.rg_bg_sigmoid_weight_achromatic_phase', 'weutil.rg_bg_sigmoid_weight_achromatic_phase', (['aolp'], {'alpha': "params['w_aolp_ach_a']", 'center': "params['w_aolp_ach_b']"}), "(aolp, alpha=params[\n 'w_aolp_ach_a'], center=params['w_aolp_ach_b'])\n", (1994, 2066), True, 'import myutils.weighturils as weutil\n'), ((2096, 2218), 'myutils.weighturils.rg_bg_sigmoid_weight_chromatic', 'weutil.rg_bg_sigmoid_weight_chromatic', (['dolp'], {'alpha': "params['w_dolp_ch_a']", 'center': "params['w_dolp_ch_b']", 
'normalize': '(True)'}), "(dolp, alpha=params['w_dolp_ch_a'],\n center=params['w_dolp_ch_b'], normalize=True)\n", (2133, 2218), True, 'import myutils.weighturils as weutil\n'), ((2248, 2362), 'myutils.weighturils.rg_bg_sigmoid_weight_achromatic_phase', 'weutil.rg_bg_sigmoid_weight_achromatic_phase', (['aolp'], {'alpha': "params['w_aolp_ch_a']", 'center': "params['w_aolp_ch_b']"}), "(aolp, alpha=params[\n 'w_aolp_ch_a'], center=params['w_aolp_ch_b'])\n", (2292, 2362), True, 'import myutils.weighturils as weutil\n'), ((2546, 2633), 'myutils.wbutils.polarAWB', 'wbutil.polarAWB', (['dolp', 'imean', 'weight_achromatic', 'weight_chromatic', "params['alpha']"], {}), "(dolp, imean, weight_achromatic, weight_chromatic, params[\n 'alpha'])\n", (2561, 2633), True, 'import myutils.wbutils as wbutil\n'), ((2770, 2790), 'numpy.clip', 'np.clip', (['imean', '(0)', '(1)'], {}), '(imean, 0, 1)\n', (2777, 2790), True, 'import numpy as np\n'), ((2813, 2831), 'myutils.imageutils.rgb_to_srgb', 'rgb_to_srgb', (['imean'], {}), '(imean)\n', (2824, 2831), False, 'from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image, rgb_to_srgb\n'), ((2853, 2878), 'numpy.clip', 'np.clip', (['imean_sRGB', '(0)', '(1)'], {}), '(imean_sRGB, 0, 1)\n', (2860, 2878), True, 'import numpy as np\n'), ((527, 541), 'pathlib.Path', 'Path', (['"""images"""'], {}), "('images')\n", (531, 541), False, 'from pathlib import Path\n'), ((594, 609), 'pathlib.Path', 'Path', (['"""results"""'], {}), "('results')\n", (598, 609), False, 'from pathlib import Path\n'), ((1106, 1131), 'myutils.imageutils.my_read_image', 'my_read_image', (['imean_path'], {}), '(imean_path)\n', (1119, 1131), False, 'from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image, rgb_to_srgb\n'), ((1159, 1183), 'myutils.imageutils.my_read_image', 'my_read_image', (['i000_path'], {}), '(i000_path)\n', (1172, 1183), False, 'from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image, rgb_to_srgb\n'), ((1211, 1235), 
'myutils.imageutils.my_read_image', 'my_read_image', (['i045_path'], {}), '(i045_path)\n', (1224, 1235), False, 'from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image, rgb_to_srgb\n'), ((1263, 1287), 'myutils.imageutils.my_read_image', 'my_read_image', (['i090_path'], {}), '(i090_path)\n', (1276, 1287), False, 'from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image, rgb_to_srgb\n'), ((1315, 1339), 'myutils.imageutils.my_read_image', 'my_read_image', (['i135_path'], {}), '(i135_path)\n', (1328, 1339), False, 'from myutils.imageutils import MAX_16BIT, my_read_image, my_write_image, rgb_to_srgb\n'), ((1697, 1718), 'numpy.mean', 'np.mean', (['dolp'], {'axis': '(2)'}), '(dolp, axis=2)\n', (1704, 1718), True, 'import numpy as np\n')] |
from typing import Dict, List, Optional, Tuple
import numpy as np
import scipy
import torch
from tqdm import tqdm
import datasets
from fewie.data.datasets.generic.nway_kshot import NwayKshotDataset
from fewie.encoders.encoder import Encoder
from fewie.evaluation.classifiers.classifier import Classifier
from fewie.evaluation.utils import get_metric
def mean_confidence_interval(data: List[float], confidence: float = 0.95):
    """Computes the mean and error margin of given data for a given confidence level.

    Args:
        data: A list of data (in this case F1-scores).
        confidence: The coverage probability we want to achieve with error margin.

    Returns:
        Tuple of ``(mean, margin_of_error, interval)`` where
        ``[mean - margin_of_error, mean + margin_of_error]`` is the
        ``confidence``-level Student-t confidence interval for the mean, and
        ``interval`` is the same interval as returned by
        ``scipy.stats.t.interval``.
    """
    array = np.array(data)
    num = len(array)
    ddof = num - 1
    mean, std_error_mean = np.mean(array), scipy.stats.sem(array)
    # Use the public `ppf` instead of the private `_ppf` (unstable API that
    # also skips argument validation); the two-sided critical value is taken
    # at (1 + confidence) / 2.
    margin_of_error = std_error_mean * scipy.stats.t.ppf(
        (1.0 + confidence) / 2.0, ddof
    )
    return (
        mean,
        margin_of_error,
        # Bug fix: the interval previously hard-coded 0.95 instead of honoring
        # the `confidence` argument; also reuse the already-computed mean/SEM.
        scipy.stats.t.interval(
            confidence, ddof, loc=mean, scale=std_error_mean
        ),
    )
def normalize(x: torch.Tensor) -> torch.Tensor:
    """L2-normalize `x` along dimension 1.

    Args:
        x: The tensor to be normalized (at least 2-D).

    Returns:
        A tensor of the same shape whose slices along dim 1 have unit L2 norm.
    """
    l2_norm = x.pow(2).sum(1, keepdim=True).sqrt()
    return x / l2_norm
def prepare_features(
    support_features: np.ndarray,
    support_targets: np.ndarray,
    support_labels: np.ndarray,
    query_features: np.ndarray,
    query_targets: np.ndarray,
    query_labels: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Extract the token-level features of the targeted entity tokens.

    The support/query sets are sampled at sentence level, but NER operates at
    token level: for every sampled sentence only the embedding rows whose
    token label equals that sentence's target class are kept.

    Args:
        support_features: `[batch * n_ways * k_shots, seq_len, d_hidden]`
            sentence embeddings for the support set.
        support_targets: `[batch * n_ways * k_shots]` target class ids.
        support_labels: `[batch * n_ways * k_shots, seq_len]` per-token labels.
        query_features: `[batch * n_ways * n_queries, seq_len, d_hidden]`
            sentence embeddings for the query set.
        query_targets: `[batch * n_ways * n_queries]` target class ids.
        query_labels: `[batch * n_ways * n_queries, seq_len]` per-token labels.

    Returns:
        `(X_support, y_support, X_query, y_query)`: the selected token
        embeddings stacked along axis 0 and their class ids repeated to match.
    """

    def _select_target_tokens(features, targets, labels):
        # Keep only the embedding rows whose token label matches the
        # sentence's target class.
        picked, classes = [], []
        for row, (target, token_labels) in enumerate(zip(targets, labels)):
            rows = features[row, token_labels == target, :]
            picked.append(rows)
            classes.extend([target] * rows.shape[0])
        return np.concatenate(picked, axis=0), np.array(classes)

    X_support, y_support = _select_target_tokens(
        support_features, support_targets, support_labels
    )
    X_query, y_query = _select_target_tokens(
        query_features, query_targets, query_labels
    )
    return (X_support, y_support, X_query, y_query)
def eval_few_shot_linear_readout(
    encoder: Encoder,
    dataset: datasets.Dataset,
    few_shot_dataset: NwayKshotDataset,
    classifier: Classifier,
    batch_size: int,
    device: torch.device,
    normalize_embeddings: bool = True,
    confidence: float = 0.95,
    ignore_labels: Optional[List[str]] = None,
    deterministic: bool = False,
    metrics: Optional[List[str]] = None,
    f1_include_O: bool = False,
):
    """Performs evaluation using prototypes of contextual embeddings and linear-readout method
    as classifier top.

    Each dataloader batch is a set of independent N-way K-shot episodes:
    support/query sentences are embedded with `encoder`, token-level features
    of the targeted entities are extracted via `prepare_features`, and
    `classifier` predicts the query labels from the support examples.
    Per-episode metric scores are aggregated into a mean plus a confidence
    interval via `mean_confidence_interval`.

    Args:
        encoder: Wrapped embedding model; switched to eval mode here.
        dataset: NOTE(review): accepted but never read in this body.
        few_shot_dataset: Sampler yielding
            (support, support_targets, query, query_targets) episodes.
        classifier: Callable `(X_support, y_support, X_query) -> y_pred`.
        batch_size: Number of episodes per dataloader batch.
        device: Device onto which the encoder inputs are moved.
        normalize_embeddings: If True, L2-normalize embeddings before
            classification.
        confidence: Coverage probability for the reported interval.
        ignore_labels: NOTE(review): accepted but never read in this body.
        deterministic: NOTE(review): accepted but never read in this body.
        metrics: Metric names resolved through `get_metric`; defaults to
            `["accuracy"]`.
        f1_include_O: If False, class id 0 (the "O" tag) is dropped from the
            label list handed to the scorers.

    Returns:
        Dict mapping each metric name to
        `{"mean": ..., "margin_of_error": ..., "confidence": ...}`.
    """
    encoder = encoder.eval()
    dataloader = torch.utils.data.DataLoader(
        few_shot_dataset,
        batch_size=batch_size,
    )
    n_ways = few_shot_dataset.n_ways
    k_shots = few_shot_dataset.k_shots
    n_queries = few_shot_dataset.n_queries
    if metrics is None:
        metrics = ["accuracy"]
    scorers = {metric: get_metric(metric) for metric in metrics}
    # Collects one score per episode, per metric.
    metric_scores: Dict[str, List[float]] = {metric: [] for metric in metrics}
    with torch.no_grad():
        # Each "batch" corresponds to an independent experiment run.
        for batch in tqdm(dataloader):
            # support: [batch_size, n_ways * k_shots, ...]
            # with columns: `attention_mask`, `input_ids`, `labels`, `token_type_ids`
            # query: [batch_size, n_ways * n_queries, ...]
            # support_targets: [batch_size, n_ways * k_shots]
            # query_targets: [batch_size, n_ways * n_queries]
            (
                support,
                support_targets,
                query,
                query_targets,
            ) = batch
            # NOTE: rebinds the `batch_size` parameter to the actual size of
            # this (possibly final, smaller) batch.
            batch_size, _, seq_len = support["input_ids"].shape
            support_labels = support["labels"].cpu().numpy()
            query_labels = query["labels"].cpu().numpy()
            # Flatten the episode dimension so the encoder receives plain
            # [num_sentences, seq_len] tensors; `labels` is kept aside since
            # the encoder does not consume it.
            support = {
                key: tensor.to(device)
                .view(batch_size * n_ways * k_shots, seq_len)
                .long()
                for key, tensor in support.items()
                if key != "labels"
            }
            query = {
                key: tensor.to(device)
                .view(batch_size * n_ways * n_queries, seq_len)
                .long()
                for key, tensor in query.items()
                if key != "labels"
            }
            support_features = encoder(**support).embeddings.view(
                batch_size, n_ways * k_shots, seq_len, -1
            )  # [batch_size, n_ways * k_shots, seq_len, d_hidden]
            query_features = encoder(**query).embeddings.view(
                batch_size, n_ways * n_queries, seq_len, -1
            )  # [batch_size, n_ways * n_queries, seq_len, d_hidden]
            if normalize_embeddings:
                support_features = normalize(support_features)
                query_features = normalize(query_features)
            support_features = support_features.cpu().numpy()
            query_features = query_features.cpu().numpy()
            support_targets = support_targets.numpy()
            query_targets = query_targets.numpy()
            # Score each episode in the batch independently.
            for batch_idx in range(support_features.shape[0]):
                X_support, y_support, X_query, y_query = prepare_features(
                    support_features[batch_idx],
                    support_targets[batch_idx],
                    support_labels[batch_idx],
                    query_features[batch_idx],
                    query_targets[batch_idx],
                    query_labels[batch_idx],
                )
                pred_query = classifier(X_support, y_support, X_query)
                # prepare the entity-label list
                entity_label_list = set(y_query)
                if f1_include_O is False:
                    entity_label_list.discard(0)
                for metric, scorer in scorers.items():
                    score = scorer(y_query, pred_query, labels=list(entity_label_list))
                    metric_scores[metric].append(score)
    results: Dict[str, Dict[str, float]] = {}
    for metric, scores in metric_scores.items():
        mean, margin_of_error, _ = mean_confidence_interval(scores, confidence)
        results[metric] = {
            "mean": mean,
            "margin_of_error": margin_of_error,
            "confidence": confidence,
        }
    return results
| [
"numpy.mean",
"scipy.stats.t._ppf",
"tqdm.tqdm",
"numpy.array",
"scipy.stats.sem",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"torch.no_grad",
"fewie.evaluation.utils.get_metric"
] | [((929, 943), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (937, 943), True, 'import numpy as np\n'), ((4441, 4474), 'numpy.concatenate', 'np.concatenate', (['X_support'], {'axis': '(0)'}), '(X_support, axis=0)\n', (4455, 4474), True, 'import numpy as np\n'), ((4491, 4510), 'numpy.array', 'np.array', (['y_support'], {}), '(y_support)\n', (4499, 4510), True, 'import numpy as np\n'), ((4525, 4556), 'numpy.concatenate', 'np.concatenate', (['X_query'], {'axis': '(0)'}), '(X_query, axis=0)\n', (4539, 4556), True, 'import numpy as np\n'), ((4571, 4588), 'numpy.array', 'np.array', (['y_query'], {}), '(y_query)\n', (4579, 4588), True, 'import numpy as np\n'), ((5243, 5311), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['few_shot_dataset'], {'batch_size': 'batch_size'}), '(few_shot_dataset, batch_size=batch_size)\n', (5270, 5311), False, 'import torch\n'), ((1011, 1025), 'numpy.mean', 'np.mean', (['array'], {}), '(array)\n', (1018, 1025), True, 'import numpy as np\n'), ((1027, 1049), 'scipy.stats.sem', 'scipy.stats.sem', (['array'], {}), '(array)\n', (1042, 1049), False, 'import scipy\n'), ((1089, 1139), 'scipy.stats.t._ppf', 'scipy.stats.t._ppf', (['((1.0 + confidence) / 2.0)', 'ddof'], {}), '((1.0 + confidence) / 2.0, ddof)\n', (1107, 1139), False, 'import scipy\n'), ((5535, 5553), 'fewie.evaluation.utils.get_metric', 'get_metric', (['metric'], {}), '(metric)\n', (5545, 5553), False, 'from fewie.evaluation.utils import get_metric\n'), ((5666, 5681), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5679, 5681), False, 'import torch\n'), ((5773, 5789), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (5777, 5789), False, 'from tqdm import tqdm\n'), ((1266, 1280), 'numpy.mean', 'np.mean', (['array'], {}), '(array)\n', (1273, 1280), True, 'import numpy as np\n'), ((1288, 1310), 'scipy.stats.sem', 'scipy.stats.sem', (['array'], {}), '(array)\n', (1303, 1310), False, 'import scipy\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test circuits and reference outputs for snapshot state instructions.
"""
from numpy import array, sqrt
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.providers.aer.extensions.snapshot import Snapshot
from qiskit.providers.aer.extensions.snapshot_statevector import *
def snapshot_state_circuits_deterministic(snapshot_label='snap',
                                          snapshot_type='statevector',
                                          post_measure=False):
    """Deterministic snapshot test circuits for the states |000> and |111>."""
    num_qubits = 3
    qr = QuantumRegister(num_qubits)
    cr = ClassicalRegister(num_qubits)
    # State snapshot instruction acting on all qubits
    snapshot = Snapshot(snapshot_label, snapshot_type, num_qubits)
    circuits = []
    for flip_all in (False, True):
        circuit = QuantumCircuit(qr, cr)
        if flip_all:
            # Prepare |111> instead of the default |000>.
            circuit.x(qr)
        if not post_measure:
            circuit.append(snapshot, qr)
        circuit.barrier(qr)
        circuit.measure(qr, cr)
        if post_measure:
            circuit.append(snapshot, qr)
        circuits.append(circuit)
    return circuits
def snapshot_state_counts_deterministic(shots):
    """Reference measurement counts for the deterministic snapshot circuits."""
    # Circuit 0 snapshots |000>, circuit 1 snapshots |111>.
    return [{'0x0': shots}, {'0x7': shots}]
def snapshot_state_pre_measure_statevector_deterministic():
    """Reference pre-measurement statevectors for the deterministic circuits."""
    ket000 = array([1, 0, 0, 0, 0, 0, 0, 0], dtype=complex)
    ket111 = array([0, 0, 0, 0, 0, 0, 0, 1], dtype=complex)
    return [ket000, ket111]
def snapshot_state_pre_measure_statevector_ket_deterministic():
    """Reference pre-measurement statevector kets for the deterministic circuits."""
    return [{'0x0': 1}, {'0x7': 1}]
def snapshot_state_post_measure_statevector_deterministic():
    """Reference post-measurement statevectors keyed by the measured memory value."""
    ket000 = array([1, 0, 0, 0, 0, 0, 0, 0], dtype=complex)
    ket111 = array([0, 0, 0, 0, 0, 0, 0, 1], dtype=complex)
    return [{'0x0': ket000}, {'0x7': ket111}]
def snapshot_state_post_measure_statevector_ket_deterministic():
    """Reference post-measurement statevector kets for the deterministic circuits."""
    return [{'0x0': {'0x0': 1}}, {'0x7': {'0x7': 1}}]
def snapshot_state_circuits_nondeterministic(snapshot_label='snap',
                                             snapshot_type='statevector',
                                             post_measure=False):
    """Nondeterministic snapshot test circuits: (|000> + i|111>)/sqrt(2) and |+++>."""
    num_qubits = 3
    qr = QuantumRegister(num_qubits)
    cr = ClassicalRegister(num_qubits)
    # State snapshot instruction acting on all qubits
    snapshot = Snapshot(snapshot_label, snapshot_type, num_qubits)

    def _prepare_ghz_phase(circuit):
        # |000> + i|111> (up to normalization).
        circuit.h(qr[0])
        circuit.s(qr[0])
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[0], qr[2])

    def _prepare_plus(circuit):
        # |+++>
        circuit.h(qr)

    circuits = []
    for prepare in (_prepare_ghz_phase, _prepare_plus):
        circuit = QuantumCircuit(qr, cr)
        prepare(circuit)
        if not post_measure:
            circuit.append(snapshot, qr)
        circuit.barrier(qr)
        circuit.measure(qr, cr)
        if post_measure:
            circuit.append(snapshot, qr)
        circuits.append(circuit)
    return circuits
def snapshot_state_counts_nondeterministic(shots):
    """Reference measurement counts for the nondeterministic snapshot circuits."""
    # Circuit 0: (|000> + i|111>)/sqrt(2) -> half the shots on each GHZ outcome.
    ghz_counts = {'0x0': shots / 2, '0x7': shots / 2}
    # Circuit 1: |+++> -> uniform over all eight outcomes.
    plus_counts = {hex(outcome): shots / 8 for outcome in range(8)}
    return [ghz_counts, plus_counts]
def snapshot_state_pre_measure_statevector_nondeterministic():
    """Reference pre-measurement statevectors for the nondeterministic circuits."""
    ghz_phase = array([1, 0, 0, 0, 0, 0, 0, 1j], dtype=complex) / sqrt(2)
    plus_all = array([1] * 8, dtype=complex) / sqrt(8)
    return [ghz_phase, plus_all]
def snapshot_state_pre_measure_statevector_ket_nondeterministic():
    """Reference pre-measurement statevector kets for the nondeterministic circuits."""
    ghz_phase = {"0x0": 1 / sqrt(2), "0x7": 1j / sqrt(2)}
    plus_all = {hex(outcome): 1 / sqrt(8) for outcome in range(8)}
    return [ghz_phase, plus_all]
def snapshot_state_post_measure_statevector_nondeterministic():
    """Reference post-measurement statevectors keyed by the measured memory value."""

    def _basis(index):
        # Computational-basis vector |index> on three qubits.
        vec = array([0] * 8, dtype=complex)
        vec[index] = 1
        return vec

    # Measuring (|000> + i|111>)/sqrt(2) collapses onto |000> or i|111>.
    ghz_phase = {'0x0': _basis(0), '0x7': 1j * _basis(7)}
    # Measuring |+++> collapses onto any basis state with unit amplitude.
    plus_all = {hex(outcome): _basis(outcome) for outcome in range(8)}
    return [ghz_phase, plus_all]
def snapshot_state_post_measure_statevector_ket_nondeterministic():
    """Reference post-measurement statevector kets for the nondeterministic circuits."""
    # GHZ-with-phase circuit: outcome |111> keeps the i phase.
    ghz_phase = {'0x0': {'0x0': 1}, '0x7': {'0x7': 1j}}
    # |+++> circuit: every outcome collapses to the matching basis ket.
    plus_all = {hex(outcome): {hex(outcome): 1} for outcome in range(8)}
    return [ghz_phase, plus_all]
| [
"qiskit.ClassicalRegister",
"numpy.sqrt",
"qiskit.providers.aer.extensions.snapshot.Snapshot",
"numpy.array",
"qiskit.QuantumCircuit",
"qiskit.QuantumRegister"
] | [((1084, 1111), 'qiskit.QuantumRegister', 'QuantumRegister', (['num_qubits'], {}), '(num_qubits)\n', (1099, 1111), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1121, 1150), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['num_qubits'], {}), '(num_qubits)\n', (1138, 1150), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1241, 1292), 'qiskit.providers.aer.extensions.snapshot.Snapshot', 'Snapshot', (['snapshot_label', 'snapshot_type', 'num_qubits'], {}), '(snapshot_label, snapshot_type, num_qubits)\n', (1249, 1292), False, 'from qiskit.providers.aer.extensions.snapshot import Snapshot\n'), ((1329, 1350), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (1343, 1350), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((1588, 1609), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (1602, 1609), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((3711, 3738), 'qiskit.QuantumRegister', 'QuantumRegister', (['num_qubits'], {}), '(num_qubits)\n', (3726, 3738), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((3748, 3777), 'qiskit.ClassicalRegister', 'ClassicalRegister', (['num_qubits'], {}), '(num_qubits)\n', (3765, 3777), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((3868, 3919), 'qiskit.providers.aer.extensions.snapshot.Snapshot', 'Snapshot', (['snapshot_label', 'snapshot_type', 'num_qubits'], {}), '(snapshot_label, snapshot_type, num_qubits)\n', (3876, 3919), False, 'from qiskit.providers.aer.extensions.snapshot import Snapshot\n'), ((3965, 3986), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', (3979, 3986), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((4324, 4345), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['*regs'], {}), '(*regs)\n', 
(4338, 4345), False, 'from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit\n'), ((2303, 2349), 'numpy.array', 'array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'complex'}), '([1, 0, 0, 0, 0, 0, 0, 0], dtype=complex)\n', (2308, 2349), False, 'from numpy import array, sqrt\n'), ((2391, 2437), 'numpy.array', 'array', (['[0, 0, 0, 0, 0, 0, 0, 1]'], {'dtype': 'complex'}), '([0, 0, 0, 0, 0, 0, 0, 1], dtype=complex)\n', (2396, 2437), False, 'from numpy import array, sqrt\n'), ((2942, 2988), 'numpy.array', 'array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'complex'}), '([1, 0, 0, 0, 0, 0, 0, 0], dtype=complex)\n', (2947, 2988), False, 'from numpy import array, sqrt\n'), ((3039, 3085), 'numpy.array', 'array', (['[0, 0, 0, 0, 0, 0, 0, 1]'], {'dtype': 'complex'}), '([0, 0, 0, 0, 0, 0, 0, 1], dtype=complex)\n', (3044, 3085), False, 'from numpy import array, sqrt\n'), ((5355, 5404), 'numpy.array', 'array', (['[1, 0, 0, 0, 0, 0, 0, 1.0j]'], {'dtype': 'complex'}), '([1, 0, 0, 0, 0, 0, 0, 1.0j], dtype=complex)\n', (5360, 5404), False, 'from numpy import array, sqrt\n'), ((5405, 5412), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5409, 5412), False, 'from numpy import array, sqrt\n'), ((5454, 5500), 'numpy.array', 'array', (['[1, 1, 1, 1, 1, 1, 1, 1]'], {'dtype': 'complex'}), '([1, 1, 1, 1, 1, 1, 1, 1], dtype=complex)\n', (5459, 5500), False, 'from numpy import array, sqrt\n'), ((5503, 5510), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (5507, 5510), False, 'from numpy import array, sqrt\n'), ((6378, 6424), 'numpy.array', 'array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'complex'}), '([1, 0, 0, 0, 0, 0, 0, 0], dtype=complex)\n', (6383, 6424), False, 'from numpy import array, sqrt\n'), ((6453, 6502), 'numpy.array', 'array', (['[0, 0, 0, 0, 0, 0, 0, 1.0j]'], {'dtype': 'complex'}), '([0, 0, 0, 0, 0, 0, 0, 1.0j], dtype=complex)\n', (6458, 6502), False, 'from numpy import array, sqrt\n'), ((6551, 6597), 'numpy.array', 'array', (['[1, 0, 0, 0, 0, 0, 0, 0]'], 
{'dtype': 'complex'}), '([1, 0, 0, 0, 0, 0, 0, 0], dtype=complex)\n', (6556, 6597), False, 'from numpy import array, sqrt\n'), ((6626, 6672), 'numpy.array', 'array', (['[0, 1, 0, 0, 0, 0, 0, 0]'], {'dtype': 'complex'}), '([0, 1, 0, 0, 0, 0, 0, 0], dtype=complex)\n', (6631, 6672), False, 'from numpy import array, sqrt\n'), ((6701, 6747), 'numpy.array', 'array', (['[0, 0, 1, 0, 0, 0, 0, 0]'], {'dtype': 'complex'}), '([0, 0, 1, 0, 0, 0, 0, 0], dtype=complex)\n', (6706, 6747), False, 'from numpy import array, sqrt\n'), ((6776, 6822), 'numpy.array', 'array', (['[0, 0, 0, 1, 0, 0, 0, 0]'], {'dtype': 'complex'}), '([0, 0, 0, 1, 0, 0, 0, 0], dtype=complex)\n', (6781, 6822), False, 'from numpy import array, sqrt\n'), ((6851, 6897), 'numpy.array', 'array', (['[0, 0, 0, 0, 1, 0, 0, 0]'], {'dtype': 'complex'}), '([0, 0, 0, 0, 1, 0, 0, 0], dtype=complex)\n', (6856, 6897), False, 'from numpy import array, sqrt\n'), ((6926, 6972), 'numpy.array', 'array', (['[0, 0, 0, 0, 0, 1, 0, 0]'], {'dtype': 'complex'}), '([0, 0, 0, 0, 0, 1, 0, 0], dtype=complex)\n', (6931, 6972), False, 'from numpy import array, sqrt\n'), ((7001, 7047), 'numpy.array', 'array', (['[0, 0, 0, 0, 0, 0, 1, 0]'], {'dtype': 'complex'}), '([0, 0, 0, 0, 0, 0, 1, 0], dtype=complex)\n', (7006, 7047), False, 'from numpy import array, sqrt\n'), ((7076, 7122), 'numpy.array', 'array', (['[0, 0, 0, 0, 0, 0, 0, 1]'], {'dtype': 'complex'}), '([0, 0, 0, 0, 0, 0, 0, 1], dtype=complex)\n', (7081, 7122), False, 'from numpy import array, sqrt\n'), ((5750, 5757), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5754, 5757), False, 'from numpy import array, sqrt\n'), ((5771, 5778), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (5775, 5778), False, 'from numpy import array, sqrt\n'), ((5834, 5841), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (5838, 5841), False, 'from numpy import array, sqrt\n'), ((5874, 5881), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (5878, 5881), False, 'from numpy import array, sqrt\n'), ((5914, 5921), 
'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (5918, 5921), False, 'from numpy import array, sqrt\n'), ((5954, 5961), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (5958, 5961), False, 'from numpy import array, sqrt\n'), ((5994, 6001), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (5998, 6001), False, 'from numpy import array, sqrt\n'), ((6034, 6041), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (6038, 6041), False, 'from numpy import array, sqrt\n'), ((6074, 6081), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (6078, 6081), False, 'from numpy import array, sqrt\n'), ((6114, 6121), 'numpy.sqrt', 'sqrt', (['(8)'], {}), '(8)\n', (6118, 6121), False, 'from numpy import array, sqrt\n')] |
# Ported from the Synchrosqueezing Toolbox, authored by
# <NAME>, <NAME>
# (http://www.math.princeton.edu/~ebrevdo/)
# (https://github.com/ebrevdo/synchrosqueezing/)
import numpy as np
from .utils import wfiltfn, padsignal, buffer
from quadpy import quad as quadgk
PI = np.pi  # convenience alias used throughout this module
EPS = np.finfo(np.float64).eps  # machine epsilon for float64
def stft_fwd(x, dt, opts=None):
    """Compute the short-time Fourier transform and modified short-time
    Fourier transform from [1]. The former is very closely based on Steven
    Schimmel's stft.m and istft.m from his SPHSC 503: Speech Signal Processing
    course at Univ. Washington.
    # Arguments:
        x: np.ndarray. Input signal vector, length `n` (need not be dyadic).
        dt: int, sampling period (defaults to 1).
        opts: dict. Options:
            'stft_type': str ('normal', 'modified'). Which transform to use.
            'type': str. Wavelet type. See `wfiltfn`
            'winlen': int. length of window in samples; Nyquist frequency
                      is winlen/2
            'padtype': str ('symmetric', 'replicate', 'circular'). Type
                       of padding (default = 'symmetric')
            'rpadded': bool. Whether to return padded `Sx` and `dSx`
                       (default = False)
            's', 'mu', ... : window options (see `wfiltfn`)
    # Returns:
        Sx: (na x n) size matrix (rows = scales, cols = times) containing
            samples of the CWT of `x`.
        Sfs: vector containing the associated frequencies.
        dSx: (na x n) size matrix containing samples of the time-derivatives
             of the STFT of `x`.
    # References:
        1. <NAME> and <NAME>,
        "Synchrosqueezing-based Recovery of Instantaneous Frequency
        from Nonuniform Samples",
        SIAM Journal on Mathematical Analysis, 43(5):2078-2095, 2011.
    """
    def _process_opts(opts, x):
        # Fill in defaults on the (already copied) opts dict.
        # opts['winlen'] is window length; opts['type'] overrides the
        # default hamming window
        opts['stft_type'] = opts.get('stft_type', 'normal')
        opts['winlen'] = opts.get('winlen', int(np.round(len(x) / 8)))
        # 'padtype' is one of: 'symmetric', 'replicate', 'circular'
        opts['padtype'] = opts.get('padtype', 'symmetric')
        opts['rpadded'] = opts.get('rpadded', False)
        windowfunc, diff_windowfunc = None, None
        if 'type' in opts:
            windowfunc = wfiltfn(opts['type'], opts, derivative=False)
            diff_windowfunc = wfiltfn(opts['type'], opts, derivative=True)
        return opts, windowfunc, diff_windowfunc

    # Copy so we never mutate the caller's dict (the old `opts={}` default
    # was a shared mutable object that accumulated defaults across calls).
    opts = dict(opts) if opts is not None else {}
    opts, windowfunc, diff_windowfunc = _process_opts(opts, x)

    # Pre-pad signal; this only works well for 'normal' STFT
    n = len(x)
    if opts['stft_type'] == 'normal':
        x, N_old, n1, n2 = padsignal(x, opts['padtype'], opts['winlen'])
        n1 = n1 // 2
    else:
        n1 = 0

    N = len(x)
    if opts['stft_type'] == 'normal':
        # set up window
        if 'type' in opts:
            window = windowfunc(np.linspace(-1, 1, opts['winlen']))
            diff_window = diff_windowfunc(np.linspace(-1, 1, opts['winlen']))
        else:
            window = np.hamming(opts['winlen'])
            diff_window = np.hstack([np.diff(np.hamming(opts['winlen'])), 0])
            diff_window[np.where(np.isnan(diff_window))] = 0
        # frequency range
        Sfs = np.linspace(0, 1, opts['winlen'] + 1)
        Sfs = Sfs[:np.floor(opts['winlen'] / 2).astype('int64') + 1] / dt

        # compute STFT and keep only the positive frequencies
        xbuf = buffer(x, opts['winlen'], opts['winlen'] - 1, 'nodelay')
        xbuf = np.diag(window) @ xbuf
        Sx = np.fft.fft(xbuf, None, axis=0)
        Sx = Sx[:opts['winlen'] // 2 + 1] / np.sqrt(N)

        # same steps for STFT derivative
        dxbuf = buffer(x, opts['winlen'], opts['winlen'] - 1, 'nodelay')
        dxbuf = np.diag(diff_window) @ dxbuf
        dSx = np.fft.fft(dxbuf, None, axis=0)
        dSx = dSx[:opts['winlen'] // 2 + 1] / np.sqrt(N)
        dSx /= dt
    elif opts['stft_type'] == 'modified':
        # BUGFIX: the key checked here was 'stfttype' while _process_opts
        # stores 'stft_type', so this branch always raised KeyError.
        # modified STFT is more accurately done in the frequency domain,
        # like a filter bank over different frequency bands
        # uses a lot of memory, so best used on small blocks
        # (<5000 samples) at a time
        # NOTE(review): this branch requires opts['type'] (windowfunc is
        # None otherwise) — confirm against callers.
        Sfs = np.linspace(0, 1, N) / dt
        Sx = np.zeros((N, N))
        dSx = np.zeros((N, N))
        halfN = int(np.round(N / 2))          # int: used as a slice bound below
        halfwin = int(np.floor((opts['winlen'] - 1) / 2))  # int: used as an index
        window = windowfunc(np.linspace(-1, 1, opts['winlen'])).T
        diff_window = diff_windowfunc(np.linspace(-1, 1, opts['winlen'])).T * (
            2 / opts['winlen'] / dt)
        for k in range(N):
            freqs = np.arange(-min(halfN - 1, halfwin, k - 1),
                              min(halfN - 1, halfwin, N - k) + 1)
            indices = np.mod(freqs, N)
            # BUGFIX: `window` is an ndarray; the original *called* it
            # (MATLAB `window(halfwin+freqs+1)` 1-based indexing) — use
            # 0-based numpy indexing instead.
            Sx[indices, k] = x[k + freqs] * window[halfwin + freqs]
            dSx[indices, k] = x[k + freqs] * diff_window[halfwin + freqs]
        # axis=0 to transform each column (time slice), consistent with the
        # 'normal' branch; numpy's default axis is -1, unlike MATLAB's fft.
        Sx = np.fft.fft(Sx, axis=0) / np.sqrt(N)
        dSx = np.fft.fft(dSx, axis=0) / np.sqrt(N)
        # only keep the positive frequencies
        Sx = Sx[:halfN]
        dSx = dSx[:halfN]
        Sfs = Sfs[:halfN]

    # Shorten Sx to proper size (remove padding)
    if not opts['rpadded']:
        Sx = Sx[:, range(n1, n1 + n)]
        dSx = dSx[:, range(n1, n1 + n)]
    return Sx, Sfs, dSx
def stft_inv(Sx, opts=None):
    """Inverse short-time Fourier transform.
    Very closely based on <NAME>'s stft.m and istft.m from his
    SPHSC 503: Speech Signal Processing course at Univ. Washington.
    Adapted for use with Synchrosqueezing Toolbox.
    # Arguments:
        Sx: np.ndarray. Wavelet transform of a signal (see `stft_fwd`).
        opts: dict. Options:
            'type': str. Wavelet type. See `stft_fwd`, and `wfiltfn`.
            Others; see `stft_fwd` and source code.
    # Returns:
        x: the signal, as reconstructed from `Sx`.
    """
    def _unbuffer(x, w, o):
        # Undo the effect of 'buffering' by overlap-add with hop `w - o`;
        # BUGFIX: the original indexed a Python list 2-D and passed a float
        # to range(); rewritten as a standard numpy overlap-add.
        skip = w - o
        ncols = x.shape[1]
        L = (ncols - 1) * skip + x.shape[0]
        y = np.zeros(L)
        for col in range(ncols):
            i0 = col * skip
            y[i0:i0 + x.shape[0]] += x[:, col]
        return y

    def _process_opts(opts, Sx):
        # opts['winlen'] is window length; opts['type'] overrides
        # default hamming window
        opts['winlen'] = opts.get('winlen', int(np.round(Sx.shape[1] / 16)))
        opts['overlap'] = opts.get('overlap', opts['winlen'] - 1)
        opts['rpadded'] = opts.get('rpadded', False)
        if 'type' in opts:
            A = wfiltfn(opts['type'], opts)
            window = A(np.linspace(-1, 1, opts['winlen']))
        else:
            window = np.hamming(opts['winlen'])
        return opts, window

    # Copy so the caller's dict (and the old `opts={}` mutable default)
    # is never mutated.
    opts = dict(opts) if opts is not None else {}
    opts, window = _process_opts(opts, Sx)
    # window = window / norm(window, 2) --> Unit norm
    n_win = len(window)

    # find length of padding, similar to outputs of `padsignal`
    n = Sx.shape[1]
    if not opts['rpadded']:
        xLen = n
    else:
        # BUGFIX: was `xLen == n - n_win` — a no-op comparison; an
        # assignment was clearly intended.
        xLen = n - n_win

    # n_up = xLen + 2 * n_win
    n1 = n_win - 1
    # n2 = n_win
    new_n1 = int(np.floor((n1 - 1) / 2))  # int: used as a column index

    # add STFT padding if it doesn't exist
    if not opts['rpadded']:
        # NOTE(review): the original allocated Sxp with Sx's own shape and
        # then assigned an (n+1)-wide slice into it, which cannot work;
        # pad new_n1 zero columns on each side instead — confirm against
        # the MATLAB reference.
        Sxp = np.zeros((Sx.shape[0], n + 2 * new_n1), dtype=Sx.dtype)
        Sxp[:, new_n1:new_n1 + n] = Sx
        Sx = Sxp
    else:
        n = xLen

    # regenerate the full spectrum 0...2pi (minus zero Hz value);
    # NOTE(review): frequency runs along axis 0, so the conjugate mirror is
    # stacked row-wise (the original hstack/arange port was garbled).
    Sx = np.vstack(
        [Sx, np.conj(Sx[int(np.floor((n_win + 1) / 2)) - 1:0:-1])])

    # take the inverse fft over the columns
    xbuf = np.real(np.fft.ifft(Sx, None, axis=0))
    # apply the window to the columns (broadcast one window value per row;
    # replaces np.matlib.repmat, which also required `import numpy.matlib`)
    xbuf *= window.reshape(-1, 1)
    # overlap-add the columns
    x = _unbuffer(xbuf, n_win, opts['overlap'])
    # keep the unpadded part only
    x = x[n1:n1 + n]

    # compute L2-norm of window to normalize STFT with
    windowfunc = wfiltfn(opts['type'], opts, derivative=False)
    # BUGFIX: C was a lambda that was then scaled and divided by as if it
    # were a number; evaluate the integral of window**2 instead.
    # NOTE(review): assumes quadpy's `quad` mirrors scipy.integrate.quad
    # and returns (value, error) — confirm.
    C, _ = quadgk(lambda u: windowfunc(u) ** 2, -np.inf, np.inf)
    # `quadgk` is a bit inaccurate with the 'bump' function,
    # this scales it correctly
    if opts['type'] == 'bump':
        C *= 0.8675

    x *= 2 / (PI * C)
    return x
def phase_stft(Sx, dSx, Sfs, t, opts=None):
    """Calculate the phase transform of modified STFT at each (freq, time) pair:
        w[a, b] = Im( eta - d/dt(Sx[t, eta]) / Sx[t, eta] / (2*pi*j))
    Uses direct differentiation by calculating dSx/dt in frequency domain
    (the secondary output of `stft_fwd`, see `stft_fwd`).
    # Arguments:
        Sx: np.ndarray. Wavelet transform of `x` (see `stft_fwd`).
        dSx: np.ndarray. Samples of time-derivative of STFT of `x`
             (see `stft_fwd`).
        Sfs: np.ndarray. Frequencies associated with the rows of `Sx`.
        t: sequence. Time vector; only len(t) (== Sx.shape[1]) is used.
        opts: dict. Options:
            'gamma': float. Wavelet threshold (default: sqrt(machine epsilon))
    # Returns:
        w: phase transform, w.shape == Sx.shape
    # References:
        1. <NAME> and <NAME>,
        "Synchrosqueezing-based Recovery of Instantaneous Frequency from
        Nonuniform Samples",
        SIAM Journal on Mathematical Analysis, 43(5):2078-2095, 2011.
        <NAME>, <NAME>, <NAME>, and <NAME>,
        "The Synchrosqueezing algorithm for time-varying spectral analysis:
        robustness properties and new paleoclimate applications,"
        Signal Processing, 93:1079-1094, 2013.
    """
    # Read (not write) the threshold: the original wrote the default back
    # into `opts`, mutating the caller's dict and the shared `opts={}`
    # mutable default.
    opts = dict(opts) if opts is not None else {}
    gamma = opts.get('gamma', np.sqrt(np.finfo(np.float64).eps))
    # calculate phase transform; modified STFT amounts to extra frequency
    # term. np.tile replaces np.matlib.repmat, which raises AttributeError
    # unless `numpy.matlib` is imported explicitly.
    w = np.tile(Sfs, (len(t), 1)).T - np.imag(dSx / Sx / (2 * np.pi))
    # threshold out small points
    w[np.abs(Sx) < gamma] = np.inf
    return w
| [
"numpy.abs",
"numpy.ceil",
"numpy.sqrt",
"numpy.arange",
"numpy.fft.fft",
"numpy.floor",
"numpy.hamming",
"numpy.diag",
"numpy.sum",
"numpy.linspace",
"numpy.zeros",
"numpy.isnan",
"numpy.mod",
"numpy.finfo",
"numpy.fft.ifft",
"numpy.imag",
"numpy.round"
] | [((290, 310), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (298, 310), True, 'import numpy as np\n'), ((7841, 7863), 'numpy.floor', 'np.floor', (['((n1 - 1) / 2)'], {}), '((n1 - 1) / 2)\n', (7849, 7863), True, 'import numpy as np\n'), ((3366, 3403), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', "(opts['winlen'] + 1)"], {}), "(0, 1, opts['winlen'] + 1)\n", (3377, 3403), True, 'import numpy as np\n'), ((3672, 3702), 'numpy.fft.fft', 'np.fft.fft', (['xbuf', 'None'], {'axis': '(0)'}), '(xbuf, None, axis=0)\n', (3682, 3702), True, 'import numpy as np\n'), ((3940, 3971), 'numpy.fft.fft', 'np.fft.fft', (['dxbuf', 'None'], {'axis': '(0)'}), '(dxbuf, None, axis=0)\n', (3950, 3971), True, 'import numpy as np\n'), ((6266, 6283), 'numpy.ceil', 'np.ceil', (['(w / skip)'], {}), '(w / skip)\n', (6273, 6283), True, 'import numpy as np\n'), ((6829, 6846), 'numpy.sum', 'np.sum', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (6835, 6846), True, 'import numpy as np\n'), ((7954, 7972), 'numpy.zeros', 'np.zeros', (['Sx.shape'], {}), '(Sx.shape)\n', (7962, 7972), True, 'import numpy as np\n'), ((8298, 8327), 'numpy.fft.ifft', 'np.fft.ifft', (['Sx', 'None'], {'axis': '(0)'}), '(Sx, None, axis=0)\n', (8309, 8327), True, 'import numpy as np\n'), ((10170, 10182), 'numpy.sqrt', 'np.sqrt', (['EPS'], {}), '(EPS)\n', (10177, 10182), True, 'import numpy as np\n'), ((10313, 10341), 'numpy.imag', 'np.imag', (['(dSx / Sx / (2 * PI))'], {}), '(dSx / Sx / (2 * PI))\n', (10320, 10341), True, 'import numpy as np\n'), ((3163, 3189), 'numpy.hamming', 'np.hamming', (["opts['winlen']"], {}), "(opts['winlen'])\n", (3173, 3189), True, 'import numpy as np\n'), ((3636, 3651), 'numpy.diag', 'np.diag', (['window'], {}), '(window)\n', (3643, 3651), True, 'import numpy as np\n'), ((3747, 3757), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (3754, 3757), True, 'import numpy as np\n'), ((3897, 3917), 'numpy.diag', 'np.diag', (['diff_window'], {}), '(diff_window)\n', (3904, 3917), True, 
'import numpy as np\n'), ((4018, 4028), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (4025, 4028), True, 'import numpy as np\n'), ((4375, 4391), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (4383, 4391), True, 'import numpy as np\n'), ((4406, 4422), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (4414, 4422), True, 'import numpy as np\n'), ((4448, 4463), 'numpy.round', 'np.round', (['(N / 2)'], {}), '(N / 2)\n', (4456, 4463), True, 'import numpy as np\n'), ((4482, 4516), 'numpy.floor', 'np.floor', (["((opts['winlen'] - 1) / 2)"], {}), "((opts['winlen'] - 1) / 2)\n", (4490, 4516), True, 'import numpy as np\n'), ((7404, 7430), 'numpy.hamming', 'np.hamming', (["opts['winlen']"], {}), "(opts['winlen'])\n", (7414, 7430), True, 'import numpy as np\n'), ((10386, 10396), 'numpy.abs', 'np.abs', (['Sx'], {}), '(Sx)\n', (10392, 10396), True, 'import numpy as np\n'), ((3014, 3048), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', "opts['winlen']"], {}), "(-1, 1, opts['winlen'])\n", (3025, 3048), True, 'import numpy as np\n'), ((3092, 3126), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', "opts['winlen']"], {}), "(-1, 1, opts['winlen'])\n", (3103, 3126), True, 'import numpy as np\n'), ((3297, 3318), 'numpy.isnan', 'np.isnan', (['diff_window'], {}), '(diff_window)\n', (3305, 3318), True, 'import numpy as np\n'), ((4335, 4355), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (4346, 4355), True, 'import numpy as np\n'), ((4901, 4917), 'numpy.mod', 'np.mod', (['freqs', 'N'], {}), '(freqs, N)\n', (4907, 4917), True, 'import numpy as np\n'), ((5092, 5106), 'numpy.fft.fft', 'np.fft.fft', (['Sx'], {}), '(Sx)\n', (5102, 5106), True, 'import numpy as np\n'), ((5110, 5120), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (5117, 5120), True, 'import numpy as np\n'), ((5135, 5150), 'numpy.fft.fft', 'np.fft.fft', (['dSx'], {}), '(dSx)\n', (5145, 5150), True, 'import numpy as np\n'), ((5153, 5163), 'numpy.sqrt', 'np.sqrt', (['N'], {}), 
'(N)\n', (5160, 5163), True, 'import numpy as np\n'), ((7082, 7108), 'numpy.round', 'np.round', (['(Sx.shape[1] / 16)'], {}), '(Sx.shape[1] / 16)\n', (7090, 7108), True, 'import numpy as np\n'), ((7333, 7367), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', "opts['winlen']"], {}), "(-1, 1, opts['winlen'])\n", (7344, 7367), True, 'import numpy as np\n'), ((4545, 4579), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', "opts['winlen']"], {}), "(-1, 1, opts['winlen'])\n", (4556, 4579), True, 'import numpy as np\n'), ((3235, 3261), 'numpy.hamming', 'np.hamming', (["opts['winlen']"], {}), "(opts['winlen'])\n", (3245, 3261), True, 'import numpy as np\n'), ((4636, 4670), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', "opts['winlen']"], {}), "(-1, 1, opts['winlen'])\n", (4647, 4670), True, 'import numpy as np\n'), ((6749, 6761), 'numpy.arange', 'np.arange', (['l'], {}), '(l)\n', (6758, 6761), True, 'import numpy as np\n'), ((8192, 8217), 'numpy.floor', 'np.floor', (['((n_win + 1) / 2)'], {}), '((n_win + 1) / 2)\n', (8200, 8217), True, 'import numpy as np\n'), ((3423, 3451), 'numpy.floor', 'np.floor', (["(opts['winlen'] / 2)"], {}), "(opts['winlen'] / 2)\n", (3431, 3451), True, 'import numpy as np\n')] |
# python -m odf.mfs.collector
import datetime
import os
import time
from datetime import datetime
from threading import Thread
import cv2
import mss
import pandas as pd
import wave
import pyaudio
from odf.config import config
from PIL import Image
import numpy
import json
# SimConnect simulation variables to poll, grouped by topic; `record()`
# flattens the group values into the single key list MFSCollector reads.
sensors = {
    # up to seven groups, each group has up to 32/2=16 float values. (max rt2rt)
    'Instrument': [
        # Instrument Data (Shown to User)
        'AIRSPEED_INDICATED', 'VERTICAL_SPEED', 'INDICATED_ALTITUDE',
        'WISKEY_COMPASS_INDICATION_DEGREES', 'PLANE_HEADING_DEGREES_GYRO', 'HEADING_INDICATOR',
        'ANGLE_OF_ATTACK_INDICATOR',
        # Fuel Data
        'FUEL_TOTAL_QUANTITY', 'ESTIMATED_FUEL_FLOW'
    ],
    'Speed': [
        # Raw Speed Data (in ft/s and ft/s^2) relative to world
        'GROUND_VELOCITY', 'TOTAL_WORLD_VELOCITY', 'VELOCITY_WORLD_X', 'VELOCITY_WORLD_Y', 'VELOCITY_WORLD_Z',
        'ACCELERATION_WORLD_X', 'ACCELERATION_WORLD_Y', 'ACCELERATION_WORLD_Z',
        # Raw Speed Data (in ft/s and ft/s^2) relative to plane
        'VELOCITY_BODY_X', 'VELOCITY_BODY_Y', 'VELOCITY_BODY_Z',
        'ACCELERATION_BODY_X', 'ACCELERATION_BODY_Y', 'ACCELERATION_BODY_Z'],
    # Angle and Turning Data (in radians not degrees)
    'Angle': ['PLANE_PITCH_DEGREES', 'PLANE_BANK_DEGREES',
              # AoA and Sideslip angles
              'INCIDENCE_ALPHA', 'INCIDENCE_BETA'],
    # GPS and Position Data
    'GPS': ['GPS_POSITION_LAT', 'GPS_POSITION_LON', 'GPS_POSITION_ALT',
            'PLANE_LATITUDE', 'PLANE_LONGITUDE', 'PLANE_ALTITUDE'],
    # Weather and Conditions Data
    'Weather': ['AMBIENT_DENSITY', 'AMBIENT_TEMPERATURE', 'AMBIENT_PRESSURE',
                'AMBIENT_WIND_VELOCITY', 'AMBIENT_WIND_X', 'AMBIENT_WIND_Y', 'AMBIENT_WIND_Z',
                'TOTAL_AIR_TEMPERATURE'],
    # Time Data
    # 'Time': ['TIME_OF_DAY', 'ABSOLUTE_TIME', 'LOCAL_TIME'],
    # # Some data pieces that rarely change
    # 'Some': ['NUMBER_OF_ENGINES', 'FUEL_TOTAL_CAPACITY'],
    # Autopilot Data
    # 'Autopilot': ['AI_DESIRED_SPEED', 'AI_DESIRED_HEADING', 'AI_GROUNDCRUISESPEED',
    #               'AI_GROUNDTURNSPEED']
}
class VideoRecorder(Thread):
    """Thread that grabs full-screen frames with mss while running, then
    encodes them to <folder>/video.avi at the measured capture rate."""

    def __init__(self, folder, monitor=0) -> None:
        self.folder = folder    # output directory for video.avi
        self.running = True     # cleared by done() to stop capturing
        self.monitor = monitor  # index into mss's monitor list
        Thread.__init__(self)

    def run(self):
        with mss.mss() as sct:
            monitor = sct.monitors[self.monitor]
            # NOTE(review): the lookup above is immediately overwritten, so
            # self.monitor only validates the index; confirm the fixed
            # 1920x1080 capture region is intentional.
            monitor = {'top': 0, 'left': 0, 'width': 1920, 'height': 1080}
            # codec = cv2.VideoWriter_fourcc(*"XVID")
            codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
            frames = []
            start = datetime.now()
            while self.running:
                pic = sct.grab(monitor)
                im = cv2.cvtColor(numpy.array(pic), cv2.COLOR_BGRA2BGR)
                frames.append(im)
            if not frames:
                # BUGFIX: with zero frames the original opened a VideoWriter
                # with fps == 0 and wrote an unplayable empty file.
                print('no frames captured, nothing to save')
                return
            duration = (datetime.now() - start).total_seconds()
            fps = len(frames) / duration
            print('saving data... fps: ', fps)
            out = cv2.VideoWriter(
                os.path.join(
                    self.folder, 'video.avi'),
                codec,
                fps,
                (monitor['width'], monitor['height'])
            )
            for im in frames:
                out.write(im)
            out.release()
            print('saved')

    def done(self):
        """Signal run() to stop capturing and flush the video to disk."""
        self.running = False
class AudioRecorder():
    """Streams loopback (speaker) audio into <folder>/audio.wav via a
    PyAudio callback until done() is called."""

    def __init__(self, folder, device=0) -> None:
        self.device = device   # PyAudio input device index
        self.folder = folder   # output directory for audio.wav
        self.running = True
        self.stream = None

    def start(self):
        """Open the wave file and begin streaming audio into it."""
        pa = pyaudio.PyAudio()
        wav = wave.open(os.path.join(self.folder, 'audio.wav'), 'wb')
        wav.setnchannels(2)
        wav.setsampwidth(pa.get_sample_size(pyaudio.paInt16))
        wav.setframerate(44100)
        self.wavefile = wav
        self.stream = pa.open(
            input_device_index=self.device,
            format=pyaudio.paInt16, channels=2,
            rate=44100, input=True,
            frames_per_buffer=1024,
            # recording speaker
            as_loopback=True,
            stream_callback=self.get_callback())
        self.stream.start_stream()

    def get_callback(self):
        """Return a stream callback that appends each buffer to the file."""
        def _on_audio(in_data, frame_count, time_info, status):
            self.wavefile.writeframes(in_data)
            return in_data, pyaudio.paContinue
        return _on_audio

    def done(self):
        """Stop the stream and close the wave file (no-op if never started)."""
        if self.stream and self.running:
            self.stream.stop_stream()
            self.wavefile.close()
            self.running = False
class MFSCollector(Thread):
    """Polls Microsoft Flight Simulator via SimConnect every `interval`
    milliseconds and dumps all samples to <folder>/msf.csv on shutdown."""

    def __init__(self, folder, keys, interval=300) -> None:
        from SimConnect import AircraftRequests, SimConnect
        # Create SimConnect link
        link = SimConnect()
        # Note the default _time is 2000 to be refreshed every 2 seconds
        self.aq = AircraftRequests(link, _time=interval)
        self.interval = interval
        self.sm = link
        self.all_data = []
        self.running = True
        self.folder = folder
        self.keys = keys
        Thread.__init__(self)

    def pull_mfs(self):
        """Read one timestamped sample of every tracked variable."""
        sample = {'_t': datetime.now()}
        print("Collecting: " + str(datetime.now()))
        sample.update({key: self.aq.get(key) for key in self.keys})
        return sample

    def run(self):
        self.all_data.clear()
        while self.running:
            time.sleep(self.interval / 1000)
            self.all_data.append(self.pull_mfs())
        out_path = os.path.join(
            self.folder, 'msf.csv'
        )
        # sorted columns by name (so _t will be the first)
        pd.DataFrame(self.all_data).sort_index(axis=1).to_csv(out_path)
        self.sm.exit()

    def done(self):
        """Ask the polling loop to stop after its current sleep."""
        self.running = False
def record(monitor=0, audio=0):
    """Record MFS telemetry and screen video into a timestamped folder
    under config.DATA_PATH until the user presses Enter."""
    stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    out_dir = os.path.join(
        config.DATA_PATH, stamp
    )
    os.makedirs(out_dir)
    # Flatten the sensor groups into one key list for the collector.
    keys = [key for group in sensors.values() for key in group]
    with open(os.path.join(out_dir, 'keys.json'), 'w') as wf:
        json.dump(sensors, wf)
    collector = MFSCollector(out_dir, keys=keys)
    video = VideoRecorder(out_dir, monitor=monitor)
    #rca = AudioRecorder(out_dir, device=audio)
    collector.start()
    video.start()
    # rca.start()
    input("Press Enter to STOP recording...")
    # rca.done()
    collector.done()
    video.done()
    # wait for threads to finish
    collector.join()
    video.join()
def print_device():
    """List the available mss monitors and PyAudio devices (with their
    input/output channel counts) so indices can be passed to record()."""
    print('monitors')
    with mss.mss() as sct:
        for mon in sct.monitors:
            print(mon)
    print()
    print('audio')
    pa = pyaudio.PyAudio()
    for idx in range(pa.get_device_count()):
        info = pa.get_device_info_by_index(idx)
        print(idx, info['name'],
              'out:{}'.format(info['maxOutputChannels']),
              'in:{}'.format(info['maxInputChannels']), )
# Entry point: start a capture session. The hard-coded device/monitor
# indices were presumably chosen via print_device() — adjust per machine.
if __name__ == '__main__':
    # print_device()
    record(audio=4, monitor=1)
| [
"threading.Thread.__init__",
"mss.mss",
"os.makedirs",
"json.dump",
"os.path.join",
"time.sleep",
"datetime.datetime.now",
"numpy.array",
"cv2.VideoWriter_fourcc",
"pandas.DataFrame",
"pyaudio.PyAudio",
"SimConnect.AircraftRequests",
"SimConnect.SimConnect"
] | [((5928, 5966), 'os.path.join', 'os.path.join', (['config.DATA_PATH', 'folder'], {}), '(config.DATA_PATH, folder)\n', (5940, 5966), False, 'import os\n'), ((5985, 6004), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (5996, 6004), False, 'import os\n'), ((6662, 6679), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (6677, 6679), False, 'import pyaudio\n'), ((2339, 2360), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (2354, 2360), False, 'from threading import Thread\n'), ((3658, 3675), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (3673, 3675), False, 'import pyaudio\n'), ((4827, 4839), 'SimConnect.SimConnect', 'SimConnect', ([], {}), '()\n', (4837, 4839), False, 'from SimConnect import AircraftRequests, SimConnect\n'), ((4926, 4962), 'SimConnect.AircraftRequests', 'AircraftRequests', (['sm'], {'_time': 'interval'}), '(sm, _time=interval)\n', (4942, 4962), False, 'from SimConnect import AircraftRequests, SimConnect\n'), ((5155, 5176), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (5170, 5176), False, 'from threading import Thread\n'), ((5565, 5601), 'os.path.join', 'os.path.join', (['self.folder', '"""msf.csv"""'], {}), "(self.folder, 'msf.csv')\n", (5577, 5601), False, 'import os\n'), ((6127, 6149), 'json.dump', 'json.dump', (['sensors', 'wf'], {}), '(sensors, wf)\n', (6136, 6149), False, 'import json\n'), ((6553, 6562), 'mss.mss', 'mss.mss', ([], {}), '()\n', (6560, 6562), False, 'import mss\n'), ((2394, 2403), 'mss.mss', 'mss.mss', ([], {}), '()\n', (2401, 2403), False, 'import mss\n'), ((2610, 2652), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (2632, 2652), False, 'import cv2\n'), ((2697, 2711), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2709, 2711), False, 'from datetime import datetime\n'), ((3710, 3748), 'os.path.join', 'os.path.join', (['self.folder', 
'"""audio.wav"""'], {}), "(self.folder, 'audio.wav')\n", (3722, 3748), False, 'import os\n'), ((5224, 5238), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5236, 5238), False, 'from datetime import datetime\n'), ((5469, 5501), 'time.sleep', 'time.sleep', (['(self.interval / 1000)'], {}), '(self.interval / 1000)\n', (5479, 5501), False, 'import time\n'), ((5870, 5884), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5882, 5884), False, 'from datetime import datetime\n'), ((6072, 6105), 'os.path.join', 'os.path.join', (['folder', '"""keys.json"""'], {}), "(folder, 'keys.json')\n", (6084, 6105), False, 'import os\n'), ((3094, 3132), 'os.path.join', 'os.path.join', (['self.folder', '"""video.avi"""'], {}), "(self.folder, 'video.avi')\n", (3106, 3132), False, 'import os\n'), ((2818, 2834), 'numpy.array', 'numpy.array', (['pic'], {}), '(pic)\n', (2829, 2834), False, 'import numpy\n'), ((5276, 5290), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5288, 5290), False, 'from datetime import datetime\n'), ((2915, 2929), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2927, 2929), False, 'from datetime import datetime\n'), ((5691, 5718), 'pandas.DataFrame', 'pd.DataFrame', (['self.all_data'], {}), '(self.all_data)\n', (5703, 5718), True, 'import pandas as pd\n')] |
import numpy as np
from matplotlib import pyplot as plt
import pickle as pkl
import starry
import celerite2.jax
from celerite2.jax import terms as jax_terms
from celerite2 import terms, GaussianProcess
from exoplanet.distributions import estimate_inverse_gamma_parameters
from matplotlib import colors
import matplotlib.gridspec as gridspec
from matplotlib.ticker import FormatStrFormatter, AutoMinorLocator
from matplotlib.lines import Line2D
import matplotlib.cm as cm
import seaborn as sns
from volcano.utils import *
# Fix the RNG seed so the random posterior-sample draws below reproduce.
np.random.seed(42)
# Disable starry's lazy (symbolic) mode: evaluate ops eagerly as numpy.
starry.config.lazy = False
def make_plots(
    lc_in,
    lc_eg,
    samples,
    yticks,
    ylim,
    xticks_in,
    xticks_eg,
    res_yticks,
    res_ylim,
    gp=True,
    cmap_norm=colors.Normalize(vmin=0.0, vmax=1500),
):
    """Build the full IRTF occultation figure and save it to disk.

    Top row: posterior-median ingress/egress Mollweide maps; middle rows:
    mini-maps of the occultation geometry, observed vs. modelled light
    curves, and residuals. Saved as irtf_<year>.pdf, or
    irtf_<year>_no_GP.pdf when gp=False.

    NOTE(review): the ``cmap_norm`` default is a colors.Normalize instance
    created once at import time and shared across calls — pass an explicit
    norm if per-call state matters.
    """
    # Compute ephemeris for Io (501) and Jupiter (599) over both epochs
    eph_list_io = []
    eph_list_jup = []
    for lc in (lc_in, lc_eg):
        times = lc.time
        eph_io = get_body_ephemeris(
            times, body_id="501", step="1m", return_orientation=True
        )
        eph_jup = get_body_ephemeris(
            times, body_id="599", step="1m", return_orientation=True
        )
        eph_list_io.append(eph_io)
        eph_list_jup.append(eph_jup)
    eph_io_in = eph_list_io[0]
    eph_jup_in = eph_list_jup[0]
    eph_io_eg = eph_list_io[1]
    eph_jup_eg = eph_list_jup[1]
    # Time in minutes from the start of each light curve
    t_in = (lc_in.time.mjd - lc_in.time.mjd[0]) * 24 * 60
    t_eg = (lc_eg.time.mjd - lc_eg.time.mjd[0]) * 24 * 60
    f_obs_in = lc_in["flux"].value
    f_err_in = lc_in["flux_err"].value
    f_obs_eg = lc_eg["flux"].value
    f_err_eg = lc_eg["flux_err"].value
    f_obs = np.concatenate([f_obs_in, f_obs_eg])
    f_err = np.concatenate([f_err_in, f_err_eg])
    xo_in, yo_in, ro_in = get_occultor_position_and_radius(
        eph_io_in, eph_jup_in, occultor_is_jupiter=True
    )
    xo_eg, yo_eg, ro_eg = get_occultor_position_and_radius(
        eph_io_eg, eph_jup_eg, occultor_is_jupiter=True
    )
    # Phase
    theta_in = eph_io_in["theta"].value
    theta_eg = eph_io_eg["theta"].value
    # Fit single map model with different map amplitudes for ingress and egress
    ydeg_inf = 20
    # NOTE(review): `map` shadows the builtin; it is a starry.Map here.
    map = starry.Map(ydeg_inf)
    # Evaluate model on denser grid
    xo_in_dense = np.linspace(xo_in[0], xo_in[-1], 200)
    yo_in_dense = np.linspace(yo_in[0], yo_in[-1], 200)
    theta_in_dense = np.linspace(theta_in[0], theta_in[-1], 200)
    xo_eg_dense = np.linspace(xo_eg[0], xo_eg[-1], 200)
    yo_eg_dense = np.linspace(yo_eg[0], yo_eg[-1], 200)
    theta_eg_dense = np.linspace(theta_eg[0], theta_eg[-1], 200)
    t_in_dense = np.linspace(t_in[0], t_in[-1], 200)
    t_eg_dense = np.linspace(t_eg[0], t_eg[-1], 200)
    # Posterior-median maps (Mollweide for the top row, orthographic at
    # the epoch-mean phase for the mini-maps)
    median_map_moll_in = get_median_map(ydeg_inf, samples["x_in"], nsamples=50)
    median_map_moll_eg = get_median_map(ydeg_inf, samples["x_eg"], nsamples=50)
    median_map_in = get_median_map(
        ydeg_inf,
        samples["x_in"],
        projection=None,
        theta=np.mean(theta_in),
        nsamples=50,
    )
    median_map_eg = get_median_map(
        ydeg_inf,
        samples["x_eg"],
        projection=None,
        theta=np.mean(theta_eg),
        nsamples=50,
    )
    # Clip to positive values so the (possibly logarithmic) norm is valid
    median_map_moll_in = np.clip(median_map_moll_in, 0.1, 1e5)
    median_map_moll_eg = np.clip(median_map_moll_eg, 0.1, 1e5)
    median_map_in = np.clip(median_map_in, 0.1, 1e5)
    median_map_eg = np.clip(median_map_eg, 0.1, 1e5)
    # Make plot
    if gp:
        gp_pred_in = []
        gp_pred_eg = []
        gp_pred_in_dense = []
        gp_pred_eg_dense = []
        # Compute GP predictions
        # NOTE(review): the loop variable `gp` below shadows the boolean
        # parameter `gp`; later `if gp:` checks then test a (truthy)
        # GaussianProcess instance, which happens to preserve behavior.
        for i in np.random.randint(0, len(samples), 100):
            # Ingress
            kernel_in = terms.Matern32Term(
                sigma=np.array(samples["sigma_gp"])[i][0],
                rho=np.array(samples["rho_gp"])[i][0],
            )
            gp = celerite2.GaussianProcess(
                kernel_in, t=t_in, mean=np.array(samples["flux_in"])[i]
            )
            gp.compute(t_in, yerr=(samples["f_err_in_mod"][i]))
            gp_pred_in.append(
                gp.predict(f_obs_in, t=t_in, include_mean=False)
                + samples["flux_in"][i]
            )
            gp_pred_in_dense.append(
                gp.predict(f_obs_in, t=t_in_dense, include_mean=False)
                + samples["flux_in_dense"][i]
            )
            # Egress
            kernel_eg = terms.Matern32Term(
                sigma=np.array(samples["sigma_gp"])[i][1],
                rho=np.array(samples["rho_gp"])[i][1],
            )
            gp = celerite2.GaussianProcess(
                kernel_eg, t=t_eg, mean=np.array(samples["flux_eg"])[i]
            )
            gp.compute(t_eg, yerr=(samples["f_err_eg_mod"][i]))
            gp_pred_eg.append(
                gp.predict(f_obs_eg, t=t_eg, include_mean=False)
                + samples["flux_eg"][i]
            )
            gp_pred_eg_dense.append(
                gp.predict(f_obs_eg, t=t_eg_dense, include_mean=False)
                + samples["flux_eg_dense"][i]
            )
    # Compute residuals
    f_in_median = np.median(samples["flux_in"], axis=0)
    f_eg_median = np.median(samples["flux_eg"], axis=0)
    if gp:
        f_in_median_gp = np.median(gp_pred_in, axis=0)
        f_eg_median_gp = np.median(gp_pred_eg, axis=0)
        res_in = f_obs_in - f_in_median_gp
        res_eg = f_obs_eg - f_eg_median_gp
    else:
        res_in = f_obs_in - f_in_median
        res_eg = f_obs_eg - f_eg_median
    # Set up the plot
    resol = 100
    nim = 7  # number of mini-maps per epoch
    fig = plt.figure(figsize=(10, 9))
    fig.subplots_adjust(wspace=0.0)
    heights = [2, 4, 2]
    gs0 = fig.add_gridspec(
        nrows=1, ncols=2 * nim, bottom=0.71, left=0.05, right=0.98, hspace=0.4
    )
    gs1 = fig.add_gridspec(
        nrows=3,
        ncols=nim,
        height_ratios=heights,
        top=0.72,
        left=0.05,
        right=0.50,
        hspace=0.05,
    )
    gs2 = fig.add_gridspec(
        nrows=3,
        ncols=nim,
        height_ratios=heights,
        top=0.72,
        left=0.53,
        right=0.98,
        hspace=0.05,
    )
    # Maps
    ax_map_in = fig.add_subplot(gs0[0, :nim])
    ax_map_eg = fig.add_subplot(gs0[0, nim:])
    # Minimaps
    ax_im = [
        [fig.add_subplot(gs1[0, i]) for i in range(nim)],
        [fig.add_subplot(gs2[0, i]) for i in range(nim)],
    ]
    # Light curves
    ax_lc = [fig.add_subplot(gs1[1, :]), fig.add_subplot(gs2[1, :])]
    # Residuals
    ax_res = [fig.add_subplot(gs1[2, :]), fig.add_subplot(gs2[2, :])]
    # Plot maps
    cmap = "OrRd"
    map.show(
        image=median_map_moll_in,
        ax=ax_map_in,
        projection="Mollweide",
        norm=cmap_norm,
        cmap=cmap,
    )
    map.show(
        image=median_map_moll_eg,
        ax=ax_map_eg,
        projection="Mollweide",
        norm=cmap_norm,
        cmap=cmap,
    )
    ax_map_in.set_title(
        "Ingress map\n" + lc_in.time[0].datetime.strftime("%Y-%m-%d %H:%M")
    )
    ax_map_eg.set_title(
        "Egress map\n" + lc_eg.time[0].datetime.strftime("%Y-%m-%d %H:%M")
    )
    # Plot minimaps: occultor positions at nim evenly spaced moments
    xo_im_in = np.linspace(xo_in[0], xo_in[-1], nim)
    yo_im_in = np.linspace(yo_in[0], yo_in[-1], nim)
    xo_im_eg = np.linspace(xo_eg[0], xo_eg[-1], nim)
    yo_im_eg = np.linspace(yo_eg[0], yo_eg[-1], nim)
    xo_im = [xo_im_in, xo_im_eg]
    yo_im = [yo_im_in, yo_im_eg]
    for j in range(2):
        a = ax_im[j]
        for n in range(nim):
            # Show the image
            if j == 0:
                map.show(
                    image=median_map_in,
                    ax=a[n],
                    grid=False,
                    norm=cmap_norm,
                    cmap=cmap,
                )
                ro = ro_in
            else:
                map.show(
                    image=median_map_eg,
                    ax=a[n],
                    grid=False,
                    norm=cmap_norm,
                    cmap=cmap,
                )
                ro = ro_eg
            # Outline
            x = np.linspace(-1, 1, 1000)
            y = np.sqrt(1 - x ** 2)
            f = 0.98
            a[n].plot(f * x, f * y, "k-", lw=0.5, zorder=0)
            a[n].plot(f * x, -f * y, "k-", lw=0.5, zorder=0)
            # Occultor
            x = np.linspace(-1.5, xo_im[j][n] + ro - 1e-5, resol)
            y = np.sqrt(ro ** 2 - (x - xo_im[j][n]) ** 2)
            a[n].fill_between(
                x,
                yo_im[j][n] - y,
                yo_im[j][n] + y,
                fc="w",
                zorder=1,
                clip_on=True,
                ec="k",
                lw=0.5,
            )
            a[n].axis("off")
            a[n].set(xlim=(-1.1, 1.1), ylim=(-1.1, 1.1))
            a[n].set_rasterization_zorder(0)
    # Plot ingress
    f_err_in_mod_median = np.median(samples["f_err_in_mod"], axis=0)
    f_err_eg_mod_median = np.median(samples["f_err_eg_mod"], axis=0)
    ax_lc[0].scatter(  # Data
        t_in,
        f_obs_in,
        color="black",
        marker="o",
        alpha=0.4,
    )
    if gp:
        # for s in np.random.randint(0, len(samples["flux_in_dense"]), 10):
        # ax_lc[0].plot(
        # t_in_dense, samples["flux_in_dense"][s, :], "C0-", alpha=0.1
        # ) # Model
        # Plot full model
        for s in range(10):
            ax_lc[0].plot(
                t_in_dense, gp_pred_in_dense[s], "C1-", alpha=0.1
            )  # Model
    else:
        for s in np.random.randint(0, len(samples["flux_in_dense"]), 10):
            ax_lc[0].plot(
                t_in_dense, samples["flux_in_dense"][s, :], "C1-", alpha=0.1
            )  # Model
    # Residuals
    ax_res[0].errorbar(
        t_in,
        res_in,
        f_err_in_mod_median,
        color="black",
        marker="o",
        ecolor="black",
        linestyle="",
        alpha=0.4,
    )
    # Plot egress
    ax_lc[1].scatter(
        t_eg,
        f_obs_eg,
        color="black",
        marker="o",
        alpha=0.4,
    )
    if gp:
        # for s in np.random.randint(0, len(samples["flux_eg_dense"]), 10):
        # ax_lc[1].plot(
        # t_eg_dense, samples["flux_eg_dense"][s, :], "C0-", alpha=0.1
        # ) # Model
        # Plot full model
        for s in range(10):
            ax_lc[1].plot(
                t_eg_dense, gp_pred_eg_dense[s], "C1-", alpha=0.1
            )  # Model
    else:
        for s in np.random.randint(0, len(samples["flux_eg_dense"]), 10):
            ax_lc[1].plot(
                t_eg_dense, samples["flux_eg_dense"][s, :], "C1-", alpha=0.1
            )  # Model
    # Residuals
    ax_res[1].errorbar(
        t_eg,
        res_eg,
        f_err_eg_mod_median,
        color="black",
        marker="o",
        ecolor="black",
        linestyle="",
        alpha=0.4,
    )
    # Colorbar
    cbar_ax = fig.add_axes([0.92, 0.72, 0.014, 0.15])
    fig.colorbar(
        cm.ScalarMappable(norm=cmap_norm, cmap=cmap),
        cax=cbar_ax,
        label="Spectral flux\n [GW/um]",
    )
    # Ticks
    for a in ax_lc:
        a.set_xticklabels([])
        a.grid(alpha=0.5)
        a.set_yticks(yticks)
        a.set_ylim(ylim[0], ylim[1])
    for a in (ax_lc[0], ax_res[0]):
        a.set_xticks(xticks_in)
        a.set_xlim(left=-0.1)
        a.xaxis.set_minor_locator(AutoMinorLocator())
        a.yaxis.set_minor_locator(AutoMinorLocator())
    for a in (ax_lc[1], ax_res[1]):
        a.set_xticks(xticks_eg)
        a.set_xlim(left=-0.1)
        a.xaxis.set_minor_locator(AutoMinorLocator())
        a.set_yticklabels([])
    for a in ax_res:
        a.grid(alpha=0.5)
        a.set_ylim(res_ylim)
        a.set_yticks(res_yticks)
    for j in range(2):
        ax_im[j][-1].set_zorder(-100)
    # Set common labels
    fig.text(0.5, 0.04, "Duration [minutes]", ha="center", va="center")
    ax_lc[0].set_ylabel("Intensity [GW/sr/um]")
    ax_res[0].set_ylabel("Residuals")
    year = lc_in.time[0].isot[:4]
    if gp:
        fig.savefig(f"irtf_{year}.pdf", bbox_inches="tight", dpi=400)
    else:
        fig.savefig(
            f"irtf_{year}_no_GP.pdf",
            bbox_inches="tight",
            dpi=400,
        )
# Plots for the 1998 pair of light curves: load the processed
# ingress/egress observations and the posterior samples fitted to them.
with open("../../data/irtf_processed/lc_1998-08-27.pkl", "rb") as handle:
    lc_in = pkl.load(handle)
with open("../../data/irtf_processed/lc_1998-11-29.pkl", "rb") as handle:
    lc_eg = pkl.load(handle)
# Axis limits and tick locations for the 1998 figure.
yticks = np.arange(0, 60, 10)
ylim = (-2, 52)
xticks_in = np.arange(0, 5, 1)
xticks_eg = np.arange(0, 6, 1)
res_yticks = np.arange(-2, 3, 1)
res_ylim = (-2.5, 2.5)
with open("scripts/irtf_1998_samples.pkl", "rb") as handle:
    samples = pkl.load(handle)
with open("scripts/irtf_1998_samples_no_GP.pkl", "rb") as handle:
    samples2 = pkl.load(handle)
def print_percentiles(samples, varname):
    """Print the median of *samples* with its 16th/84th-percentile offsets.

    Output format: "<varname>: <median> <median - p16> <p84 - median>",
    each value printed with three decimals.
    """
    p16, p50, p84 = np.percentile(samples, [16, 50, 84])
    lower, upper = p50 - p16, p84 - p50
    print(f"{varname}: {p50:.3f} {lower:.3f} {upper:.3f}")
# Summarize the 1998 posterior: median and +/-1-sigma offsets per parameter
print("1998 event parameters:")
for _values, _label in [
    (samples["tau"], "tau"),
    (np.sqrt(samples["c2"]), "c"),
    (samples["amp_eg"], "a"),
    (samples["sigma_gp"][:, 0], "sigma_GP_I"),
    (samples["sigma_gp"][:, 1], "sigma_GP_E"),
    (samples["rho_gp"][:, 0], "rho_GP_I"),
    (samples["rho_gp"][:, 1], "rho_GP_E"),
    (samples["err_in_scale"], "err_scale_in"),
    (samples["err_eg_scale"], "err_scale_eg"),
    (np.exp(samples["ln_flux_offset"][:, 0]), "b_I"),
    (np.exp(samples["ln_flux_offset"][:, 1]), "b_E"),
]:
    print_percentiles(_values, _label)
# Plot inferred maps and fit (model including the GP noise component)
make_plots(
    lc_in,
    lc_eg,
    samples,
    yticks,
    ylim,
    xticks_in,
    xticks_eg,
    res_yticks,
    res_ylim,
    cmap_norm=colors.LogNorm(vmin=30, vmax=1000),
)
# Same figure for the model without GP
make_plots(
    lc_in,
    lc_eg,
    samples2,
    yticks,
    ylim,
    xticks_in,
    xticks_eg,
    res_yticks,
    res_ylim,
    gp=False,
    cmap_norm=colors.LogNorm(vmin=30, vmax=1000),
)
# Plots for the 2017 pair of light curves
# Load the processed ingress/egress light curves and the posterior samples
# (with and without the GP noise model)
with open("../../data/irtf_processed/lc_2017-03-31.pkl", "rb") as handle:
    lc_in = pkl.load(handle)
with open("../../data/irtf_processed/lc_2017-05-11.pkl", "rb") as handle:
    lc_eg = pkl.load(handle)
with open("scripts/irtf_2017_samples.pkl", "rb") as handle:
    samples = pkl.load(handle)
with open("scripts/irtf_2017_samples_no_GP.pkl", "rb") as handle:
    samples2 = pkl.load(handle)
# Summarize the 2017 posterior: median and +/-1-sigma offsets per parameter
print("2017 event parameters:")
for _values, _label in [
    (samples["tau"], "tau"),
    (np.sqrt(samples["c2"]), "c"),
    (samples["amp_eg"], "a"),
    (samples["sigma_gp"][:, 0], "sigma_GP_I"),
    (samples["sigma_gp"][:, 1], "sigma_GP_E"),
    (samples["rho_gp"][:, 0], "rho_GP_I"),
    (samples["rho_gp"][:, 1], "rho_GP_E"),
    (samples["err_in_scale"], "err_scale_in"),
    (samples["err_eg_scale"], "err_scale_eg"),
    (np.exp(samples["ln_flux_offset"][:, 0]), "b_I"),
    (np.exp(samples["ln_flux_offset"][:, 1]), "b_E"),
]:
    print_percentiles(_values, _label)
# Axis limits and tick positions used by the 2017 figures
yticks = np.arange(0, 100, 20)
ylim = (-2, 82)
xticks_in = np.arange(0, 5, 1)
xticks_eg = np.arange(0, 6, 1)
res_yticks = np.arange(-5, 7.5, 2.5)
res_ylim = (-5.5, 5.5)
# Model including a GP
make_plots(
    lc_in,
    lc_eg,
    samples,
    yticks,
    ylim,
    xticks_in,
    xticks_eg,
    res_yticks,
    res_ylim,
    cmap_norm=colors.LogNorm(vmin=30, vmax=1500),
)
# Model without GP
make_plots(
    lc_in,
    lc_eg,
    samples2,
    yticks,
    ylim,
    xticks_in,
    xticks_eg,
    res_yticks,
    res_ylim,
    gp=False,
    cmap_norm=colors.LogNorm(vmin=30, vmax=1500),
) | [
"numpy.clip",
"numpy.sqrt",
"numpy.array",
"matplotlib.ticker.AutoMinorLocator",
"numpy.arange",
"matplotlib.colors.LogNorm",
"numpy.mean",
"numpy.diff",
"numpy.exp",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"numpy.random.seed",
"numpy.concatenate",
"starry.Map",
"pickle.load",
... | [((525, 543), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (539, 543), True, 'import numpy as np\n'), ((12516, 12536), 'numpy.arange', 'np.arange', (['(0)', '(60)', '(10)'], {}), '(0, 60, 10)\n', (12525, 12536), True, 'import numpy as np\n'), ((12565, 12583), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (12574, 12583), True, 'import numpy as np\n'), ((12596, 12614), 'numpy.arange', 'np.arange', (['(0)', '(6)', '(1)'], {}), '(0, 6, 1)\n', (12605, 12614), True, 'import numpy as np\n'), ((12628, 12647), 'numpy.arange', 'np.arange', (['(-2)', '(3)', '(1)'], {}), '(-2, 3, 1)\n', (12637, 12647), True, 'import numpy as np\n'), ((15198, 15219), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(20)'], {}), '(0, 100, 20)\n', (15207, 15219), True, 'import numpy as np\n'), ((15248, 15266), 'numpy.arange', 'np.arange', (['(0)', '(5)', '(1)'], {}), '(0, 5, 1)\n', (15257, 15266), True, 'import numpy as np\n'), ((15279, 15297), 'numpy.arange', 'np.arange', (['(0)', '(6)', '(1)'], {}), '(0, 6, 1)\n', (15288, 15297), True, 'import numpy as np\n'), ((15311, 15334), 'numpy.arange', 'np.arange', (['(-5)', '(7.5)', '(2.5)'], {}), '(-5, 7.5, 2.5)\n', (15320, 15334), True, 'import numpy as np\n'), ((733, 770), 'matplotlib.colors.Normalize', 'colors.Normalize', ([], {'vmin': '(0.0)', 'vmax': '(1500)'}), '(vmin=0.0, vmax=1500)\n', (749, 770), False, 'from matplotlib import colors\n'), ((1613, 1649), 'numpy.concatenate', 'np.concatenate', (['[f_obs_in, f_obs_eg]'], {}), '([f_obs_in, f_obs_eg])\n', (1627, 1649), True, 'import numpy as np\n'), ((1662, 1698), 'numpy.concatenate', 'np.concatenate', (['[f_err_in, f_err_eg]'], {}), '([f_err_in, f_err_eg])\n', (1676, 1698), True, 'import numpy as np\n'), ((2146, 2166), 'starry.Map', 'starry.Map', (['ydeg_inf'], {}), '(ydeg_inf)\n', (2156, 2166), False, 'import starry\n'), ((2221, 2258), 'numpy.linspace', 'np.linspace', (['xo_in[0]', 'xo_in[-1]', '(200)'], {}), '(xo_in[0], xo_in[-1], 200)\n', 
(2232, 2258), True, 'import numpy as np\n'), ((2277, 2314), 'numpy.linspace', 'np.linspace', (['yo_in[0]', 'yo_in[-1]', '(200)'], {}), '(yo_in[0], yo_in[-1], 200)\n', (2288, 2314), True, 'import numpy as np\n'), ((2336, 2379), 'numpy.linspace', 'np.linspace', (['theta_in[0]', 'theta_in[-1]', '(200)'], {}), '(theta_in[0], theta_in[-1], 200)\n', (2347, 2379), True, 'import numpy as np\n'), ((2399, 2436), 'numpy.linspace', 'np.linspace', (['xo_eg[0]', 'xo_eg[-1]', '(200)'], {}), '(xo_eg[0], xo_eg[-1], 200)\n', (2410, 2436), True, 'import numpy as np\n'), ((2455, 2492), 'numpy.linspace', 'np.linspace', (['yo_eg[0]', 'yo_eg[-1]', '(200)'], {}), '(yo_eg[0], yo_eg[-1], 200)\n', (2466, 2492), True, 'import numpy as np\n'), ((2514, 2557), 'numpy.linspace', 'np.linspace', (['theta_eg[0]', 'theta_eg[-1]', '(200)'], {}), '(theta_eg[0], theta_eg[-1], 200)\n', (2525, 2557), True, 'import numpy as np\n'), ((2576, 2611), 'numpy.linspace', 'np.linspace', (['t_in[0]', 't_in[-1]', '(200)'], {}), '(t_in[0], t_in[-1], 200)\n', (2587, 2611), True, 'import numpy as np\n'), ((2629, 2664), 'numpy.linspace', 'np.linspace', (['t_eg[0]', 't_eg[-1]', '(200)'], {}), '(t_eg[0], t_eg[-1], 200)\n', (2640, 2664), True, 'import numpy as np\n'), ((3180, 3222), 'numpy.clip', 'np.clip', (['median_map_moll_in', '(0.1)', '(100000.0)'], {}), '(median_map_moll_in, 0.1, 100000.0)\n', (3187, 3222), True, 'import numpy as np\n'), ((3243, 3285), 'numpy.clip', 'np.clip', (['median_map_moll_eg', '(0.1)', '(100000.0)'], {}), '(median_map_moll_eg, 0.1, 100000.0)\n', (3250, 3285), True, 'import numpy as np\n'), ((3301, 3338), 'numpy.clip', 'np.clip', (['median_map_in', '(0.1)', '(100000.0)'], {}), '(median_map_in, 0.1, 100000.0)\n', (3308, 3338), True, 'import numpy as np\n'), ((3354, 3391), 'numpy.clip', 'np.clip', (['median_map_eg', '(0.1)', '(100000.0)'], {}), '(median_map_eg, 0.1, 100000.0)\n', (3361, 3391), True, 'import numpy as np\n'), ((5069, 5106), 'numpy.median', 'np.median', (["samples['flux_in']"], 
{'axis': '(0)'}), "(samples['flux_in'], axis=0)\n", (5078, 5106), True, 'import numpy as np\n'), ((5125, 5162), 'numpy.median', 'np.median', (["samples['flux_eg']"], {'axis': '(0)'}), "(samples['flux_eg'], axis=0)\n", (5134, 5162), True, 'import numpy as np\n'), ((5524, 5551), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 9)'}), '(figsize=(10, 9))\n', (5534, 5551), True, 'from matplotlib import pyplot as plt\n'), ((7103, 7140), 'numpy.linspace', 'np.linspace', (['xo_in[0]', 'xo_in[-1]', 'nim'], {}), '(xo_in[0], xo_in[-1], nim)\n', (7114, 7140), True, 'import numpy as np\n'), ((7156, 7193), 'numpy.linspace', 'np.linspace', (['yo_in[0]', 'yo_in[-1]', 'nim'], {}), '(yo_in[0], yo_in[-1], nim)\n', (7167, 7193), True, 'import numpy as np\n'), ((7209, 7246), 'numpy.linspace', 'np.linspace', (['xo_eg[0]', 'xo_eg[-1]', 'nim'], {}), '(xo_eg[0], xo_eg[-1], nim)\n', (7220, 7246), True, 'import numpy as np\n'), ((7262, 7299), 'numpy.linspace', 'np.linspace', (['yo_eg[0]', 'yo_eg[-1]', 'nim'], {}), '(yo_eg[0], yo_eg[-1], nim)\n', (7273, 7299), True, 'import numpy as np\n'), ((8815, 8857), 'numpy.median', 'np.median', (["samples['f_err_in_mod']"], {'axis': '(0)'}), "(samples['f_err_in_mod'], axis=0)\n", (8824, 8857), True, 'import numpy as np\n'), ((8884, 8926), 'numpy.median', 'np.median', (["samples['f_err_eg_mod']"], {'axis': '(0)'}), "(samples['f_err_eg_mod'], axis=0)\n", (8893, 8926), True, 'import numpy as np\n'), ((12385, 12401), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (12393, 12401), True, 'import pickle as pkl\n'), ((12489, 12505), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (12497, 12505), True, 'import pickle as pkl\n'), ((12746, 12762), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (12754, 12762), True, 'import pickle as pkl\n'), ((12844, 12860), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (12852, 12860), True, 'import pickle as pkl\n'), ((12915, 12951), 'numpy.percentile', 
'np.percentile', (['samples', '[16, 50, 84]'], {}), '(samples, [16, 50, 84])\n', (12928, 12951), True, 'import numpy as np\n'), ((12960, 12973), 'numpy.diff', 'np.diff', (['mcmc'], {}), '(mcmc)\n', (12967, 12973), True, 'import numpy as np\n'), ((13128, 13150), 'numpy.sqrt', 'np.sqrt', (["samples['c2']"], {}), "(samples['c2'])\n", (13135, 13150), True, 'import numpy as np\n'), ((13563, 13602), 'numpy.exp', 'np.exp', (["samples['ln_flux_offset'][:, 0]"], {}), "(samples['ln_flux_offset'][:, 0])\n", (13569, 13602), True, 'import numpy as np\n'), ((13629, 13668), 'numpy.exp', 'np.exp', (["samples['ln_flux_offset'][:, 1]"], {}), "(samples['ln_flux_offset'][:, 1])\n", (13635, 13668), True, 'import numpy as np\n'), ((14235, 14251), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (14243, 14251), True, 'import pickle as pkl\n'), ((14339, 14355), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (14347, 14355), True, 'import pickle as pkl\n'), ((14431, 14447), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (14439, 14447), True, 'import pickle as pkl\n'), ((14530, 14546), 'pickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (14538, 14546), True, 'import pickle as pkl\n'), ((14639, 14661), 'numpy.sqrt', 'np.sqrt', (["samples['c2']"], {}), "(samples['c2'])\n", (14646, 14661), True, 'import numpy as np\n'), ((15074, 15113), 'numpy.exp', 'np.exp', (["samples['ln_flux_offset'][:, 0]"], {}), "(samples['ln_flux_offset'][:, 0])\n", (15080, 15113), True, 'import numpy as np\n'), ((15140, 15179), 'numpy.exp', 'np.exp', (["samples['ln_flux_offset'][:, 1]"], {}), "(samples['ln_flux_offset'][:, 1])\n", (15146, 15179), True, 'import numpy as np\n'), ((5200, 5229), 'numpy.median', 'np.median', (['gp_pred_in'], {'axis': '(0)'}), '(gp_pred_in, axis=0)\n', (5209, 5229), True, 'import numpy as np\n'), ((5255, 5284), 'numpy.median', 'np.median', (['gp_pred_eg'], {'axis': '(0)'}), '(gp_pred_eg, axis=0)\n', (5264, 5284), True, 'import numpy as np\n'), ((10987, 
11031), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'cmap_norm', 'cmap': 'cmap'}), '(norm=cmap_norm, cmap=cmap)\n', (11004, 11031), True, 'import matplotlib.cm as cm\n'), ((13851, 13885), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': '(30)', 'vmax': '(1000)'}), '(vmin=30, vmax=1000)\n', (13865, 13885), False, 'from matplotlib import colors\n'), ((14067, 14101), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': '(30)', 'vmax': '(1000)'}), '(vmin=30, vmax=1000)\n', (14081, 14101), False, 'from matplotlib import colors\n'), ((15525, 15559), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': '(30)', 'vmax': '(1500)'}), '(vmin=30, vmax=1500)\n', (15539, 15559), False, 'from matplotlib import colors\n'), ((15741, 15775), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': '(30)', 'vmax': '(1500)'}), '(vmin=30, vmax=1500)\n', (15755, 15775), False, 'from matplotlib import colors\n'), ((2944, 2961), 'numpy.mean', 'np.mean', (['theta_in'], {}), '(theta_in)\n', (2951, 2961), True, 'import numpy as np\n'), ((3108, 3125), 'numpy.mean', 'np.mean', (['theta_eg'], {}), '(theta_eg)\n', (3115, 3125), True, 'import numpy as np\n'), ((8029, 8053), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (8040, 8053), True, 'import numpy as np\n'), ((8070, 8089), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (8077, 8089), True, 'import numpy as np\n'), ((8272, 8322), 'numpy.linspace', 'np.linspace', (['(-1.5)', '(xo_im[j][n] + ro - 1e-05)', 'resol'], {}), '(-1.5, xo_im[j][n] + ro - 1e-05, resol)\n', (8283, 8322), True, 'import numpy as np\n'), ((8338, 8379), 'numpy.sqrt', 'np.sqrt', (['(ro ** 2 - (x - xo_im[j][n]) ** 2)'], {}), '(ro ** 2 - (x - xo_im[j][n]) ** 2)\n', (8345, 8379), True, 'import numpy as np\n'), ((11390, 11408), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (11406, 11408), False, 'from matplotlib.ticker import 
FormatStrFormatter, AutoMinorLocator\n'), ((11444, 11462), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (11460, 11462), False, 'from matplotlib.ticker import FormatStrFormatter, AutoMinorLocator\n'), ((11597, 11615), 'matplotlib.ticker.AutoMinorLocator', 'AutoMinorLocator', ([], {}), '()\n', (11613, 11615), False, 'from matplotlib.ticker import FormatStrFormatter, AutoMinorLocator\n'), ((3893, 3921), 'numpy.array', 'np.array', (["samples['flux_in']"], {}), "(samples['flux_in'])\n", (3901, 3921), True, 'import numpy as np\n'), ((4598, 4626), 'numpy.array', 'np.array', (["samples['flux_eg']"], {}), "(samples['flux_eg'])\n", (4606, 4626), True, 'import numpy as np\n'), ((3703, 3732), 'numpy.array', 'np.array', (["samples['sigma_gp']"], {}), "(samples['sigma_gp'])\n", (3711, 3732), True, 'import numpy as np\n'), ((3760, 3787), 'numpy.array', 'np.array', (["samples['rho_gp']"], {}), "(samples['rho_gp'])\n", (3768, 3787), True, 'import numpy as np\n'), ((4408, 4437), 'numpy.array', 'np.array', (["samples['sigma_gp']"], {}), "(samples['sigma_gp'])\n", (4416, 4437), True, 'import numpy as np\n'), ((4465, 4492), 'numpy.array', 'np.array', (["samples['rho_gp']"], {}), "(samples['rho_gp'])\n", (4473, 4492), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.linalg import block_diag
class Network(object):
"""
Class for networks of boreholes with series, parallel, and mixed
connections between the boreholes.
Contains information regarding the physical dimensions and thermal
characteristics of the pipes and the grout material in each boreholes, the
topology of the connections between boreholes, as well as methods to
evaluate fluid temperatures and heat extraction rates based on the work of
Cimmino (2018, 2019) [#Network-Cimmin2018]_, [#Network-Cimmin2019]_.
Attributes
----------
boreholes : list of Borehole objects
List of boreholes included in the bore field.
pipes : list of pipe objects
List of pipes included in the bore field.
bore_connectivity : list, optional
Index of fluid inlet into each borehole. -1 corresponds to a borehole
connected to the bore field inlet. If this parameter is not provided,
parallel connections between boreholes is used.
Default is None.
m_flow_network : float or array, optional
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits. This
parameter is used to initialize the coefficients if it is provided.
Default is None.
cp_f : float, optional
Fluid specific isobaric heat capacity (in J/kg.degC). This parameter is
used to initialize the coefficients if it is provided.
Default is None.
nSegments : int, optional
Number of line segments used per borehole. This parameter is used to
initialize the coefficients if it is provided.
Default is None.
Notes
-----
The expected array shapes of input parameters and outputs are documented
for each class method. `nInlets` and `nOutlets` are the number of inlets
and outlets to the network, and both correspond to the number of parallel
circuits. `nTotalSegments` is the sum of the number of discretized segments
along every borehole. `nBoreholes` is the total number of boreholes in the
network.
References
----------
    .. [#Network-Cimmin2018] Cimmino, M. (2018). g-Functions for bore fields
       with mixed parallel and series connections considering the axial fluid
       temperature variations. Proceedings of the IGSHPA Sweden Research Track
       2018. Stockholm, Sweden. pp. 262-270.
    .. [#Network-Cimmin2019] Cimmino, M. (2019). Semi-analytical method for
       g-function calculation of bore fields with series- and
       parallel-connected boreholes. Science and Technology for the Built
       Environment, 25 (8), 1007-1022.
"""
    def __init__(self, boreholes, pipes, bore_connectivity=None,
                 m_flow_network=None, cp_f=None, nSegments=None):
        # Boreholes and their pipe models (parallel lists, one entry each
        # per borehole)
        self.b = boreholes
        # Total drilled length of the bore field (sum of borehole lengths)
        self.H_tot = sum([b.H for b in self.b])
        self.nBoreholes = len(boreholes)
        self.p = pipes
        # Default connectivity: every borehole fed directly from the network
        # inlet (-1), i.e. all boreholes in parallel
        if bore_connectivity is None:
            bore_connectivity = [-1]*self.nBoreholes
        self.c = bore_connectivity
        self.m_flow_network = m_flow_network
        self.cp_f = cp_f
        # Verify that borehole connectivity is valid
        # (helper defined elsewhere in this module)
        _verify_bore_connectivity(bore_connectivity, self.nBoreholes)
        iInlets, nInlets, iOutlets, nOutlets, iCircuit = _find_inlets_outlets(
            bore_connectivity, self.nBoreholes)
        # Number of inlets and outlets in network
        self.nInlets = nInlets
        self.nOutlets = nOutlets
        # Indices of inlets and outlets in network
        self.iInlets = iInlets
        self.iOutlets = iOutlets
        # Indices of circuit of each borehole in network
        self.iCircuit = iCircuit
        # Initialize stored_coefficients; per the class docstring, the
        # coefficients are pre-computed when m_flow_network, cp_f and
        # nSegments are supplied (helper bodies not visible here)
        self._initialize_coefficients_connectivity()
        self._initialize_stored_coefficients(m_flow_network, cp_f, nSegments)
def get_inlet_temperature(
self, T_f_in, T_b, m_flow_network, cp_f, nSegments):
"""
Returns the inlet fluid temperatures of all boreholes.
Parameters
----------
T_f_in : float or (1,) array
Inlet fluid temperatures into network (in Celsius).
T_b : float or (nTotalSegments,) array
Borehole wall temperatures (in Celsius). If a float is supplied,
the same temperature is applied to all segments of all boreholes.
m_flow_network : float or (nInlets,) array
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits.
cp_f : float
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int or list
Number of borehole segments for each borehole. If an int is
supplied, all boreholes are considered to have the same number of
segments.
Returns
-------
T_f_in : (nBoreholes,) array
Inlet fluid temperature (in Celsius) into each borehole.
"""
# Build coefficient matrices
a_in, a_b = self.coefficients_inlet_temperature(
m_flow_network, cp_f, nSegments)
# Evaluate outlet temperatures
if np.isscalar(T_b):
T_b = np.tile(T_b, sum(self.nSegments))
T_f_in_borehole = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
return T_f_in_borehole
def get_outlet_temperature(self, T_f_in, T_b, m_flow_network, cp_f, nSegments):
"""
Returns the outlet fluid temperatures of all boreholes.
Parameters
----------
T_f_in : float or (1,) array
Inlet fluid temperatures into network (in Celsius).
T_b : float or (nTotalSegments,) array
Borehole wall temperatures (in Celsius). If a float is supplied,
the same temperature is applied to all segments of all boreholes.
m_flow_network : float or (nInlets,) array
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits.
cp_f : float
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int or list
Number of borehole segments for each borehole. If an int is
supplied, all boreholes are considered to have the same number of
segments.
Returns
-------
T_f_out : (nBoreholes,) array
Outlet fluid temperatures (in Celsius) from each borehole.
"""
# Build coefficient matrices
a_in, a_b = self.coefficients_outlet_temperature(
m_flow_network, cp_f, nSegments)
# Evaluate outlet temperatures
if np.isscalar(T_b):
T_b = np.tile(T_b, sum(self.nSegments))
T_f_out = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
return T_f_out
def get_borehole_heat_extraction_rate(
self, T_f_in, T_b, m_flow_network, cp_f, nSegments):
"""
Returns the heat extraction rates of all boreholes.
Parameters
----------
T_f_in : float or (1,) array
Inlet fluid temperatures into network (in Celsius).
T_b : float or (nTotalSegments,) array
Borehole wall temperatures (in Celsius). If a float is supplied,
the same temperature is applied to all segments of all boreholes.
m_flow_network : float or (nInlets,) array
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits.
cp_f : float
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int or list
Number of borehole segments for each borehole. If an int is
supplied, all boreholes are considered to have the same number of
segments.
Returns
-------
Q_b : (nTotalSegments,) array
Heat extraction rates along each borehole segment (in Watts).
"""
a_in, a_b = self.coefficients_borehole_heat_extraction_rate(
m_flow_network, cp_f, nSegments)
if np.isscalar(T_b):
T_b = np.tile(T_b, sum(self.nSegments))
Q_b = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
return Q_b
def get_fluid_heat_extraction_rate(
self, T_f_in, T_b, m_flow_network, cp_f, nSegments):
"""
Returns the total heat extraction rates of all boreholes.
Parameters
----------
T_f_in : float or (1,) array
Inlet fluid temperatures into network (in Celsius).
T_b : float or (nTotalSegments,) array
Borehole wall temperatures (in Celsius). If a float is supplied,
the same temperature is applied to all segments of all boreholes.
m_flow_network : float or (nInlets,) array
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits.
cp_f : float
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int or list
Number of borehole segments for each borehole. If an int is
supplied, all boreholes are considered to have the same number of
segments.
Returns
-------
Q_f : (nBoreholes,) array
Total heat extraction rates from each borehole (in Watts).
"""
a_in, a_b = self.coefficients_fluid_heat_extraction_rate(
m_flow_network, cp_f, nSegments)
if np.isscalar(T_b):
T_b = np.tile(T_b, sum(self.nSegments))
Q_f = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
return Q_f
def get_network_inlet_temperature(
self, Q_t, T_b, m_flow_network, cp_f, nSegments):
"""
Returns the inlet fluid temperature of the network.
Parameters
----------
Q_t : float or (1,) array
Total heat extraction rate from the network (in Watts).
T_b : float or (nTotalSegments,) array
Borehole wall temperatures (in Celsius). If a float is supplied,
the same temperature is applied to all segments of all boreholes.
m_flow_network : float or (nInlets,) array
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits.
cp_f : float
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int or list
Number of borehole segments for each borehole. If an int is
supplied, all boreholes are considered to have the same number of
segments.
Returns
-------
T_f_in : float or (1,) array
Inlet fluid temperature (in Celsius) into the network. The returned
type corresponds to the type of the parameter `Qt`.
"""
# Build coefficient matrices
a_q, a_b = self.coefficients_network_inlet_temperature(
m_flow_network, cp_f, nSegments)
# Evaluate outlet temperatures
if np.isscalar(T_b):
T_b = np.tile(T_b, sum(self.nSegments))
T_f_in = a_q @ np.atleast_1d(Q_t) + a_b @ T_b
if np.isscalar(Q_t):
T_f_in = T_f_in.item()
return T_f_in
def get_network_outlet_temperature(
self, T_f_in, T_b, m_flow_network, cp_f, nSegments):
"""
Returns the outlet fluid temperature of the network.
Parameters
----------
T_f_in : float or (1,) array
Inlet fluid temperatures into network (in Celsius).
T_b : float or (nTotalSegments,) array
Borehole wall temperatures (in Celsius). If a float is supplied,
the same temperature is applied to all segments of all boreholes.
m_flow_network : float or (nInlets,) array
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits.
cp_f : float
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int or list
Number of borehole segments for each borehole. If an int is
supplied, all boreholes are considered to have the same number of
segments.
Returns
-------
T_f_out : float or (1,) array
Outlet fluid temperature (in Celsius) from the network. The
returned type corresponds to the type of the parameter `Tin`.
"""
# Build coefficient matrices
a_in, a_b = self.coefficients_network_outlet_temperature(
m_flow_network, cp_f, nSegments)
# Evaluate outlet temperatures
if np.isscalar(T_b):
T_b = np.tile(T_b, sum(self.nSegments))
T_f_out = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
if np.isscalar(T_f_in):
T_f_out = T_f_out.item()
return T_f_out
def get_network_heat_extraction_rate(
self, T_f_in, T_b, m_flow_network, cp_f, nSegments):
"""
Returns the total heat extraction rate of the network.
Parameters
----------
T_f_in : float or (1,) array
Inlet fluid temperatures into network (in Celsius).
T_b : float or (nTotalSegments,) array
Borehole wall temperatures (in Celsius). If a float is supplied,
the same temperature is applied to all segments of all boreholes.
m_flow_network : float or (nInlets,) array
Total mass flow rate into the network or inlet mass flow rates
into each circuit of the network (in kg/s). If a float is supplied,
the total mass flow rate is split equally into all circuits.
cp_f : float
Fluid specific isobaric heat capacity (in J/kg.degC).
nSegments : int or list
Number of borehole segments for each borehole. If an int is
supplied, all boreholes are considered to have the same number of
segments.
Returns
-------
Q_t : float or (1,) array
Heat extraction rate of the network (in Watts). The returned type
corresponds to the type of the parameter `Tin`.
"""
a_in, a_b = self.coefficients_network_heat_extraction_rate(
m_flow_network, cp_f, nSegments)
if np.isscalar(T_b):
T_b = np.tile(T_b, sum(self.nSegments))
Q_t = a_in @ np.atleast_1d(T_f_in) + a_b @ T_b
if np.isscalar(T_f_in):
Q_t = Q_t.item()
return Q_t
    def coefficients_inlet_temperature(self, m_flow_network, cp_f, nSegments):
        """
        Build coefficient matrices to evaluate inlet fluid temperatures of all
        boreholes.

        Returns coefficients for the relation:

        .. math::
            \\mathbf{T_{f,borehole,in}} =
            \\mathbf{a_{in}} T_{f,network,in}
            + \\mathbf{a_{b}} \\mathbf{T_b}

        Parameters
        ----------
        m_flow_network : float or (nInlets,) array
            Total mass flow rate into the network or inlet mass flow rates
            into each circuit of the network (in kg/s). If a float is supplied,
            the total mass flow rate is split equally into all circuits.
        cp_f : float
            Fluid specific isobaric heat capacity (in J/kg.degC).
        nSegments : int or list
            Number of borehole segments for each borehole. If an int is
            supplied, all boreholes are considered to have the same number of
            segments.

        Returns
        -------
        a_in : (nBoreholes, 1,) array
            Array of coefficients for inlet fluid temperature.
        a_b : (nBoreholes, nTotalSegments,) array
            Array of coefficients for borehole wall temperatures.
        """
        # method_id for coefficients_inlet_temperature is 0
        method_id = 0
        # Check if stored coefficients are available
        # (cache keyed on flow rate, heat capacity and segmentation)
        if self._check_coefficients(
                m_flow_network, cp_f, nSegments, method_id):
            a_in, a_b = self._get_stored_coefficients(method_id)
        else:
            # Update input variables
            # (presumably populates self._m_flow_borehole, self._cp_borehole
            # and self.nSegments used below -- helper not visible here)
            self._format_inputs(m_flow_network, cp_f, nSegments)
            # Coefficient matrices for borehole inlet temperatures:
            # [T_{f,b,in}] = [c_in]*[T_{f,n,in}] + [c_out]*[T_{f,b,out}]
            c_in = self._c_in
            c_out = self._c_out
            # Coefficient matrices for borehole outlet temperatures:
            # [T_{f,b,out}] = [A]*[T_{f,b,in}] + [B]*[T_{b}]
            # (one pipe model per borehole, assembled block-diagonally)
            AB = list(zip(*[
                self.p[i].coefficients_outlet_temperature(
                    self._m_flow_borehole[i],
                    self._cp_borehole[i],
                    self.nSegments[i])
                for i in range(self.nBoreholes)]))
            A = block_diag(*AB[0])
            B = block_diag(*AB[1])
            # Coefficient matrices for borehole inlet temperatures:
            # [T_{f,b,in}] = [a_in]*[T_{f,n,in}] + [a_b]*[T_{b}]
            # Solve the circular dependency between inlet and outlet
            # temperatures: (I - c_out A) T_in = c_in T_net + c_out B T_b
            ICA = np.eye(self.nBoreholes) - c_out @ A
            a_in = np.linalg.solve(ICA, c_in)
            a_b = np.linalg.solve(ICA, c_out @ B)
            # Store coefficients
            self._set_stored_coefficients(
                m_flow_network, cp_f, nSegments, (a_in, a_b), method_id)
        return a_in, a_b
    def coefficients_outlet_temperature(self, m_flow_network, cp_f, nSegments):
        """
        Build coefficient matrices to evaluate outlet fluid temperatures of all
        boreholes.

        Returns coefficients for the relation:

        .. math::
            \\mathbf{T_{f,borehole,out}} =
            \\mathbf{a_{in}} T_{f,network,in}
            + \\mathbf{a_{b}} \\mathbf{T_b}

        Parameters
        ----------
        m_flow_network : float or (nInlets,) array
            Total mass flow rate into the network or inlet mass flow rates
            into each circuit of the network (in kg/s). If a float is supplied,
            the total mass flow rate is split equally into all circuits.
        cp_f : float
            Fluid specific isobaric heat capacity (in J/kg.degC).
        nSegments : int or list
            Number of borehole segments for each borehole. If an int is
            supplied, all boreholes are considered to have the same number of
            segments.

        Returns
        -------
        a_in : (nBoreholes, 1,) array
            Array of coefficients for inlet fluid temperature.
        a_b : (nBoreholes, nTotalSegments,) array
            Array of coefficients for borehole wall temperatures.
        """
        # method_id for coefficients_outlet_temperature is 1
        method_id = 1
        # Check if stored coefficients are available
        # (cache keyed on flow rate, heat capacity and segmentation)
        if self._check_coefficients(
                m_flow_network, cp_f, nSegments, method_id):
            a_in, a_b = self._get_stored_coefficients(method_id)
        else:
            # Update input variables
            # (presumably populates self._m_flow_borehole, self._cp_borehole
            # and self.nSegments used below -- helper not visible here)
            self._format_inputs(m_flow_network, cp_f, nSegments)
            # Coefficient matrices for borehole inlet temperatures:
            # [T_{f,b,in}] = [c_in]*[T_{f,n,in}] + [c_out]*[T_{f,b,out}]
            c_in = self._c_in
            c_out = self._c_out
            # Coefficient matrices for borehole outlet temperatures:
            # [T_{f,b,out}] = [A]*[T_{f,b,in}] + [B]*[T_{b}]
            # (one pipe model per borehole, assembled block-diagonally)
            AB = list(zip(*[
                self.p[i].coefficients_outlet_temperature(
                    self._m_flow_borehole[i],
                    self._cp_borehole[i],
                    self.nSegments[i])
                for i in range(self.nBoreholes)]))
            A = block_diag(*AB[0])
            B = block_diag(*AB[1])
            # Coefficient matrices for borehole outlet temperatures:
            # [T_{f,b,out}] = [a_in]*[T_{f,n,in}] + [a_b]*[T_{b}]
            # Solve the circular dependency between outlet and inlet
            # temperatures: (I - A c_out) T_out = A c_in T_net + B T_b
            IAC = np.eye(self.nBoreholes) - A @ c_out
            a_in = np.linalg.solve(IAC, A @ c_in)
            a_b = np.linalg.solve(IAC, B)
            # Store coefficients
            self._set_stored_coefficients(
                m_flow_network, cp_f, nSegments, (a_in, a_b), method_id)
        return a_in, a_b
    def coefficients_network_inlet_temperature(
            self, m_flow_network, cp_f, nSegments):
        """
        Build coefficient matrices to evaluate inlet fluid temperature of the
        network.

        Returns coefficients for the relation:

        .. math::
            \\mathbf{T_{f,network,in}} =
            \\mathbf{a_{q,f}} Q_{f}
            + \\mathbf{a_{b}} \\mathbf{T_b}

        Parameters
        ----------
        m_flow_network : float or (nInlets,) array
            Total mass flow rate into the network or inlet mass flow rates
            into each circuit of the network (in kg/s). If a float is supplied,
            the total mass flow rate is split equally into all circuits.
        cp_f : float
            Fluid specific isobaric heat capacity (in J/kg.degC).
        nSegments : int or list
            Number of borehole segments for each borehole. If an int is
            supplied, all boreholes are considered to have the same number of
            segments.

        Returns
        -------
        a_qf : (1, 1,) array
            Array of coefficients for total heat extraction rate.
        a_b : (1, nTotalSegments,) array
            Array of coefficients for borehole wall temperatures.
        """
        # method_id for coefficients_network_inlet_temperature is 2
        method_id = 2
        # Check if stored coefficients are available
        if self._check_coefficients(m_flow_network, cp_f, nSegments, method_id):
            a_qf, a_b = self._get_stored_coefficients(method_id)
        else:
            # Coefficient matrices for network heat extraction rates:
            # [Q_{tot}] = [b_in]*[T_{f,n,in}] + [b_b]*[T_{b}]
            b_in, b_b = self.coefficients_network_heat_extraction_rate(
                m_flow_network, cp_f, nSegments)
            # Coefficient matrices for network inlet temperature, obtained
            # by inverting the relation above:
            # [T_{f,n,in}] = [a_qf]*[Q_{tot}] + [a_b]*[T_{b}]
            # The explicit inverse is required since it is itself returned
            # as a_qf (b_in is a 1x1 matrix per the docstring above).
            b_in_inv = np.linalg.inv(b_in)
            a_qf = b_in_inv
            a_b = -b_in_inv.dot(b_b)
            # Store coefficients
            self._set_stored_coefficients(
                m_flow_network, cp_f, nSegments, (a_qf, a_b), method_id)
        return a_qf, a_b
def coefficients_network_outlet_temperature(
        self, m_flow_network, cp_f, nSegments):
    """
    Build coefficient matrices to evaluate outlet fluid temperature of the
    network.

    Returns coefficients for the relation:

    .. math::
        \\mathbf{T_{f,network,out}} =
            \\mathbf{a_{in}} T_{f,network,in}
            + \\mathbf{a_{b}} \\mathbf{T_b}

    Parameters
    ----------
    m_flow_network : float or (nInlets,) array
        Total mass flow rate into the network, or inlet mass flow rate of
        each circuit (in kg/s). A float is split equally over all circuits.
    cp_f : float
        Fluid specific isobaric heat capacity (in J/kg.degC).
    nSegments : int or list
        Number of borehole segments for each borehole. An int applies the
        same number of segments to every borehole.

    Returns
    -------
    a_in : (1, 1,) array
        Array of coefficients for inlet fluid temperature.
    a_b : (1, nTotalSegments,) array
        Array of coefficients for borehole wall temperatures.
    """
    # method_id for coefficients_network_outlet_temperature is 3
    method_id = 3
    if not self._check_coefficients(
            m_flow_network, cp_f, nSegments, method_id):
        # Outlet temperature of each individual borehole:
        # [T_{f,b,out}] = [c_in]*[T_{f,n,in}] + [c_b]*[T_{b}]
        c_in, c_b = self.coefficients_outlet_temperature(
            m_flow_network, cp_f, nSegments)
        # Flow-weighted mixing of the borehole outlets gives the single
        # network outlet temperature
        mixer = self._coefficients_mixing(m_flow_network)
        coefficients = (mixer @ c_in, mixer @ c_b)
        # Cache the result for subsequent calls with identical inputs
        self._set_stored_coefficients(
            m_flow_network, cp_f, nSegments, coefficients, method_id)
        return coefficients
    return self._get_stored_coefficients(method_id)
def coefficients_borehole_heat_extraction_rate(
        self, m_flow_network, cp_f, nSegments):
    """
    Build coefficient matrices to evaluate heat extraction rates of all
    borehole segments.

    Returns coefficients for the relation:

    .. math::
        \\mathbf{Q_b} =
            \\mathbf{a_{in}} T_{f,network,in}
            + \\mathbf{a_{b}} \\mathbf{T_b}

    Parameters
    ----------
    m_flow_network : float or (nInlets,) array
        Total mass flow rate into the network, or inlet mass flow rate of
        each circuit (in kg/s). A float is split equally over all circuits.
    cp_f : float
        Fluid specific isobaric heat capacity (in J/kg.degC).
    nSegments : int or list
        Number of borehole segments for each borehole. An int applies the
        same number of segments to every borehole.

    Returns
    -------
    a_in : (nTotalSegments, 1,) array
        Array of coefficients for inlet fluid temperature.
    a_b : (nTotalSegments, nTotalSegments,) array
        Array of coefficients for borehole wall temperatures.
    """
    # method_id for coefficients_borehole_heat_extraction_rate is 4
    method_id = 4
    if self._check_coefficients(
            m_flow_network, cp_f, nSegments, method_id):
        return self._get_stored_coefficients(method_id)
    # Update input variables
    self._format_inputs(m_flow_network, cp_f, nSegments)
    # Borehole inlet temperatures:
    # [T_{f,b,in}] = [c_in]*[T_{f,n,in}] + [c_b]*[T_{b}]
    c_in, c_b = self.coefficients_inlet_temperature(
        m_flow_network, cp_f, nSegments)
    # Per-borehole coefficients, assembled into block-diagonal matrices:
    # [Q_{b}] = [A]*[T_{f,b,in}] + [B]*[T_{b}]
    A_blocks = []
    B_blocks = []
    for i in range(self.nBoreholes):
        A_i, B_i = self.p[i].coefficients_borehole_heat_extraction_rate(
            self._m_flow_borehole[i],
            self._cp_borehole[i],
            self.nSegments[i])
        A_blocks.append(A_i)
        B_blocks.append(B_i)
    A = block_diag(*A_blocks)
    B = block_diag(*B_blocks)
    # Express against the network inlet temperature:
    # [Q_{b}] = [a_in]*[T_{f,n,in}] + [a_b]*[T_{b}]
    a_in = A @ c_in
    a_b = A @ c_b + B
    # Cache the result for subsequent calls with identical inputs
    self._set_stored_coefficients(
        m_flow_network, cp_f, nSegments, (a_in, a_b), method_id)
    return a_in, a_b
def coefficients_fluid_heat_extraction_rate(
        self, m_flow_network, cp_f, nSegments):
    """
    Build coefficient matrices to evaluate heat extraction rates of all
    boreholes.

    Returns coefficients for the relation:

    .. math::
        \\mathbf{Q_f} =
            \\mathbf{a_{in}} T_{f,network,in}
            + \\mathbf{a_{b}} \\mathbf{T_b}

    Parameters
    ----------
    m_flow_network : float or (nInlets,) array
        Total mass flow rate into the network, or inlet mass flow rate of
        each circuit (in kg/s). A float is split equally over all circuits.
    cp_f : float
        Fluid specific isobaric heat capacity (in J/kg.degC).
    nSegments : int or list
        Number of borehole segments for each borehole. An int applies the
        same number of segments to every borehole.

    Returns
    -------
    a_in : (nBoreholes, 1,) array
        Array of coefficients for inlet fluid temperature.
    a_b : (nBoreholes, nTotalSegments,) array
        Array of coefficients for borehole wall temperatures.
    """
    # method_id for coefficients_fluid_heat_extraction_rate is 5
    method_id = 5
    if self._check_coefficients(
            m_flow_network, cp_f, nSegments, method_id):
        return self._get_stored_coefficients(method_id)
    # Update input variables
    self._format_inputs(m_flow_network, cp_f, nSegments)
    # Borehole inlet temperatures:
    # [T_{f,b,in}] = [c_in]*[T_{f,n,in}] + [c_b]*[T_{b}]
    c_in, c_b = self.coefficients_inlet_temperature(
        m_flow_network, cp_f, nSegments)
    # Per-borehole coefficient pairs (A_i, B_i) for:
    # [Q_{f}] = [A]*[T_{f,b,in}] + [B]*[T_{b}]
    pairs = [
        self.p[i].coefficients_fluid_heat_extraction_rate(
            self._m_flow_borehole[i],
            self._cp_borehole[i],
            self.nSegments[i])
        for i in range(self.nBoreholes)]
    A = block_diag(*(pair[0] for pair in pairs))
    B = block_diag(*(pair[1] for pair in pairs))
    # Express against the network inlet temperature:
    # [Q_{f}] = [a_in]*[T_{f,n,in}] + [a_b]*[T_{b}]
    a_in = A @ c_in
    a_b = A @ c_b + B
    # Cache the result for subsequent calls with identical inputs
    self._set_stored_coefficients(
        m_flow_network, cp_f, nSegments, (a_in, a_b), method_id)
    return a_in, a_b
def coefficients_network_heat_extraction_rate(
        self, m_flow_network, cp_f, nSegments):
    """
    Build coefficient matrices to evaluate total heat extraction rate of
    the network.

    Returns coefficients for the relation:

    .. math::
        \\mathbf{Q_network} =
            \\mathbf{a_{in}} T_{f,network,in}
            + \\mathbf{a_{b}} \\mathbf{T_b}

    Parameters
    ----------
    m_flow_network : float or (nInlets,) array
        Total mass flow rate into the network, or inlet mass flow rate of
        each circuit (in kg/s). A float is split equally over all circuits.
    cp_f : float
        Fluid specific isobaric heat capacity (in J/kg.degC).
    nSegments : int or list
        Number of borehole segments for each borehole. An int applies the
        same number of segments to every borehole.

    Returns
    -------
    a_in : (1, 1,) array
        Array of coefficients for inlet fluid temperature.
    a_b : (1, nTotalSegments,) array
        Array of coefficients for borehole wall temperatures.
    """
    # method_id for coefficients_network_heat_extraction_rate is 6
    method_id = 6
    if not self._check_coefficients(
            m_flow_network, cp_f, nSegments, method_id):
        # Heat extraction rate of each individual borehole:
        # [Q_{f}] = [b_in]*[T_{f,n,in}] + [b_b]*[T_{b}]
        b_in, b_b = self.coefficients_fluid_heat_extraction_rate(
            m_flow_network, cp_f, nSegments)
        # The network total is the sum over boreholes, kept as (1, ...) rows:
        # [Q_{tot}] = [a_in]*[T_{f,n,in}] + [a_b]*[T_{b}]
        a_in = b_in.sum(axis=0, keepdims=True)
        a_b = b_b.sum(axis=0, keepdims=True)
        # Cache the result for subsequent calls with identical inputs
        self._set_stored_coefficients(
            m_flow_network, cp_f, nSegments, (a_in, a_b), method_id)
        return a_in, a_b
    return self._get_stored_coefficients(method_id)
def _coefficients_mixing(self, m_flow_network):
    """
    Build the flow-weighted mixing coefficients for the relation:

    .. math::
        T_{f,network,out} =
            \\mathbf{a_{out}} \\mathbf{T_{f,borehole,out}}

    Parameters
    ----------
    m_flow_network : float or (nInlets,) array
        Total mass flow rate into the network, or inlet mass flow rate of
        each circuit (in kg/s). A float is split equally over all circuits.

    Returns
    -------
    mix_out : (1, nOutlets,) array
        Array of coefficients for outlet fluid temperatures of all
        boreholes.
    """
    # Recompute only when the mass flow rate has changed
    if not self._check_mixing_coefficients(m_flow_network):
        # Outlet temperatures are mixed proportionally to circuit flow rates
        weights = self._m_flow_in / np.sum(self._m_flow_in)
        mix_out = np.zeros((1, self.nBoreholes))
        mix_out[0, self.iOutlets] = weights
        self._mix_out = mix_out
        self._mixing_m_flow = m_flow_network
    return self._mix_out
def _initialize_coefficients_connectivity(self):
    """
    Initialize the connectivity coefficient matrices for the relation:

    .. math::
        \\mathbf{T_{f,borehole,in}} =
            \\mathbf{c_{in}} T_{f,network,in}
            + \\mathbf{c_{out}} \\mathbf{T_{f,borehole,out}}
    """
    c_in = np.zeros((self.nBoreholes, 1))
    c_out = np.zeros((self.nBoreholes, self.nBoreholes))
    # Boreholes fed directly by the network inlet
    for i in range(self.nInlets):
        c_in[self.iInlets[i], 0] = 1.
    # Boreholes fed by the outlet of an upstream borehole (c[i] != -1)
    for i in range(self.nBoreholes):
        upstream = self.c[i]
        if upstream != -1:
            c_out[i, upstream] = 1.
    self._c_in = c_in
    self._c_out = c_out
def _initialize_stored_coefficients(self, m_flow_network, cp_f, nSegments):
    """
    Initialize the per-method coefficient caches and, when m_flow_network,
    cp_f and nSegments are all supplied, pre-compute every coefficient
    matrix.
    """
    nMethods = 7  # Number of cached class methods
    # One cache slot per method; empty tuples are immutable, so sharing a
    # single placeholder across slots is safe
    self._stored_coefficients = [()] * nMethods
    self._stored_m_flow_cp = [
        np.empty(self.nInlets) for _ in range(nMethods)]
    self._stored_nSegments = [np.nan] * nMethods
    self._m_flow_cp_model_variables = np.empty(self.nInlets)
    self._nSegments_model_variables = np.nan
    # NaN sentinels guarantee the first cache check fails
    self._mixing_m_flow = np.full(self.nInlets, np.nan)
    self._mix_out = np.full((1, self.nBoreholes), np.nan)
    # If m_flow, cp_f, and nSegments are all specified, evaluate and store
    # every matrix coefficient up front
    if m_flow_network is not None and cp_f is not None and nSegments is not None:
        for build in (
                self.coefficients_inlet_temperature,
                self.coefficients_outlet_temperature,
                self.coefficients_network_inlet_temperature,
                self.coefficients_network_outlet_temperature,
                self.coefficients_borehole_heat_extraction_rate,
                self.coefficients_fluid_heat_extraction_rate,
                self.coefficients_network_heat_extraction_rate):
            build(m_flow_network, cp_f, nSegments)
def _set_stored_coefficients(self, m_flow_network, cp_f, nSegments, coefficients,
                             method_id):
    """
    Cache the coefficient matrices for *method_id*, together with the
    m_flow*cp product and segmentation they were evaluated for.
    """
    self._stored_coefficients[method_id] = coefficients
    self._stored_m_flow_cp[method_id] = m_flow_network * cp_f
    self._stored_nSegments[method_id] = nSegments
def _get_stored_coefficients(self, method_id):
    """Return the cached coefficient tuple for *method_id*."""
    return self._stored_coefficients[method_id]
def _check_mixing_coefficients(self, m_flow_network, tol=1e-6):
    """
    Return True if the stored mixing coefficients were evaluated at a mass
    flow rate within relative tolerance *tol* of *m_flow_network*.
    """
    stored = self._mixing_m_flow
    # NaN sentinels from initialization make this comparison False, which
    # forces a recomputation on first use
    return bool(np.all(np.abs(m_flow_network - stored) < tol * np.abs(stored)))
def _check_coefficients(
        self, m_flow_network, cp_f, nSegments, method_id, tol=1e-6):
    """
    Return True if the cached coefficients for *method_id* are still valid
    for the given mass flow rate, heat capacity and segmentation.
    """
    # Segmentation must match exactly
    if nSegments != self._stored_nSegments[method_id]:
        return False
    # The m_flow*cp product must agree within relative tolerance *tol*
    stored_m_flow_cp = self._stored_m_flow_cp[method_id]
    deviation = np.abs(m_flow_network * cp_f - stored_m_flow_cp)
    return bool(np.all(deviation < np.abs(stored_m_flow_cp) * tol))
def _format_inputs(self, m_flow_network, cp_f, nSegments):
    """
    Format mass flow rate, heat capacity and segmentation inputs into
    per-circuit and per-borehole arrays.
    """
    # Mass flow rate in each fluid circuit: a scalar total is split equally
    m_flow_in = np.atleast_1d(m_flow_network)
    if len(m_flow_in) == 1:
        m_flow_in = np.tile(m_flow_network / self.nInlets, self.nInlets)
    elif len(m_flow_in) != self.nInlets:
        raise ValueError(
            'Incorrect length of mass flow vector.')
    self._m_flow_in = m_flow_in
    # Heat capacity in each fluid circuit: must be uniform across circuits
    cp_in = np.atleast_1d(cp_f)
    if len(cp_in) == 1:
        cp_in = np.tile(cp_f, self.nInlets)
    elif len(cp_in) != self.nInlets:
        raise ValueError(
            'Incorrect length of heat capacity vector.')
    elif not np.all(cp_in == cp_in[0]):
        raise ValueError(
            'The heat capacity should be the same in all circuits.')
    self._cp_in = cp_in
    # Per-borehole values follow the circuit each borehole belongs to
    self._m_flow_borehole = np.array([m_flow_in[i] for i in self.iCircuit])
    self._cp_borehole = np.array([cp_in[i] for i in self.iCircuit])
    # Number of segments for each borehole
    nSeg = np.atleast_1d(nSegments)
    if len(nSeg) == 1:
        self.nSegments = [nSeg[0]] * self.nBoreholes
    elif len(nSeg) != self.nBoreholes:
        raise ValueError(
            'Incorrect length of number of segments list.')
    else:
        self.nSegments = nSegments
class _EquivalentNetwork(Network):
    """
    Class for networks of equivalent boreholes with parallel connections
    between the equivalent boreholes.

    Contains information regarding the physical dimensions and thermal
    characteristics of the pipes and the grout material in each boreholes, the
    topology of the connections between boreholes, as well as methods to
    evaluate fluid temperatures and heat extraction rates based on the work of
    Cimmino (2018, 2019) [#Network-Cimmin2018]_, [#Network-Cimmin2019]_.

    Attributes
    ----------
    boreholes : list of Borehole objects
        List of boreholes included in the bore field.
    pipes : list of pipe objects
        List of pipes included in the bore field.
    m_flow_network : float or array, optional
        Total mass flow rate into the network or inlet mass flow rates
        into each circuit of the network (in kg/s). If a float is supplied,
        the total mass flow rate is split equally into all circuits. This
        parameter is used to initialize the coefficients if it is provided.
        Default is None.
    cp_f : float, optional
        Fluid specific isobaric heat capacity (in J/kg.degC). This parameter is
        used to initialize the coefficients if it is provided.
        Default is None.
    nSegments : int, optional
        Number of line segments used per borehole. This parameter is used to
        initialize the coefficients if it is provided.
        Default is None.

    Notes
    -----
    The expected array shapes of input parameters and outputs are documented
    for each class method. `nInlets` and `nOutlets` are the number of inlets
    and outlets to the network, and both correspond to the number of parallel
    circuits. `nTotalSegments` is the sum of the number of discretized segments
    along every borehole. `nBoreholes` is the total number of boreholes in the
    network.

    References
    ----------
    .. [#Network-Cimmin2018] <NAME>. (2018). g-Functions for bore fields with
       mixed parallel and series connections considering the axial fluid
       temperature variations. Proceedings of the IGSHPA Sweden Research Track
       2018. Stockholm, Sweden. pp. 262-270.
    .. [#Network-Cimmin2019] <NAME>. (2019). Semi-analytical method for
       g-function calculation of bore fields with series- and
       parallel-connected boreholes. Science and Technology for the Built
       Environment, 25 (8), 1007-1022.
    """
    def __init__(self, equivalentBoreholes, pipes, m_flow_network=None,
                 cp_f=None, nSegments=None):
        self.b = equivalentBoreholes
        # Total drilled length accounts for the multiplicity of each
        # equivalent borehole
        self.H_tot = sum(b.H * b.nBoreholes for b in self.b)
        self.nBoreholes = len(equivalentBoreholes)
        # Column vector of weights: the number of real boreholes represented
        # by each equivalent borehole
        self.wBoreholes = np.array(
            [[b.nBoreholes for b in equivalentBoreholes]]).T
        self.nBoreholes_total = np.sum(self.wBoreholes)
        self.p = pipes
        # All equivalent boreholes are connected in parallel to the inlet
        self.c = [-1]*self.nBoreholes
        self.m_flow_network = m_flow_network
        self.cp_f = cp_f
        # Verify that borehole connectivity is valid
        iInlets, nInlets, iOutlets, nOutlets, iCircuit = _find_inlets_outlets(
            self.c, self.nBoreholes)
        # Number of inlets and outlets in network
        self.nInlets = nInlets
        self.nOutlets = nOutlets
        # Indices of inlets and outlets in network
        self.iInlets = iInlets
        self.iOutlets = iOutlets
        # Indices of circuit of each borehole in network
        self.iCircuit = iCircuit
        # Initialize stored_coefficients
        self._initialize_coefficients_connectivity()
        self._initialize_stored_coefficients(m_flow_network, cp_f, nSegments)
        return

    def coefficients_network_heat_extraction_rate(
            self, m_flow_network, cp_f, nSegments):
        """
        Build coefficient matrices to evaluate total heat extraction rate of
        the network.

        Returns coefficients for the relation:

        .. math::
            \\mathbf{Q_network} =
                \\mathbf{a_{in}} T_{f,network,in}
                + \\mathbf{a_{b}} \\mathbf{T_b}

        Parameters
        ----------
        m_flow_network : float or (nInlets,) array
            Total mass flow rate into the network or inlet mass flow rates
            into each circuit of the network (in kg/s). If a float is supplied,
            the total mass flow rate is split equally into all circuits.
        cp_f : float
            Fluid specific isobaric heat capacity (in J/kg.degC).
        nSegments : int or list
            Number of borehole segments for each borehole. If an int is
            supplied, all boreholes are considered to have the same number of
            segments.

        Returns
        -------
        a_in : (1, 1,) array
            Array of coefficients for inlet fluid temperature.
        a_b : (1, nTotalSegments,) array
            Array of coefficients for borehole wall temperatures.
        """
        # method_id for coefficients_network_heat_extraction_rate is 6
        method_id = 6
        # Check if stored coefficients are available
        if self._check_coefficients(
                m_flow_network, cp_f, nSegments, method_id):
            a_in, a_b = self._get_stored_coefficients(method_id)
        else:
            # Coefficient matrices for fluid heat extraction rates:
            # [Q_{f}] = [b_in]*[T_{f,n,in}] + [b_b]*[T_{b}]
            b_in, b_b = self.coefficients_fluid_heat_extraction_rate(
                m_flow_network, cp_f, nSegments)
            # Weight each equivalent borehole by the number of real boreholes
            # it represents before summing:
            # [Q_{tot}] = [a_in]*[T_{f,n,in}] + [a_b]*[T_{b}]
            a_in = np.reshape(np.sum(b_in*self.wBoreholes, axis=0), (1,-1))
            a_b = np.reshape(np.sum(b_b*self.wBoreholes, axis=0), (1,-1))
            # Store coefficients
            self._set_stored_coefficients(
                m_flow_network, cp_f, nSegments, (a_in, a_b), method_id)
        return a_in, a_b

    def _coefficients_mixing(self, m_flow_network):
        """
        Returns coefficients for the relation:

        .. math::
            T_{f,network,out} =
                \\mathbf{a_{out}} \\mathbf{T_{f,borehole,out}}

        Parameters
        ----------
        m_flow_network : float or (nInlets,) array
            Total mass flow rate into the network or inlet mass flow rates
            into each circuit of the network (in kg/s). If a float is supplied,
            the total mass flow rate is split equally into all circuits.

        Returns
        -------
        mix_out : (1, nOutlets,) array
            Array of coefficients for outlet fluid temperatures of all
            boreholes.
        """
        if not self._check_mixing_coefficients(m_flow_network):
            # The mixing weight of each equivalent borehole includes its
            # multiplicity, since it carries the flow of several boreholes
            weighted_m_flow = self._m_flow_in * self.wBoreholes.flatten()
            self._mix_out = np.zeros((1, self.nBoreholes))
            self._mix_out[0, self.iOutlets] = \
                weighted_m_flow / np.sum(weighted_m_flow)
            self._mixing_m_flow = m_flow_network
        return self._mix_out

    def _format_inputs(self, m_flow_network, cp_f, nSegments):
        """
        Format mass flow rate and heat capacity inputs.
        """
        # Format mass flow rate inputs
        # Mass flow rate in each fluid circuit: the network total is divided
        # over all real boreholes, each equivalent borehole receiving the
        # per-borehole flow rate
        m_flow_in = np.atleast_1d(m_flow_network)
        if len(m_flow_in) == 1:
            m_flow_in = np.array(
                [m_flow_network/self.nBoreholes_total for b in self.b])
        elif not len(m_flow_in) == self.nInlets:
            raise ValueError(
                'Incorrect length of mass flow vector.')
        self._m_flow_in = m_flow_in
        # Format heat capacity inputs
        # Heat capacity in each fluid circuit
        cp_in = np.atleast_1d(cp_f)
        if len(cp_in) == 1:
            cp_in = np.tile(cp_f, self.nInlets)
        elif not len(cp_in) == self.nInlets:
            raise ValueError(
                'Incorrect length of heat capacity vector.')
        elif not np.all(cp_in == cp_in[0]):
            raise ValueError(
                'The heat capacity should be the same in all circuits.')
        self._cp_in = cp_in
        # Mass flow rate in boreholes
        m_flow_borehole = np.array([m_flow_in[i] for i in self.iCircuit])
        self._m_flow_borehole = m_flow_borehole
        # Heat capacity in boreholes
        cp_borehole = np.array([cp_in[i] for i in self.iCircuit])
        self._cp_borehole = cp_borehole
        # Format number of segments for each borehole
        nSeg = np.atleast_1d(nSegments)
        if len(nSeg) == 1:
            # Use the scalar element (nSeg[0]) so that a length-1 sequence
            # behaves the same as a plain int, consistent with
            # Network._format_inputs
            self.nSegments = [nSeg[0]] * self.nBoreholes
        elif not len(nSeg) == self.nBoreholes:
            raise ValueError(
                'Incorrect length of number of segments list.')
        else:
            self.nSegments = nSegments
def network_thermal_resistance(network, m_flow_network, cp_f):
    """
    Evaluate the effective bore field thermal resistance.

    As proposed in Cimmino (2018, 2019) [#Network-Cimmin2018]_,
    [#Network-Cimmin2019]_.

    Parameters
    ----------
    network : network object
        Model of the network.
    m_flow_network : float or (nInlets, ) array
        Total mass flow rate into the network or inlet mass flow rates
        into each circuit of the network (in kg/s). If a float is supplied,
        the total mass flow rate is split equally into all circuits.
    cp_f : float
        Fluid specific isobaric heat capacity (in J/kg.degC).

    Returns
    -------
    R_field : float
        Effective bore field thermal resistance (m.K/W).
    """
    # Total borehole length
    H_tot = network.H_tot
    # Coefficients for T_{f,out} = A_out*T_{f,in} + [B_out]*[T_b], and
    # Q_b = [A_Q]*T{f,in} + [B_Q]*[T_b], evaluated with a single segment per
    # borehole; the wall-temperature coefficients are not needed here
    A_out, _ = network.coefficients_network_outlet_temperature(
        m_flow_network, cp_f, 1)
    A_Q, _ = network.coefficients_network_heat_extraction_rate(
        m_flow_network, cp_f, 1)
    # Effective bore field thermal resistance
    R_field = -0.5*H_tot*(1. + A_out)/A_Q
    # Return a plain Python float rather than a 0-d/1x1 array
    if not np.isscalar(R_field):
        R_field = R_field.item()
    return R_field
def _find_inlets_outlets(bore_connectivity, nBoreholes):
    """
    Find the numbers and indices of the boreholes connected to the inlet
    and outlet of the network, and the circuit index of every borehole.

    Raises ValueError if the number of inlets does not equal the number of
    outlets.

    Parameters
    ----------
    bore_connectivity : list
        Index of fluid inlet into each borehole. -1 corresponds to a borehole
        connected to the bore field inlet.
    nBoreholes : int
        Number of boreholes in the bore field.
    """
    # Boreholes fed directly from the network inlet (connectivity == -1)
    iInlets = [i for i in range(nBoreholes) if bore_connectivity[i] == -1]
    nInlets = bore_connectivity.count(-1)
    # Boreholes that feed no other borehole are network outlets
    connected = set(bore_connectivity)
    iOutlets = [i for i in range(nBoreholes) if i not in connected]
    nOutlets = len(iOutlets)
    # Circuit index: which inlet each borehole's flow path originates from
    iCircuit = [iInlets.index(_path_to_inlet(bore_connectivity, i)[-1])
                for i in range(nBoreholes)]
    if nInlets != nOutlets:
        raise ValueError(
            'The network should have as many inlets as outlets.')
    return iInlets, nInlets, iOutlets, nOutlets, iCircuit
def _path_to_inlet(bore_connectivity, bore_index):
"""
Returns the path from a borehole to the bore field inlet.
Parameters
----------
bore_connectivity : list
Index of fluid inlet into each borehole. -1 corresponds to a borehole
connected to the bore field inlet.
bore_index : int
Index of borehole to evaluate path.
Returns
-------
path : list
List of boreholes leading to the bore field inlet, starting from
borehole bore_index
"""
# Initialize path
path = [bore_index]
# Index of borehole feeding into borehole (bore_index)
index_in = bore_connectivity[bore_index]
# Stop when bore field inlet is reached (index_in == -1)
while not index_in == -1:
# Add index of upstream borehole to path
path.append(index_in)
# Get index of next upstream borehole
index_in = bore_connectivity[index_in]
return path
def _verify_bore_connectivity(bore_connectivity, nBoreholes):
"""
Verifies that borehole connectivity is valid.
This function raises an error if the supplied borehole connectivity is
invalid.
Parameters
----------
bore_connectivity : list
Index of fluid inlet into each borehole. -1 corresponds to a borehole
connected to the bore field inlet.
nBoreholes : int
Number of boreholes in the bore field.
"""
if not len(bore_connectivity) == nBoreholes:
raise ValueError(
'The length of the borehole connectivity list does not correspond '
'to the number of boreholes in the bore field.')
if max(bore_connectivity) >= nBoreholes:
raise ValueError(
'The borehole connectivity list contains borehole indices that '
'are not part of the network.')
# Cycle through each borehole and verify that connections lead to -1
# (-1 is the bore field inlet) and that no two boreholes have the same
# index of fluid inlet (except for -1).
for i in range(nBoreholes):
n = 0 # Initialize step counter
# Index of borehole feeding into borehole i
index_in = bore_connectivity[i]
if index_in != -1 and bore_connectivity.count(index_in) > 1:
raise ValueError(
'Two boreholes cannot have the same inlet, except fort the '
'network inlet (index of -1).')
# Stop when bore field inlet is reached (index_in == -1)
while not index_in == -1:
index_in = bore_connectivity[index_in]
n += 1 # Increment step counter
# Raise error if n exceeds the number of boreholes
if n > nBoreholes:
raise ValueError(
'The borehole connectivity list is invalid.')
return
| [
"numpy.tile",
"numpy.eye",
"numpy.linalg.solve",
"numpy.abs",
"numpy.isscalar",
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"numpy.linalg.inv",
"scipy.linalg.block_diag",
"numpy.atleast_1d"
] | [((5461, 5477), 'numpy.isscalar', 'np.isscalar', (['T_b'], {}), '(T_b)\n', (5472, 5477), True, 'import numpy as np\n'), ((7051, 7067), 'numpy.isscalar', 'np.isscalar', (['T_b'], {}), '(T_b)\n', (7062, 7067), True, 'import numpy as np\n'), ((8583, 8599), 'numpy.isscalar', 'np.isscalar', (['T_b'], {}), '(T_b)\n', (8594, 8599), True, 'import numpy as np\n'), ((10101, 10117), 'numpy.isscalar', 'np.isscalar', (['T_b'], {}), '(T_b)\n', (10112, 10117), True, 'import numpy as np\n'), ((11760, 11776), 'numpy.isscalar', 'np.isscalar', (['T_b'], {}), '(T_b)\n', (11771, 11776), True, 'import numpy as np\n'), ((11895, 11911), 'numpy.isscalar', 'np.isscalar', (['Q_t'], {}), '(Q_t)\n', (11906, 11911), True, 'import numpy as np\n'), ((13493, 13509), 'numpy.isscalar', 'np.isscalar', (['T_b'], {}), '(T_b)\n', (13504, 13509), True, 'import numpy as np\n'), ((13633, 13652), 'numpy.isscalar', 'np.isscalar', (['T_f_in'], {}), '(T_f_in)\n', (13644, 13652), True, 'import numpy as np\n'), ((15155, 15171), 'numpy.isscalar', 'np.isscalar', (['T_b'], {}), '(T_b)\n', (15166, 15171), True, 'import numpy as np\n'), ((15291, 15310), 'numpy.isscalar', 'np.isscalar', (['T_f_in'], {}), '(T_f_in)\n', (15302, 15310), True, 'import numpy as np\n'), ((34805, 34835), 'numpy.zeros', 'np.zeros', (['(self.nBoreholes, 1)'], {}), '((self.nBoreholes, 1))\n', (34813, 34835), True, 'import numpy as np\n'), ((34858, 34902), 'numpy.zeros', 'np.zeros', (['(self.nBoreholes, self.nBoreholes)'], {}), '((self.nBoreholes, self.nBoreholes))\n', (34866, 34902), True, 'import numpy as np\n'), ((35564, 35586), 'numpy.empty', 'np.empty', (['self.nInlets'], {}), '(self.nInlets)\n', (35572, 35586), True, 'import numpy as np\n'), ((35666, 35688), 'numpy.empty', 'np.empty', (['self.nInlets'], {}), '(self.nInlets)\n', (35674, 35688), True, 'import numpy as np\n'), ((35753, 35783), 'numpy.empty', 'np.empty', (['(1, self.nBoreholes)'], {}), '((1, self.nBoreholes))\n', (35761, 35783), True, 'import numpy as np\n'), ((38216, 
38245), 'numpy.atleast_1d', 'np.atleast_1d', (['m_flow_network'], {}), '(m_flow_network)\n', (38229, 38245), True, 'import numpy as np\n'), ((38626, 38645), 'numpy.atleast_1d', 'np.atleast_1d', (['cp_f'], {}), '(cp_f)\n', (38639, 38645), True, 'import numpy as np\n'), ((39098, 39145), 'numpy.array', 'np.array', (['[m_flow_in[i] for i in self.iCircuit]'], {}), '([m_flow_in[i] for i in self.iCircuit])\n', (39106, 39145), True, 'import numpy as np\n'), ((39253, 39296), 'numpy.array', 'np.array', (['[cp_in[i] for i in self.iCircuit]'], {}), '([cp_in[i] for i in self.iCircuit])\n', (39261, 39296), True, 'import numpy as np\n'), ((39407, 39431), 'numpy.atleast_1d', 'np.atleast_1d', (['nSegments'], {}), '(nSegments)\n', (39420, 39431), True, 'import numpy as np\n'), ((42570, 42593), 'numpy.sum', 'np.sum', (['self.wBoreholes'], {}), '(self.wBoreholes)\n', (42576, 42593), True, 'import numpy as np\n'), ((47131, 47160), 'numpy.atleast_1d', 'np.atleast_1d', (['m_flow_network'], {}), '(m_flow_network)\n', (47144, 47160), True, 'import numpy as np\n'), ((47572, 47591), 'numpy.atleast_1d', 'np.atleast_1d', (['cp_f'], {}), '(cp_f)\n', (47585, 47591), True, 'import numpy as np\n'), ((48044, 48091), 'numpy.array', 'np.array', (['[m_flow_in[i] for i in self.iCircuit]'], {}), '([m_flow_in[i] for i in self.iCircuit])\n', (48052, 48091), True, 'import numpy as np\n'), ((48199, 48242), 'numpy.array', 'np.array', (['[cp_in[i] for i in self.iCircuit]'], {}), '([cp_in[i] for i in self.iCircuit])\n', (48207, 48242), True, 'import numpy as np\n'), ((48353, 48377), 'numpy.atleast_1d', 'np.atleast_1d', (['nSegments'], {}), '(nSegments)\n', (48366, 48377), True, 'import numpy as np\n'), ((49974, 49994), 'numpy.isscalar', 'np.isscalar', (['R_field'], {}), '(R_field)\n', (49985, 49994), True, 'import numpy as np\n'), ((17669, 17687), 'scipy.linalg.block_diag', 'block_diag', (['*AB[0]'], {}), '(*AB[0])\n', (17679, 17687), False, 'from scipy.linalg import block_diag\n'), ((17704, 17722), 
'scipy.linalg.block_diag', 'block_diag', (['*AB[1]'], {}), '(*AB[1])\n', (17714, 17722), False, 'from scipy.linalg import block_diag\n'), ((17929, 17955), 'numpy.linalg.solve', 'np.linalg.solve', (['ICA', 'c_in'], {}), '(ICA, c_in)\n', (17944, 17955), True, 'import numpy as np\n'), ((17974, 18005), 'numpy.linalg.solve', 'np.linalg.solve', (['ICA', '(c_out @ B)'], {}), '(ICA, c_out @ B)\n', (17989, 18005), True, 'import numpy as np\n'), ((20493, 20511), 'scipy.linalg.block_diag', 'block_diag', (['*AB[0]'], {}), '(*AB[0])\n', (20503, 20511), False, 'from scipy.linalg import block_diag\n'), ((20528, 20546), 'scipy.linalg.block_diag', 'block_diag', (['*AB[1]'], {}), '(*AB[1])\n', (20538, 20546), False, 'from scipy.linalg import block_diag\n'), ((20755, 20785), 'numpy.linalg.solve', 'np.linalg.solve', (['IAC', '(A @ c_in)'], {}), '(IAC, A @ c_in)\n', (20770, 20785), True, 'import numpy as np\n'), ((20804, 20827), 'numpy.linalg.solve', 'np.linalg.solve', (['IAC', 'B'], {}), '(IAC, B)\n', (20819, 20827), True, 'import numpy as np\n'), ((22986, 23005), 'numpy.linalg.inv', 'np.linalg.inv', (['b_in'], {}), '(b_in)\n', (22999, 23005), True, 'import numpy as np\n'), ((27924, 27942), 'scipy.linalg.block_diag', 'block_diag', (['*AB[0]'], {}), '(*AB[0])\n', (27934, 27942), False, 'from scipy.linalg import block_diag\n'), ((27959, 27977), 'scipy.linalg.block_diag', 'block_diag', (['*AB[1]'], {}), '(*AB[1])\n', (27969, 27977), False, 'from scipy.linalg import block_diag\n'), ((30705, 30723), 'scipy.linalg.block_diag', 'block_diag', (['*AB[0]'], {}), '(*AB[0])\n', (30715, 30723), False, 'from scipy.linalg import block_diag\n'), ((30740, 30758), 'scipy.linalg.block_diag', 'block_diag', (['*AB[1]'], {}), '(*AB[1])\n', (30750, 30758), False, 'from scipy.linalg import block_diag\n'), ((34274, 34304), 'numpy.zeros', 'np.zeros', (['(1, self.nBoreholes)'], {}), '((1, self.nBoreholes))\n', (34282, 34304), True, 'import numpy as np\n'), ((35372, 35394), 'numpy.empty', 'np.empty', 
(['self.nInlets'], {}), '(self.nInlets)\n', (35380, 35394), True, 'import numpy as np\n'), ((38302, 38354), 'numpy.tile', 'np.tile', (['(m_flow_network / self.nInlets)', 'self.nInlets'], {}), '(m_flow_network / self.nInlets, self.nInlets)\n', (38309, 38354), True, 'import numpy as np\n'), ((38694, 38721), 'numpy.tile', 'np.tile', (['cp_f', 'self.nInlets'], {}), '(cp_f, self.nInlets)\n', (38701, 38721), True, 'import numpy as np\n'), ((42480, 42535), 'numpy.array', 'np.array', (['[[b.nBoreholes for b in equivalentBoreholes]]'], {}), '([[b.nBoreholes for b in equivalentBoreholes]])\n', (42488, 42535), True, 'import numpy as np\n'), ((46592, 46622), 'numpy.zeros', 'np.zeros', (['(1, self.nBoreholes)'], {}), '((1, self.nBoreholes))\n', (46600, 46622), True, 'import numpy as np\n'), ((47217, 47285), 'numpy.array', 'np.array', (['[(m_flow_network / self.nBoreholes_total) for b in self.b]'], {}), '([(m_flow_network / self.nBoreholes_total) for b in self.b])\n', (47225, 47285), True, 'import numpy as np\n'), ((47640, 47667), 'numpy.tile', 'np.tile', (['cp_f', 'self.nInlets'], {}), '(cp_f, self.nInlets)\n', (47647, 47667), True, 'import numpy as np\n'), ((5564, 5585), 'numpy.atleast_1d', 'np.atleast_1d', (['T_f_in'], {}), '(T_f_in)\n', (5577, 5585), True, 'import numpy as np\n'), ((7146, 7167), 'numpy.atleast_1d', 'np.atleast_1d', (['T_f_in'], {}), '(T_f_in)\n', (7159, 7167), True, 'import numpy as np\n'), ((8674, 8695), 'numpy.atleast_1d', 'np.atleast_1d', (['T_f_in'], {}), '(T_f_in)\n', (8687, 8695), True, 'import numpy as np\n'), ((10192, 10213), 'numpy.atleast_1d', 'np.atleast_1d', (['T_f_in'], {}), '(T_f_in)\n', (10205, 10213), True, 'import numpy as np\n'), ((11853, 11871), 'numpy.atleast_1d', 'np.atleast_1d', (['Q_t'], {}), '(Q_t)\n', (11866, 11871), True, 'import numpy as np\n'), ((13588, 13609), 'numpy.atleast_1d', 'np.atleast_1d', (['T_f_in'], {}), '(T_f_in)\n', (13601, 13609), True, 'import numpy as np\n'), ((15246, 15267), 'numpy.atleast_1d', 'np.atleast_1d', 
(['T_f_in'], {}), '(T_f_in)\n', (15259, 15267), True, 'import numpy as np\n'), ((17874, 17897), 'numpy.eye', 'np.eye', (['self.nBoreholes'], {}), '(self.nBoreholes)\n', (17880, 17897), True, 'import numpy as np\n'), ((20700, 20723), 'numpy.eye', 'np.eye', (['self.nBoreholes'], {}), '(self.nBoreholes)\n', (20706, 20723), True, 'import numpy as np\n'), ((33186, 33206), 'numpy.sum', 'np.sum', (['b_in'], {'axis': '(0)'}), '(b_in, axis=0)\n', (33192, 33206), True, 'import numpy as np\n'), ((33245, 33264), 'numpy.sum', 'np.sum', (['b_b'], {'axis': '(0)'}), '(b_b, axis=0)\n', (33251, 33264), True, 'import numpy as np\n'), ((34367, 34390), 'numpy.sum', 'np.sum', (['self._m_flow_in'], {}), '(self._m_flow_in)\n', (34373, 34390), True, 'import numpy as np\n'), ((37365, 37403), 'numpy.abs', 'np.abs', (['(m_flow_network - mixing_m_flow)'], {}), '(m_flow_network - mixing_m_flow)\n', (37371, 37403), True, 'import numpy as np\n'), ((45472, 45510), 'numpy.sum', 'np.sum', (['(b_in * self.wBoreholes)'], {'axis': '(0)'}), '(b_in * self.wBoreholes, axis=0)\n', (45478, 45510), True, 'import numpy as np\n'), ((45547, 45584), 'numpy.sum', 'np.sum', (['(b_b * self.wBoreholes)'], {'axis': '(0)'}), '(b_b * self.wBoreholes, axis=0)\n', (45553, 45584), True, 'import numpy as np\n'), ((37406, 37427), 'numpy.abs', 'np.abs', (['mixing_m_flow'], {}), '(mixing_m_flow)\n', (37412, 37427), True, 'import numpy as np\n'), ((37752, 37800), 'numpy.abs', 'np.abs', (['(m_flow_network * cp_f - stored_m_flow_cp)'], {}), '(m_flow_network * cp_f - stored_m_flow_cp)\n', (37758, 37800), True, 'import numpy as np\n'), ((38875, 38900), 'numpy.all', 'np.all', (['(cp_in == cp_in[0])'], {}), '(cp_in == cp_in[0])\n', (38881, 38900), True, 'import numpy as np\n'), ((47821, 47846), 'numpy.all', 'np.all', (['(cp_in == cp_in[0])'], {}), '(cp_in == cp_in[0])\n', (47827, 47846), True, 'import numpy as np\n'), ((37801, 37825), 'numpy.abs', 'np.abs', (['stored_m_flow_cp'], {}), '(stored_m_flow_cp)\n', (37807, 37825), True, 
'import numpy as np\n')] |
import matplotlib.pyplot as pl
import os
import numpy as np
from ticle.data.dataHandler import normalizeData, load_file
from ticle.analysis.analysis import get_significant_periods

# Shared matplotlib look-and-feel for every figure produced below.
pl.rc('xtick', labelsize='x-small')
pl.rc('ytick', labelsize='x-small')
pl.rc('font', family='serif')
pl.rcParams.update({'font.size': 20})
pl.tight_layout()  # NOTE(review): called before any figure exists -- no lasting effect

# Output directory for the sigma/PDM plots (reused if it already exists).
path = os.getcwd()
sigma_pdm_dir = f"{path}/results/sigma_pdm"
try:
    os.makedirs(sigma_pdm_dir)
except FileExistsError:
    pass

# dataList.txt: one row per star, column 0 = star id, column 1 = period guess.
data_dir = f"{path}/data/"
data_list_file = f"{data_dir}/dataList.txt"
data_list = np.loadtxt(data_list_file)

for entry in data_list:
    star = f"0{int(entry[0])}"
    file_name = f"{data_dir}/{star}/{star}_LC_destepped.txt"
    res_dir = f"{sigma_pdm_dir}/{star}"
    try:
        os.mkdir(res_dir)
    except FileExistsError:
        pass

    # Load and normalize the light curve, then run the PDM period search.
    series = normalizeData(load_file(file_name))
    pdm = get_significant_periods(series, 20, True)
    periods = pdm['periods']
    # Keep only periods short enough that two full cycles fit in the data span.
    sigma_vals = pdm['lspvals'][periods * 2 < max(series[0])]
    periods = periods[periods * 2 < max(series[0])]

    # Figure 1: the light curve itself.
    lightcurve_fig = pl.figure(figsize=(10, 7))
    pl.plot(series[0], series[1], color='k')
    pl.xlabel("Time(days)")
    pl.ylabel("Flux")
    pl.axhline(y=0, linestyle='dashed', color='k', alpha=0.6)
    pl.title(f"{star}")

    # Figure 2: PDM periodogram with the catalogued period guess marked.
    pdm_fig = pl.figure(figsize=(10, 7))
    pl.plot(periods, sigma_vals, color='k', markersize=3, alpha=0.5)
    pl.plot(periods, sigma_vals, 'x', color='k', markersize=3)
    p_guess = f"{float(entry[1]):.2f}"
    pl.axvline(x=entry[1], color='blue', alpha=0.6, linestyle='dashed', label=rf"$P_{{guess}}={p_guess}$ days")
    pl.xlabel(r"Period(days)")
    pl.ylabel(r"$\Theta_{pdm}$")
    pl.legend()
    pl.title(f"PDM {star}")

    lightcurve_fig.savefig(f"{res_dir}/{star}_lightcurve.pdf")
    pdm_fig.savefig(f"{res_dir}/{star}_pdm.pdf")
| [
"os.makedirs",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"ticle.data.dataHandler.load_file",
"ticle.analysis.analysis.get_significant_periods",
"os.getcwd",
"ticle.data.dataHandler.normalizeData",
"matplotlib.pyplot.rcParams.updat... | [((181, 216), 'matplotlib.pyplot.rc', 'pl.rc', (['"""xtick"""'], {'labelsize': '"""x-small"""'}), "('xtick', labelsize='x-small')\n", (186, 216), True, 'import matplotlib.pyplot as pl\n'), ((217, 252), 'matplotlib.pyplot.rc', 'pl.rc', (['"""ytick"""'], {'labelsize': '"""x-small"""'}), "('ytick', labelsize='x-small')\n", (222, 252), True, 'import matplotlib.pyplot as pl\n'), ((253, 282), 'matplotlib.pyplot.rc', 'pl.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (258, 282), True, 'import matplotlib.pyplot as pl\n'), ((283, 320), 'matplotlib.pyplot.rcParams.update', 'pl.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (301, 320), True, 'import matplotlib.pyplot as pl\n'), ((321, 338), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (336, 338), True, 'import matplotlib.pyplot as pl\n'), ((347, 358), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (356, 358), False, 'import os\n'), ((557, 583), 'numpy.loadtxt', 'np.loadtxt', (['data_list_file'], {}), '(data_list_file)\n', (567, 583), True, 'import numpy as np\n'), ((413, 439), 'os.makedirs', 'os.makedirs', (['sigma_pdm_dir'], {}), '(sigma_pdm_dir)\n', (424, 439), False, 'import os\n'), ((832, 852), 'ticle.data.dataHandler.load_file', 'load_file', (['file_name'], {}), '(file_name)\n', (841, 852), False, 'from ticle.data.dataHandler import normalizeData, load_file\n'), ((868, 891), 'ticle.data.dataHandler.normalizeData', 'normalizeData', (['t_series'], {}), '(t_series)\n', (881, 891), False, 'from ticle.data.dataHandler import normalizeData, load_file\n'), ((903, 946), 'ticle.analysis.analysis.get_significant_periods', 'get_significant_periods', (['t_series', '(20)', '(True)'], {}), '(t_series, 20, True)\n', (926, 946), False, 'from ticle.analysis.analysis import get_significant_periods\n'), ((1117, 1143), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1126, 
1143), True, 'import matplotlib.pyplot as pl\n'), ((1147, 1191), 'matplotlib.pyplot.plot', 'pl.plot', (['t_series[0]', 't_series[1]'], {'color': '"""k"""'}), "(t_series[0], t_series[1], color='k')\n", (1154, 1191), True, 'import matplotlib.pyplot as pl\n'), ((1194, 1217), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Time(days)"""'], {}), "('Time(days)')\n", (1203, 1217), True, 'import matplotlib.pyplot as pl\n'), ((1222, 1239), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (1231, 1239), True, 'import matplotlib.pyplot as pl\n'), ((1244, 1301), 'matplotlib.pyplot.axhline', 'pl.axhline', ([], {'y': '(0)', 'linestyle': '"""dashed"""', 'color': '"""k"""', 'alpha': '(0.6)'}), "(y=0, linestyle='dashed', color='k', alpha=0.6)\n", (1254, 1301), True, 'import matplotlib.pyplot as pl\n'), ((1303, 1322), 'matplotlib.pyplot.title', 'pl.title', (['f"""{star}"""'], {}), "(f'{star}')\n", (1311, 1322), True, 'import matplotlib.pyplot as pl\n'), ((1338, 1364), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (1347, 1364), True, 'import matplotlib.pyplot as pl\n'), ((1369, 1433), 'matplotlib.pyplot.plot', 'pl.plot', (['periods', 'sigma_vals'], {'color': '"""k"""', 'markersize': '(3)', 'alpha': '(0.5)'}), "(periods, sigma_vals, color='k', markersize=3, alpha=0.5)\n", (1376, 1433), True, 'import matplotlib.pyplot as pl\n'), ((1437, 1495), 'matplotlib.pyplot.plot', 'pl.plot', (['periods', 'sigma_vals', '"""x"""'], {'color': '"""k"""', 'markersize': '(3)'}), "(periods, sigma_vals, 'x', color='k', markersize=3)\n", (1444, 1495), True, 'import matplotlib.pyplot as pl\n'), ((1535, 1645), 'matplotlib.pyplot.axvline', 'pl.axvline', ([], {'x': 'data[1]', 'color': '"""blue"""', 'alpha': '(0.6)', 'linestyle': '"""dashed"""', 'label': 'f"""$P_{{guess}}={p_guess}$ days"""'}), "(x=data[1], color='blue', alpha=0.6, linestyle='dashed', label=\n f'$P_{{guess}}={p_guess}$ days')\n", (1545, 1645), True, 'import 
matplotlib.pyplot as pl\n'), ((1642, 1667), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Period(days)"""'], {}), "('Period(days)')\n", (1651, 1667), True, 'import matplotlib.pyplot as pl\n'), ((1673, 1701), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""$\\\\Theta_{pdm}$"""'], {}), "('$\\\\Theta_{pdm}$')\n", (1682, 1701), True, 'import matplotlib.pyplot as pl\n'), ((1706, 1717), 'matplotlib.pyplot.legend', 'pl.legend', ([], {}), '()\n', (1715, 1717), True, 'import matplotlib.pyplot as pl\n'), ((1722, 1745), 'matplotlib.pyplot.title', 'pl.title', (['f"""PDM {star}"""'], {}), "(f'PDM {star}')\n", (1730, 1745), True, 'import matplotlib.pyplot as pl\n'), ((757, 774), 'os.mkdir', 'os.mkdir', (['res_dir'], {}), '(res_dir)\n', (765, 774), False, 'import os\n')] |
import os
import sys
sys.path.append("..")
sys.path.append("../../")
sys.path.append("../../../")
from typing import Type, Union, Dict, List
import numpy as np
import torch.utils.data as data
from PIL import Image
from torchvision import transforms
from yacs.config import CfgNode
from datasets.augmentation import augmentation
from lib.config.config import pth
from lib.utils.base_utils import GetImgFpsAndLabels, LoadImgs
class Dataset(data.Dataset):
    """Image-classification dataset whose class labels are the child
    directory names of ``data_root``.

    Expected directory layout::

        dataset_root
        |
        |- train
        |  |
        |  |- OK
        |  |
        |  |- NG
        |
        |- test
    """

    def __init__(
        self,
        cfg: CfgNode,
        data_root: str,
        split: str,
        cls_names: List[str] = None,
        transforms: transforms = None,
    ) -> None:
        """
        Args:
            cfg: experiment config; may define ``img_width``/``img_height``.
            data_root: dataset directory, relative to ``pth.DATA_DIR``.
            split: split name; augmentation runs only when it equals "train".
            cls_names: unused here; kept for backward compatibility.
            transforms: optional extra transform applied to the RGB image.
        """
        super(Dataset, self).__init__()
        # Image file extensions this dataset recognises.
        self.file_ext = {
            ".jpg",
            ".jpeg",
            ".png",
            ".ppm",
            ".bmp",
            ".pgm",
            ".tif",
            ".tiff",
            ".webp",
        }
        self.cfg = cfg
        self.data_root = os.path.join(pth.DATA_DIR, data_root)
        # Scan data_root once: class names, label map, image/mask paths, targets.
        (
            self.classes,
            self.class_to_idx,
            self.imgs,
            self.targets,
            self.msks,
        ) = GetImgFpsAndLabels(self.data_root)
        self.split = split
        self._transforms = transforms

    def __getitem__(self, img_id: Type[Union[int, tuple]]) -> Dict:
        """Return the ``img_id``-th sample.

        Args:
            img_id: either an int index, or an ``(index, height, width)``
                tuple that overrides the output resolution.

        Returns:
            dict with keys ``img`` (tensor), ``msk`` (tensor), ``meta``
            (split name), ``target`` (class index), ``cls_name`` (label).

        Raises:
            TypeError: if ``img_id`` is neither a tuple nor an int with
                ``img_width``/``img_height`` present in the config.
        """
        if isinstance(img_id, tuple):
            img_id, height, width = img_id
        elif (
            isinstance(img_id, int) and "img_width" in self.cfg and "img_height" in self.cfg
        ):
            # BUG FIX: the original assigned height <- img_width and
            # width <- img_height; each dimension now comes from its own key.
            height, width = self.cfg.img_height, self.cfg.img_width
        else:
            raise TypeError("Invalid type for variable index")

        # Read the RGB image (and its mask, when one exists).
        imgs = LoadImgs(self.imgs, img_id, self.msks)

        # OpenCV/numpy-based data augmentation, training split only.
        if self.split == "train":
            imgs = augmentation(imgs, height, width, self.split)

        # Convert to tensors; torchvision's Resize expects (height, width).
        img_transforms = transforms.Compose(
            [transforms.ToTensor(), transforms.Resize((height, width))]
        )
        for k in imgs.keys():
            if len(imgs[k]) > 0:
                imgs[k] = img_transforms(
                    Image.fromarray(np.ascontiguousarray(imgs[k], np.uint8))
                )

        # Apply the optional user-supplied transform last.
        if self._transforms is not None:
            imgs["img"] = self._transforms(imgs["img"])

        ret = {
            "img": imgs["img"],
            "msk": imgs["msk"],
            "meta": self.split,
            "target": self.targets[img_id],
            "cls_name": self.classes[self.targets[img_id]],
        }
        return ret

    def __len__(self):
        """Return the number of image files found under ``data_root``."""
        return len(self.imgs)
if __name__ == "__main__":
    from yacs.config import CfgNode as CN
    from lib.datasets.make_datasets import make_dataset

    # Smoke test: build a minimal classification config (200x200 inputs,
    # "SampleTrain" split) and print the constructed dataset object.
    cfg = CN()
    cfg.task = "classify"
    cfg.img_width = 200
    cfg.img_height = 200
    cfg.train = CN()
    cfg.train.dataset = "SampleTrain"
    dataset = make_dataset(cfg, cfg.train.dataset)
    print(dataset)
"torchvision.transforms.ToTensor",
"os.path.join",
"datasets.augmentation.augmentation",
"numpy.ascontiguousarray",
"yacs.config.CfgNode",
"torchvision.transforms.Resize",
"lib.utils.base_utils.LoadImgs",
"lib.datasets.make_datasets.make_dataset",
"sys.path.append",
"lib.utils.base_utils.GetImgFps... | [((22, 43), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (37, 43), False, 'import sys\n'), ((44, 69), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (59, 69), False, 'import sys\n'), ((70, 98), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (85, 98), False, 'import sys\n'), ((3525, 3529), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (3527, 3529), True, 'from yacs.config import CfgNode as CN\n'), ((3621, 3625), 'yacs.config.CfgNode', 'CN', ([], {}), '()\n', (3623, 3625), True, 'from yacs.config import CfgNode as CN\n'), ((3679, 3715), 'lib.datasets.make_datasets.make_dataset', 'make_dataset', (['cfg', 'cfg.train.dataset'], {}), '(cfg, cfg.train.dataset)\n', (3691, 3715), False, 'from lib.datasets.make_datasets import make_dataset\n'), ((1203, 1240), 'os.path.join', 'os.path.join', (['pth.DATA_DIR', 'data_root'], {}), '(pth.DATA_DIR, data_root)\n', (1215, 1240), False, 'import os\n'), ((1460, 1494), 'lib.utils.base_utils.GetImgFpsAndLabels', 'GetImgFpsAndLabels', (['self.data_root'], {}), '(self.data_root)\n', (1478, 1494), False, 'from lib.utils.base_utils import GetImgFpsAndLabels, LoadImgs\n'), ((2386, 2424), 'lib.utils.base_utils.LoadImgs', 'LoadImgs', (['self.imgs', 'img_id', 'self.msks'], {}), '(self.imgs, img_id, self.msks)\n', (2394, 2424), False, 'from lib.utils.base_utils import GetImgFpsAndLabels, LoadImgs\n'), ((2520, 2565), 'datasets.augmentation.augmentation', 'augmentation', (['imgs', 'height', 'width', 'self.split'], {}), '(imgs, height, width, self.split)\n', (2532, 2565), False, 'from datasets.augmentation import augmentation\n'), ((2646, 2667), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2665, 2667), False, 'from torchvision import transforms\n'), ((2669, 2703), 'torchvision.transforms.Resize', 'transforms.Resize', (['(width, height)'], {}), '((width, height))\n', (2686, 2703), False, 
'from torchvision import transforms\n'), ((2856, 2895), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['imgs[k]', 'np.uint8'], {}), '(imgs[k], np.uint8)\n', (2876, 2895), True, 'import numpy as np\n')] |
import random
import pubchem as pc
import numpy as np
import pandas as pd
import sklearn as sk
import utility
import db.db as db
from config import config as cc
import sys
from sets import Set
import data
# Shorthand aliases into the experiment configuration: RD holds the data
# parameters (e.g. RD['labels']) and RP the model/evaluation parameters
# (e.g. RP['num_partitions'], RP['batch'], RP['classify_threshold']).
RD = cc.exp['params']['data']
RP = cc.exp['params']['rnn']
class PermutationPartitioner:
    """Yield fixed-size index partitions drawn from a random permutation.

    Not entirely exact: when the permutation is exhausted mid-partition it is
    reshuffled and the partition is completed from the new ordering, so a
    single partition can occasionally contain the same element twice (the two
    concatenated permutations may overlap).
    """

    def __init__(self, samplesCount, partitionSize):
        self.samplesCount = samplesCount
        self.partitionSize = partitionSize
        self.permutation = np.random.permutation(samplesCount)
        self.idx = 0

    def get(self):
        """Return the next ``partitionSize`` indices."""
        end = self.idx + self.partitionSize
        chunk = np.copy(self.permutation[self.idx:end])
        if len(chunk) == self.partitionSize:
            # Enough indices left in the current permutation.
            self.idx = end
            return chunk
        # Ran off the end: reshuffle and top the chunk up from the new order.
        np.random.shuffle(self.permutation)
        self.idx = self.partitionSize - len(chunk)
        return np.concatenate((chunk, self.permutation[:self.idx]))
def computeR2(pred, truth):
    # Squared Pearson correlation between prediction and ground truth.
    correlation = np.corrcoef(pred, truth)[0, 1]
    return correlation ** 2
def computeMSE(pred, truth):
    # Mean squared error; pred and truth are numpy arrays of equal shape.
    return np.mean((pred - truth) ** 2)
def computeMAE(pred, truth):
    # Mean absolute error; pred and truth are numpy arrays of equal shape.
    return np.abs(pred - truth).mean()
def predict(model, input, labels, meta):
    """Evaluate a regression model on random partitions of the dataset.

    Runs ``RP['num_partitions']**2`` rounds; each round scores one random
    partition of the samples and records per-label R2, MSE and MAE.  Per-label
    and overall summaries (mean +/- std over rounds) are printed; the overall
    summary is returned.

    Args:
        model: model exposing ``predict(x, batch_size=...)`` (Keras-style).
        input: model input; with ``RP['edge_prediction']`` it is a pair of
            arrays indexed in lockstep, otherwise a single array.
        labels: normalized targets, one column per label in ``RD['labels']``.
        meta: normalization metadata for ``data.denormalize``.

    Returns:
        dict with ``r2_avg``, ``r2_std``, ``mse_avg``, ``mse_std``,
        ``mae_avg``, ``mae_std`` aggregated over all labels and rounds.
    """
    # BUG FIX: use floor division -- plain '/' yields a float under Python 3,
    # and a float partition size breaks slicing inside PermutationPartitioner.
    if RP['edge_prediction']:
        partitioner = PermutationPartitioner(len(input[0]), len(input[0]) // RP['num_partitions'])
    else:
        partitioner = PermutationPartitioner(len(input), len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'r2': np.zeros((labels.shape[1], iterations)),
        'mse': np.zeros((labels.shape[1], iterations)),
        'mae': np.zeros((labels.shape[1], iterations)),
    }

    # Denormalize the labels once, up front, instead of once per round.
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        print('\titer:\t{}/{}'.format(iteration+1, iterations))
        part = partitioner.get()
        if RP['edge_prediction']:
            partIn = [input[0][part], input[1][part]]
        else:
            partIn = input[part]

        # Transpose so that row i holds every sample's value for label i.
        partLabelsT = labels[part].T
        partPredT = model.predict(partIn, batch_size = RP['batch']).T

        for i in range(labels.shape[1]):
            metrics['r2'][i][iteration] = computeR2(partPredT[i], partLabelsT[i])
            metrics['mse'][i][iteration] = computeMSE(partPredT[i], partLabelsT[i])
            metrics['mae'][i][iteration] = computeMAE(partPredT[i], partLabelsT[i])

        # Release the partition buffers before the next round.
        del partIn
        del partLabelsT
        del partPredT

    metricsPerLabel = {
        'r2_avg': np.nanmean(metrics['r2'], axis = 1),
        'r2_std': np.nanstd(metrics['r2'], axis = 1),
        'mse_avg': np.nanmean(metrics['mse'], axis = 1),
        'mse_std': np.nanstd(metrics['mse'], axis = 1),
        'mae_avg': np.nanmean(metrics['mae'], axis = 1),
        'mae_std': np.nanstd(metrics['mae'], axis = 1),
    }

    metricsOverall = {
        'r2_avg': np.nanmean(metrics['r2']),
        'r2_std': np.nanstd(metrics['r2']),
        'mse_avg': np.nanmean(metrics['mse']),
        'mse_std': np.nanstd(metrics['mse']),
        'mae_avg': np.nanmean(metrics['mae']),
        'mae_std': np.nanstd(metrics['mae']),
    }

    for i, labelName in enumerate(RD['labels']):
        print('{}/{} - {}:'.format(i+1, len(RD['labels']), labelName))
        print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['r2_avg'][i], metricsPerLabel['r2_std'][i]))
        print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['mse_avg'][i], metricsPerLabel['mse_std'][i]))
        print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['mae_avg'][i], metricsPerLabel['mae_std'][i]))

    print('Overall metrics:')
    print('\tR2:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['r2_avg'], metricsOverall['r2_std']))
    print('\tMSE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mse_avg'], metricsOverall['mse_std']))
    print('\tMAE:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['mae_avg'], metricsOverall['mae_std']))

    return metricsOverall
def computeConfusion(pred, truth):
    """Build a 2x2 confusion matrix for threshold-based binary classification.

    Layout::

        #            pred_pos   pred_neg
        #  true_pos     TP         FN
        #  true_neg     FP         TN
    """
    matrix = np.zeros((2, 2))
    thr = RP['classify_threshold']
    pos, neg = RP['classify_label_pos'], RP['classify_label_neg']
    for p, t in zip(pred, truth):
        # Column 0 is "predicted positive" (score >= threshold); a NaN score
        # fails both comparisons and the sample is skipped entirely.
        if p < thr:
            col = 1
        elif p >= thr:
            col = 0
        else:
            continue
        # Row is picked by the ground-truth label; unknown labels are skipped.
        if utility.equals(t, pos):
            matrix[0][col] += 1
        elif utility.equals(t, neg):
            matrix[1][col] += 1
    return matrix
def computeAUC(pred, truth):
    """Return the ROC AUC of ``pred`` scores against binary ``truth`` labels.

    Returns NaN when the score cannot be computed (e.g. ``truth`` contains a
    single class, for which ``roc_auc_score`` raises ValueError).
    """
    try:
        return sk.metrics.roc_auc_score(truth, pred)
    # BUG FIX: was a bare ``except:``, which also swallowed KeyboardInterrupt
    # and SystemExit; catch only ordinary errors.
    except Exception:
        return np.nan
def classify(model, input, labels, meta):
    """Evaluate a binary classifier on random partitions of the dataset.

    Runs ``RP['num_partitions']**2`` rounds; each round scores one random
    partition and records per-label accuracy, log-loss, AUC and the confusion
    matrix.  Per-label and overall summaries (mean +/- std over rounds) are
    printed; the overall summary is returned.

    Args:
        model: model exposing ``predict(x, batch_size=...)`` (Keras-style).
        input: model input; with ``RP['edge_prediction']`` it is a pair of
            arrays indexed in lockstep, otherwise a single array.
        labels: normalized targets, one column per label in ``RD['labels']``.
        meta: normalization metadata for ``data.denormalize``.

    Returns:
        dict with ``acc_avg``, ``acc_std``, ``log_loss_avg``,
        ``log_loss_std``, ``auc_avg``, ``auc_std``.
    """
    # BUG FIX: use floor division -- plain '/' yields a float under Python 3,
    # and a float partition size breaks slicing inside PermutationPartitioner.
    if RP['edge_prediction']:
        partitioner = PermutationPartitioner(len(input[0]), len(input[0]) // RP['num_partitions'])
    else:
        partitioner = PermutationPartitioner(len(input), len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'acc': np.zeros((labels.shape[1], iterations)),
        'log_loss': np.zeros((labels.shape[1], iterations)),
        'auc': np.zeros((labels.shape[1], iterations)),
        'confusion': np.zeros((labels.shape[1], iterations, 2, 2)),
    }

    # Denormalize the labels once, up front, instead of once per round.
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        # CONSISTENCY FIX: report 1-based progress exactly like predict().
        print('\titer:\t{}/{}'.format(iteration+1, iterations))
        part = partitioner.get()
        if RP['edge_prediction']:
            partIn = [input[0][part], input[1][part]]
        else:
            partIn = input[part]

        # Transpose so that row i holds every sample's value for label i.
        partLabelsT = labels[part].T
        partPredT = model.predict(partIn, batch_size = RP['batch']).T

        for i in range(labels.shape[1]):
            confusion = computeConfusion(partPredT[i], partLabelsT[i])
            metrics['confusion'][i][iteration] = confusion
            metrics['acc'][i][iteration] = (confusion[0][0]+confusion[1][1]) / confusion.sum()
            metrics['log_loss'][i][iteration] = utility.logloss(partPredT[i],partLabelsT[i],RP['classify_label_neg'],RP['classify_label_pos'])
            metrics['auc'][i][iteration] = computeAUC(partPredT[i], partLabelsT[i])

        # Release the partition buffers before the next round.
        del partIn
        del partLabelsT
        del partPredT

    metricsPerLabel = {
        'acc_avg': np.nanmean(metrics['acc'], axis = 1),
        'acc_std': np.nanstd(metrics['acc'], axis = 1),
        'log_loss_avg': np.nanmean(metrics['log_loss'], axis = 1),
        'log_loss_std': np.nanstd(metrics['log_loss'], axis = 1),
        'auc_avg': np.nanmean(metrics['auc'], axis = 1),
        'auc_std': np.nanstd(metrics['auc'], axis = 1)
    }

    metricsOverall = {
        'acc_avg': np.nanmean(metrics['acc']),
        'acc_std': np.nanstd(metrics['acc']),
        'log_loss_avg': np.nanmean(metrics['log_loss']),
        'log_loss_std': np.nanstd(metrics['log_loss']),
        'auc_avg': np.nanmean(metrics['auc']),
        'auc_std': np.nanstd(metrics['auc'])
    }

    for i, labelName in enumerate(RD['labels']):
        print('{}/{} - {}:'.format(i+1, len(RD['labels']), labelName))
        print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['acc_avg'][i], metricsPerLabel['acc_std'][i]))
        print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['log_loss_avg'][i], metricsPerLabel['log_loss_std'][i]))
        print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsPerLabel['auc_avg'][i], metricsPerLabel['auc_std'][i]))

    print('Overall metrics:')
    print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['acc_avg'], metricsOverall['acc_std']))
    print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['log_loss_avg'], metricsOverall['log_loss_std']))
    print('\tAUC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['auc_avg'], metricsOverall['auc_std']))

    return metricsOverall
def discreteClassify(model, input, labels, meta):
    """Evaluate a one-hot multi-class classifier on random partitions.

    Runs ``RP['num_partitions']**2`` rounds; each round arg-max-binarizes the
    model's predictions for one random partition and records accuracy and
    log-loss.  The overall summary (mean +/- std over rounds) is printed and
    returned.

    Args:
        model: model exposing ``predict(x, batch_size=...)`` (Keras-style).
        input: single input array.
        labels: normalized one-hot targets.
        meta: normalization metadata for ``data.denormalize``.

    Returns:
        dict with ``acc_avg``, ``acc_std``, ``log_loss_avg``,
        ``log_loss_std``; the AUC keys are kept for interface compatibility
        but are currently None (AUC computation is disabled).
    """
    # BUG FIX: use floor division -- plain '/' yields a float under Python 3,
    # and a float partition size breaks slicing inside PermutationPartitioner.
    partitioner = PermutationPartitioner(len(input), len(input) // RP['num_partitions'])
    iterations = RP['num_partitions']**2

    metrics = {
        'acc': np.zeros(iterations),
        'log_loss': np.zeros(iterations),
    }

    # Denormalize the labels once, up front, instead of once per round.
    labels = data.denormalize(labels, meta)

    for iteration in range(iterations):
        part = partitioner.get()
        partIn = input[part]
        partLabels = labels[part]
        partPred = model.predict(partIn, batch_size = RP['batch'])

        # One-hot encode the arg-max class of every prediction row (replaces
        # the original hand-rolled max loop; identical for the non-negative
        # scores a softmax produces).
        binarizedPred = np.zeros((len(partPred), len(partPred[0])))
        binarizedPred[np.arange(len(partPred)), np.argmax(partPred, axis=1)] = 1

        metrics['acc'][iteration] = sk.metrics.accuracy_score(partLabels,
            binarizedPred)
        metrics['log_loss'][iteration] = sk.metrics.log_loss(partLabels,
            binarizedPred)

    metricsOverall = {
        'acc_avg': np.nanmean(metrics['acc']),
        'acc_std': np.nanstd(metrics['acc']),
        'log_loss_avg': np.nanmean(metrics['log_loss']),
        'log_loss_std': np.nanstd(metrics['log_loss']),
        # AUC computation is disabled; keys retained so callers relying on the
        # classify()-style result shape keep working.
        'auc_avg': None,
        'auc_std': None,
        'auc_micro_avg': None,
        'auc_micro_std': None,
    }

    print('Overall metrics:')
    print('\tACC:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['acc_avg'], metricsOverall['acc_std']))
    print('\tLogLos:\t{0:.3f}\t+/-\t{1:.3f}'.format(metricsOverall['log_loss_avg'], metricsOverall['log_loss_std']))

    return metricsOverall
| [
"numpy.copy",
"numpy.nanstd",
"numpy.random.shuffle",
"data.denormalize",
"numpy.corrcoef",
"numpy.absolute",
"utility.equals",
"sklearn.metrics.roc_auc_score",
"utility.logloss",
"numpy.nanmean",
"numpy.zeros",
"sklearn.metrics.log_loss",
"numpy.concatenate",
"sklearn.metrics.accuracy_sco... | [((1820, 1850), 'data.denormalize', 'data.denormalize', (['labels', 'meta'], {}), '(labels, meta)\n', (1836, 1850), False, 'import data\n'), ((4235, 4251), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (4243, 4251), True, 'import numpy as np\n'), ((5554, 5584), 'data.denormalize', 'data.denormalize', (['labels', 'meta'], {}), '(labels, meta)\n', (5570, 5584), False, 'import data\n'), ((8541, 8571), 'data.denormalize', 'data.denormalize', (['labels', 'meta'], {}), '(labels, meta)\n', (8557, 8571), False, 'import data\n'), ((581, 616), 'numpy.random.permutation', 'np.random.permutation', (['samplesCount'], {}), '(samplesCount)\n', (602, 616), True, 'import numpy as np\n'), ((673, 738), 'numpy.copy', 'np.copy', (['self.permutation[self.idx:self.idx + self.partitionSize]'], {}), '(self.permutation[self.idx:self.idx + self.partitionSize])\n', (680, 738), True, 'import numpy as np\n'), ((1593, 1632), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], iterations)'], {}), '((labels.shape[1], iterations))\n', (1601, 1632), True, 'import numpy as np\n'), ((1649, 1688), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], iterations)'], {}), '((labels.shape[1], iterations))\n', (1657, 1688), True, 'import numpy as np\n'), ((1705, 1744), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], iterations)'], {}), '((labels.shape[1], iterations))\n', (1713, 1744), True, 'import numpy as np\n'), ((2633, 2666), 'numpy.nanmean', 'np.nanmean', (["metrics['r2']"], {'axis': '(1)'}), "(metrics['r2'], axis=1)\n", (2643, 2666), True, 'import numpy as np\n'), ((2688, 2720), 'numpy.nanstd', 'np.nanstd', (["metrics['r2']"], {'axis': '(1)'}), "(metrics['r2'], axis=1)\n", (2697, 2720), True, 'import numpy as np\n'), ((2743, 2777), 'numpy.nanmean', 'np.nanmean', (["metrics['mse']"], {'axis': '(1)'}), "(metrics['mse'], axis=1)\n", (2753, 2777), True, 'import numpy as np\n'), ((2800, 2833), 'numpy.nanstd', 'np.nanstd', (["metrics['mse']"], {'axis': '(1)'}), 
"(metrics['mse'], axis=1)\n", (2809, 2833), True, 'import numpy as np\n'), ((2856, 2890), 'numpy.nanmean', 'np.nanmean', (["metrics['mae']"], {'axis': '(1)'}), "(metrics['mae'], axis=1)\n", (2866, 2890), True, 'import numpy as np\n'), ((2913, 2946), 'numpy.nanstd', 'np.nanstd', (["metrics['mae']"], {'axis': '(1)'}), "(metrics['mae'], axis=1)\n", (2922, 2946), True, 'import numpy as np\n'), ((2998, 3023), 'numpy.nanmean', 'np.nanmean', (["metrics['r2']"], {}), "(metrics['r2'])\n", (3008, 3023), True, 'import numpy as np\n'), ((3043, 3067), 'numpy.nanstd', 'np.nanstd', (["metrics['r2']"], {}), "(metrics['r2'])\n", (3052, 3067), True, 'import numpy as np\n'), ((3088, 3114), 'numpy.nanmean', 'np.nanmean', (["metrics['mse']"], {}), "(metrics['mse'])\n", (3098, 3114), True, 'import numpy as np\n'), ((3135, 3160), 'numpy.nanstd', 'np.nanstd', (["metrics['mse']"], {}), "(metrics['mse'])\n", (3144, 3160), True, 'import numpy as np\n'), ((3181, 3207), 'numpy.nanmean', 'np.nanmean', (["metrics['mae']"], {}), "(metrics['mae'])\n", (3191, 3207), True, 'import numpy as np\n'), ((3228, 3253), 'numpy.nanstd', 'np.nanstd', (["metrics['mae']"], {}), "(metrics['mae'])\n", (3237, 3253), True, 'import numpy as np\n'), ((4835, 4872), 'sklearn.metrics.roc_auc_score', 'sk.metrics.roc_auc_score', (['truth', 'pred'], {}), '(truth, pred)\n', (4859, 4872), True, 'import sklearn as sk\n'), ((5254, 5293), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], iterations)'], {}), '((labels.shape[1], iterations))\n', (5262, 5293), True, 'import numpy as np\n'), ((5315, 5354), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], iterations)'], {}), '((labels.shape[1], iterations))\n', (5323, 5354), True, 'import numpy as np\n'), ((5371, 5410), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], iterations)'], {}), '((labels.shape[1], iterations))\n', (5379, 5410), True, 'import numpy as np\n'), ((5433, 5478), 'numpy.zeros', 'np.zeros', (['(labels.shape[1], iterations, 2, 2)'], {}), '((labels.shape[1], iterations, 
2, 2))\n', (5441, 5478), True, 'import numpy as np\n'), ((6569, 6603), 'numpy.nanmean', 'np.nanmean', (["metrics['acc']"], {'axis': '(1)'}), "(metrics['acc'], axis=1)\n", (6579, 6603), True, 'import numpy as np\n'), ((6626, 6659), 'numpy.nanstd', 'np.nanstd', (["metrics['acc']"], {'axis': '(1)'}), "(metrics['acc'], axis=1)\n", (6635, 6659), True, 'import numpy as np\n'), ((6687, 6726), 'numpy.nanmean', 'np.nanmean', (["metrics['log_loss']"], {'axis': '(1)'}), "(metrics['log_loss'], axis=1)\n", (6697, 6726), True, 'import numpy as np\n'), ((6754, 6792), 'numpy.nanstd', 'np.nanstd', (["metrics['log_loss']"], {'axis': '(1)'}), "(metrics['log_loss'], axis=1)\n", (6763, 6792), True, 'import numpy as np\n'), ((6815, 6849), 'numpy.nanmean', 'np.nanmean', (["metrics['auc']"], {'axis': '(1)'}), "(metrics['auc'], axis=1)\n", (6825, 6849), True, 'import numpy as np\n'), ((6872, 6905), 'numpy.nanstd', 'np.nanstd', (["metrics['auc']"], {'axis': '(1)'}), "(metrics['auc'], axis=1)\n", (6881, 6905), True, 'import numpy as np\n'), ((6957, 6983), 'numpy.nanmean', 'np.nanmean', (["metrics['acc']"], {}), "(metrics['acc'])\n", (6967, 6983), True, 'import numpy as np\n'), ((7004, 7029), 'numpy.nanstd', 'np.nanstd', (["metrics['acc']"], {}), "(metrics['acc'])\n", (7013, 7029), True, 'import numpy as np\n'), ((7055, 7086), 'numpy.nanmean', 'np.nanmean', (["metrics['log_loss']"], {}), "(metrics['log_loss'])\n", (7065, 7086), True, 'import numpy as np\n'), ((7112, 7142), 'numpy.nanstd', 'np.nanstd', (["metrics['log_loss']"], {}), "(metrics['log_loss'])\n", (7121, 7142), True, 'import numpy as np\n'), ((7163, 7189), 'numpy.nanmean', 'np.nanmean', (["metrics['auc']"], {}), "(metrics['auc'])\n", (7173, 7189), True, 'import numpy as np\n'), ((7210, 7235), 'numpy.nanstd', 'np.nanstd', (["metrics['auc']"], {}), "(metrics['auc'])\n", (7219, 7235), True, 'import numpy as np\n'), ((8312, 8332), 'numpy.zeros', 'np.zeros', (['iterations'], {}), '(iterations)\n', (8320, 8332), True, 'import numpy as 
np\n'), ((8356, 8376), 'numpy.zeros', 'np.zeros', (['iterations'], {}), '(iterations)\n', (8364, 8376), True, 'import numpy as np\n'), ((9651, 9703), 'sklearn.metrics.accuracy_score', 'sk.metrics.accuracy_score', (['partLabels', 'binarizedPred'], {}), '(partLabels, binarizedPred)\n', (9676, 9703), True, 'import sklearn as sk\n'), ((9761, 9807), 'sklearn.metrics.log_loss', 'sk.metrics.log_loss', (['partLabels', 'binarizedPred'], {}), '(partLabels, binarizedPred)\n', (9780, 9807), True, 'import sklearn as sk\n'), ((10772, 10798), 'numpy.nanmean', 'np.nanmean', (["metrics['acc']"], {}), "(metrics['acc'])\n", (10782, 10798), True, 'import numpy as np\n'), ((10819, 10844), 'numpy.nanstd', 'np.nanstd', (["metrics['acc']"], {}), "(metrics['acc'])\n", (10828, 10844), True, 'import numpy as np\n'), ((10870, 10901), 'numpy.nanmean', 'np.nanmean', (["metrics['log_loss']"], {}), "(metrics['log_loss'])\n", (10880, 10901), True, 'import numpy as np\n'), ((10927, 10957), 'numpy.nanstd', 'np.nanstd', (["metrics['log_loss']"], {}), "(metrics['log_loss'])\n", (10936, 10957), True, 'import numpy as np\n'), ((792, 827), 'numpy.random.shuffle', 'np.random.shuffle', (['self.permutation'], {}), '(self.permutation)\n', (809, 827), True, 'import numpy as np\n'), ((901, 952), 'numpy.concatenate', 'np.concatenate', (['(part, self.permutation[:self.idx])'], {}), '((part, self.permutation[:self.idx]))\n', (915, 952), True, 'import numpy as np\n'), ((1215, 1240), 'numpy.absolute', 'np.absolute', (['(pred - truth)'], {}), '(pred - truth)\n', (1226, 1240), True, 'import numpy as np\n'), ((4418, 4447), 'utility.equals', 'utility.equals', (['truth[i]', 'pos'], {}), '(truth[i], pos)\n', (4432, 4447), False, 'import utility\n'), ((6280, 6382), 'utility.logloss', 'utility.logloss', (['partPredT[i]', 'partLabelsT[i]', "RP['classify_label_neg']", "RP['classify_label_pos']"], {}), "(partPredT[i], partLabelsT[i], RP['classify_label_neg'], RP[\n 'classify_label_pos'])\n", (6295, 6382), False, 'import 
utility\n'), ((1070, 1096), 'numpy.corrcoef', 'np.corrcoef', (['[pred, truth]'], {}), '([pred, truth])\n', (1081, 1096), True, 'import numpy as np\n'), ((4501, 4530), 'utility.equals', 'utility.equals', (['truth[i]', 'neg'], {}), '(truth[i], neg)\n', (4515, 4530), False, 'import utility\n'), ((4611, 4640), 'utility.equals', 'utility.equals', (['truth[i]', 'pos'], {}), '(truth[i], pos)\n', (4625, 4640), False, 'import utility\n'), ((4694, 4723), 'utility.equals', 'utility.equals', (['truth[i]', 'neg'], {}), '(truth[i], neg)\n', (4708, 4723), False, 'import utility\n')] |
from algos.custom_gym_loop import ReinforcementLearning
from collections import deque
import numpy as np
import tensorflow as tf
class Lagrangian( ReinforcementLearning ):
"""
Class that inherits from ReinforcementLearning to implements the REINFORCE algorithm, the original paper can be found here:
https://proceedings.neurips.cc/paper/1999/file/464d828b85b0bed98e80ade0a5c43b0f-Paper.pdf [1]
[1] Policy Gradient Methods for Reinforcement Learning with Function Approximation,
Sutton et al.,
Advances in neural information processing systems, 1999
"""
# Constructor of the class
def __init__( self, env, verbose, str_mod="Lagrangian", seed=None, **kwargs ):
    """Build the softmax actor network and set default hyper-parameters.

    Args:
        env: environment forwarded to the parent ReinforcementLearning loop.
        verbose: verbosity flag forwarded to the parent class.
        str_mod: algorithm name tag (default "Lagrangian").
        seed: random seed applied to both TensorFlow and NumPy.
        **kwargs: overrides for any already-defined attribute (e.g. gamma,
            trajectory_update, lagrangian_var, cost_limit); None values are
            ignored.
    """
    super().__init__( env, verbose, str_mod, seed )

    # Seed both frameworks for reproducibility.
    tf.random.set_seed( seed )
    np.random.seed( seed )

    # Policy network: softmax head over the discrete action space.
    self.actor = self.generate_model(self.input_shape, self.action_space.n, last_activation='softmax')

    self.actor_optimizer = tf.keras.optimizers.Adam()

    # Default hyper-parameters (overridable through **kwargs below).
    self.memory_size = None      # None => unbounded memory buffer
    self.gamma = 0.99            # discount factor
    self.trajectory_update = 5   # update every N terminal episodes
    self.trajectory_mean = False
    self.lagrangian_var = 1      # initial Lagrange multiplier
    self.cost_limit = 50         # constraint threshold on trajectory cost

    # Attribute-name -> short tag mapping; presumably used by the parent
    # loop for run naming/logging -- confirm against ReinforcementLearning.
    self.relevant_params = {
        'gamma' : 'gamma',
        'trajectory_update' : 'tu',
        'lagrangian_var' : 'lambda',
        'cost_limit' : 'climit'
    }

    # Override the default parameters with kwargs
    for key, value in kwargs.items():
        if hasattr(self, key) and value is not None:
            setattr(self, key, value)

    self.memory_buffer = deque( maxlen=self.memory_size )
# Mandatory method to implement for the ReinforcementLearning class, decide the
# update frequency and some variable update for on/off policy algorithms
# (i.e., eps_greedy, buffer, ...)
def network_update_rule(self, episode, terminal):
    """Run an on-policy update every <trajectory_update> completed episodes.

    Updates only fire on terminal states (so full trajectories are collected)
    and the buffer is emptied afterwards, as required for an on-policy method.
    """
    if not terminal or episode % self.trajectory_update != 0:
        return
    self.update_networks(np.array(self.memory_buffer, dtype=object))
    self.memory_buffer.clear()
# Application of the gradient with TensorFlow and based on the objective function
def update_networks( self, memory_buffer ):
# Extract values from buffer for the advantage computation
cost = memory_buffer[:, 6]
done = np.vstack(memory_buffer[:, 5])
end_trajectories = np.where(done == True)[0]
#
trajectory_cost = []
counter = 0
for i in end_trajectories:
trajectory_cost.append( sum(cost[counter : i+1]) )
counter = i+1
# Lagrangian variable update (simulation of 1-variable gradient step)
# simulation of a SGD with a fixed learning rate of 0.05
cost_barrier = np.mean(trajectory_cost) - self.cost_limit
if cost_barrier <= 0: self.lagrangian_var -= 0.05
else: self.lagrangian_var += 0.05
# Limit of the lagrangian multiplier >= 0
if self.lagrangian_var < 0: self.lagrangian_var = 0
# Actor update (repeated 1 time for each call):
with tf.GradientTape() as actor_tape:
# Compute the objective function, compute the gradient information and apply the
# gradient with the optimizer
actor_objective_function = self.actor_objective_function( memory_buffer )
actor_gradient = actor_tape.gradient(actor_objective_function, self.actor.trainable_variables)
self.actor_optimizer.apply_gradients( zip(actor_gradient, self.actor.trainable_variables) )
# Mandatory method to implement for the ReinforcementLearning class
# here we select thea action based on the state, for policy gradient method we obtain
# a probability from the network, from where we perform a sampling
def get_action(self, state):
softmax_out = self.actor(state.reshape((1, -1)))
selected_action = np.random.choice(self.action_space.n, p=softmax_out.numpy()[0])
return selected_action, None
# Computing the objective function of the actor for the gradient ascent procedure,
# here is where the 'magic happens'
def actor_objective_function( self, memory_buffer ):
# Extract values from buffer
state = np.vstack(memory_buffer[:, 0])
reward = memory_buffer[:, 3]
action = memory_buffer[:, 1]
done = np.vstack(memory_buffer[:, 5])
cost = memory_buffer[:, 6]
# For multiple trajectories find the end of each one
end_trajectories = np.where(done == True)[0]
# Extract the proability of the action with the crurent policy,
# execute the current policy on the state to obtain the probailities for each action
# than, using the action selected by the network in the buffer, compute the probability
# from the output. Notice that i need to execute again the network and can not use the probaiblity in the buffer
# computed at runtime beacuse we need the gradient informations
probability = self.actor(state)
action_idx = [[counter, val] for counter, val in enumerate(action)]
probability = tf.expand_dims(tf.gather_nd(probability, action_idx), axis=-1)
# Computation of the log_prob and the sum of the reward for each trajectory.
# To obtain the probability of the trajectory i need to sum up the values for each single trajectory and multiply
# this value for the cumulative reward (no discounted or 'reward to go' for this vanilla implementation).
trajectory_probabilities = []
trajectory_rewards = []
trajectory_cost = []
counter = 0
for i in end_trajectories:
trajectory_probabilities.append( tf.math.reduce_sum( tf.math.log(probability[counter : i+1])) )
trajectory_rewards.append( sum(reward[counter : i+1]) )
trajectory_cost.append( sum(cost[counter : i+1]) )
counter = i+1
# Multiplication of log_prob times the reward of the trajectory
# here we obtain an array of N elements where N is the number of trajectories (this
# value depends on the parameter <trajectory_update>).
trajectory_objectives = []
for log_prob, rw, cs in zip(trajectory_probabilities, trajectory_rewards, trajectory_cost):
#trajectory_objectives.append( log_prob * (rw - np.mean(trajectory_rewards)) )
trajectory_objectives.append( log_prob * (rw - self.lagrangian_var * (cs - self.cost_limit)) )
# Computing the mean value between all the trajectories, this introduce siamo variance but reduce
# the bias, see the original paper for more details about the baseline
objective_function = tf.reduce_mean( trajectory_objectives )
# NB: returna negative value to automatically use a gradient ascent approach
# on TensorFlow
return -objective_function
| [
"numpy.mean",
"collections.deque",
"tensorflow.random.set_seed",
"numpy.where",
"tensorflow.math.log",
"tensorflow.keras.optimizers.Adam",
"tensorflow.GradientTape",
"numpy.array",
"numpy.random.seed",
"numpy.vstack",
"tensorflow.reduce_mean",
"tensorflow.gather_nd"
] | [((741, 765), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), '(seed)\n', (759, 765), True, 'import tensorflow as tf\n'), ((770, 790), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (784, 790), True, 'import numpy as np\n'), ((929, 955), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (953, 955), True, 'import tensorflow as tf\n'), ((1453, 1483), 'collections.deque', 'deque', ([], {'maxlen': 'self.memory_size'}), '(maxlen=self.memory_size)\n', (1458, 1483), False, 'from collections import deque\n'), ((2332, 2362), 'numpy.vstack', 'np.vstack', (['memory_buffer[:, 5]'], {}), '(memory_buffer[:, 5])\n', (2341, 2362), True, 'import numpy as np\n'), ((4060, 4090), 'numpy.vstack', 'np.vstack', (['memory_buffer[:, 0]'], {}), '(memory_buffer[:, 0])\n', (4069, 4090), True, 'import numpy as np\n'), ((4162, 4192), 'numpy.vstack', 'np.vstack', (['memory_buffer[:, 5]'], {}), '(memory_buffer[:, 5])\n', (4171, 4192), True, 'import numpy as np\n'), ((6305, 6342), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['trajectory_objectives'], {}), '(trajectory_objectives)\n', (6319, 6342), True, 'import tensorflow as tf\n'), ((2385, 2407), 'numpy.where', 'np.where', (['(done == True)'], {}), '(done == True)\n', (2393, 2407), True, 'import numpy as np\n'), ((2702, 2726), 'numpy.mean', 'np.mean', (['trajectory_cost'], {}), '(trajectory_cost)\n', (2709, 2726), True, 'import numpy as np\n'), ((2994, 3011), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3009, 3011), True, 'import tensorflow as tf\n'), ((4299, 4321), 'numpy.where', 'np.where', (['(done == True)'], {}), '(done == True)\n', (4307, 4321), True, 'import numpy as np\n'), ((4888, 4925), 'tensorflow.gather_nd', 'tf.gather_nd', (['probability', 'action_idx'], {}), '(probability, action_idx)\n', (4900, 4925), True, 'import tensorflow as tf\n'), ((2025, 2067), 'numpy.array', 'np.array', (['self.memory_buffer'], {'dtype': 'object'}), 
'(self.memory_buffer, dtype=object)\n', (2033, 2067), True, 'import numpy as np\n'), ((5421, 5460), 'tensorflow.math.log', 'tf.math.log', (['probability[counter:i + 1]'], {}), '(probability[counter:i + 1])\n', (5432, 5460), True, 'import tensorflow as tf\n')] |
from typing import Tuple
import numpy as np
from PyGenetic.crossover import CrossoverDecidor
from PyGenetic.mutation import MutationDecidor
class FactoryPopulation():
def __init__(self):
self.crossover_decidor = CrossoverDecidor(self.crossover_type,
self.n_genes)
self.mutation_decidor = MutationDecidor(self.mutation_type,
self.n_genes,
self.mutation_propability,
self.low_boundery,
self.high_boundery)
def crossover(self, first_chromo: np.array,
second_chromo: np.array) -> Tuple[np.array, np.array]:
first_child, second_child = self.crossover_decidor.run(
first_chromo, second_chromo)
return (first_child, second_child)
def mutation(self, chromosome: np.array) -> np.array:
self.mutation_decidor.run(chromosome)
def parent_selection(self, fitness: np.array) -> None:
fit_idx = np.argsort(fitness)[::-1]
self.parents = self.pool[fit_idx[:self.n_parents]]
def breed_childern(self) -> None:
for i in range(self.n_pool // 2):
first_chromo = self.parents[np.random.choice(range(
self.n_parents))]
second_chromo = self.parents[np.random.choice(range(
self.n_parents))]
first_child, second_child = self.crossover(first_chromo,
second_chromo)
self.mutation(first_child)
self.mutation(second_child)
self.pool[i:i + 2] = [first_child, second_child]
self.pool[-1] = self.parents[0]
| [
"PyGenetic.mutation.MutationDecidor",
"numpy.argsort",
"PyGenetic.crossover.CrossoverDecidor"
] | [((227, 278), 'PyGenetic.crossover.CrossoverDecidor', 'CrossoverDecidor', (['self.crossover_type', 'self.n_genes'], {}), '(self.crossover_type, self.n_genes)\n', (243, 278), False, 'from PyGenetic.crossover import CrossoverDecidor\n'), ((362, 481), 'PyGenetic.mutation.MutationDecidor', 'MutationDecidor', (['self.mutation_type', 'self.n_genes', 'self.mutation_propability', 'self.low_boundery', 'self.high_boundery'], {}), '(self.mutation_type, self.n_genes, self.mutation_propability,\n self.low_boundery, self.high_boundery)\n', (377, 481), False, 'from PyGenetic.mutation import MutationDecidor\n'), ((1127, 1146), 'numpy.argsort', 'np.argsort', (['fitness'], {}), '(fitness)\n', (1137, 1146), True, 'import numpy as np\n')] |
import threading
import requests
from bs4 import BeautifulSoup
import uuid, base64
import io
import xlsxwriter
from .tree import Tree
from .webpage_classifier import WebpageClassifier
from .steady_state_genetic import SteadyStateGenetic
from .general_regression_neural_network import GeneralRegressionNeuralNetwork
import numpy as np
class WebScraper(threading.Thread):
def __init__(self, client, url, depth):
super().__init__(daemon=True, target=self.run)
self.client = client
self.url = url
self.depth = depth
self.tree = Tree(self)
self.webpage_classifier = WebpageClassifier(self.client)
self.num_websites_scraped = 0
self.num_sites_in_interval = 0
self.predictions = []
self.output_array = []
self.predictions_output = []
self.population = []
def run(self):
print("Web Scraper initialized.")
self.tree.add_node(self.url)
self.iterative_deepening_search(self.url, self.depth)
def depth_limited_search(self, node, depth):
def recursive_depth_limited_search(node, depth):
experiment_one = threading.Thread(target=self.experiment_one, args=[self.tree[node]], daemon=True)
experiment_one.start()
experiment_one.join()
unigram_vector = self.experiment_one(self.tree[node])
if unigram_vector is None:
return 'cutoff'
# experiment_two = threading.Thread(target=self.experiment_two, args=[self.tree[node]], daemon=True)
# experiment_two.start()
# experiment_two.join()
children = self.tree[node].find_children()
if depth == 0:
return 'cutoff'
else:
cutoff_occurred = False
for child in children:
result = recursive_depth_limited_search(child, depth - 1)
if result == 'cutoff':
cutoff_occurred = True
elif result is not None:
return result
return 'cutoff' if cutoff_occurred else None
return recursive_depth_limited_search(node, depth)
def iterative_deepening_search(self, rootNode, maxDepth):
for depth in range(maxDepth):
result = self.depth_limited_search(rootNode, depth)
if result != 'cutoff':
self.client.gui.display_message('\nWebsites Added: ' + repr(self.websites_added))
print('\nWebsites Added: ' + repr(self.websites_added))
return result
def experiment_one(self, site):
file_save = threading.Thread(target=site.save_file, daemon=True)
file_save.start()
file_save.join()
unigram_vector = self.webpage_classifier.scrape_site(site)
if unigram_vector is None:
return None
self.num_websites_scraped = self.num_websites_scraped + 1
# self.predictions.append(unigram_vector[1])
self.client.gui.display_message(repr(self.num_websites_scraped) + " sites scraped.")
self.predictions_output.append([site.identifier, unigram_vector[1]])
self.predictions.append(unigram_vector[1])
if unigram_vector[1] >= -0.015 and unigram_vector[1] <= 0.015:
self.num_sites_in_interval = self.num_sites_in_interval + 1
print(site.identifier)
if self.num_websites_scraped == 100:
print("100 websites scraped!")
standard_deviation_first = np.std(self.predictions)
mean_first = np.mean(self.predictions)
self.client.gui.display_message("Standard Deviation of First 100 sites scraped: " + repr(standard_deviation_first))
self.client.gui.display_message("Mean of First 100 sites scraped: " + repr(mean_first))
print("Standard Deviation of First 100 sites scraped: " + repr(standard_deviation_first))
print("Mean of First 100 sites scraped: " + repr(mean_first))
self.output_array.append(['standard_deviation_first', standard_deviation_first])
self.output_array.append(['mean_first', mean_first])
if self.num_websites_scraped == 200:
print("200 sites scraped!")
standard_deviation_last = np.std(self.predictions[99:200])
mean_last = np.mean(self.predictions[99:200])
self.client.gui.display_message("Standard Deviation of Last 100 sites scraped: " + repr(standard_deviation_last))
self.client.gui.display_message("Mean of Last 100 sites scraped: " + repr(mean_last))
print("Standard Deviation of Last 100 sites scraped: " + repr(standard_deviation_last))
print("Mean of Last 100 sites scraped: " + repr(mean_last))
self.output_array.append(['standard_deviation_last', standard_deviation_last])
self.output_array.append(['mean_last', mean_last])
standard_deviation_overall = np.std(self.predictions)
mean_overall = np.mean(self.predictions)
self.client.gui.display_message("Standard Deviation of 200 sites scraped: " + repr(standard_deviation_overall))
self.client.gui.display_message("Mean of 200 sites scraped: " + repr(mean_overall))
print("Standard Deviation of 200 sites scraped: " + repr(standard_deviation_overall))
print("Mean of 200 sites scraped: " + repr(mean_overall))
self.output_array.append(['standard_deviation_overall', standard_deviation_overall])
self.output_array.append(['mean_overall', mean_overall])
self.client.gui.display_message("Number of Sites in the interval [-0.015, 0.015]: " + repr(self.num_sites_in_interval))
print("Number of Sites in the interval [-0.015, 0.015]: " + repr(self.num_sites_in_interval))
self.output_array.append(['num_sites_in_interval', self.num_sites_in_interval])
self.write_to_excel(self.predictions_output, self.output_array, 'web-scraper-outputs.xlsx')
return unigram_vector
def experiment_two(self, site):
file_save = threading.Thread(target=site.save_file, daemon=True)
file_save.start()
file_save.join()
frequency = site.frequency
if frequency is None:
return None
# extract the chars and their unigram_vector into two lists
chars, unigram_vector = map(list,zip(*frequency))
sum = 0
for x in range(len(unigram_vector)):
sum = unigram_vector[x] + sum
for x in range(len(unigram_vector)):
unigram_vector[x] = unigram_vector[x] / sum
unigram_vector = self.webpage_classifier.normalize_dataset(unigram_vector)
classification = 0 # temp
unigram_vector.insert(0, classification)
unigram_vector.insert(0, 0)
self.num_websites_scraped = self.num_websites_scraped + 1
self.population.append(unigram_vector)
def write_to_excel(self, predictions, output_array, filename):
workbook = xlsxwriter.Workbook(filename)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, "URL")
worksheet.write(0, 1, "Prediction")
column = 0
row = 1
for url, prediction in (predictions):
worksheet.write(row, column, url)
worksheet.write(row, column + 1, prediction)
row = row + 1
row = 0
for column_name, value in (output_array):
worksheet.write(row, column + 2, column_name)
worksheet.write(row, column + 3, value)
row = row + 1
workbook.close()
| [
"threading.Thread",
"numpy.mean",
"xlsxwriter.Workbook",
"numpy.std"
] | [((2669, 2721), 'threading.Thread', 'threading.Thread', ([], {'target': 'site.save_file', 'daemon': '(True)'}), '(target=site.save_file, daemon=True)\n', (2685, 2721), False, 'import threading\n'), ((6164, 6216), 'threading.Thread', 'threading.Thread', ([], {'target': 'site.save_file', 'daemon': '(True)'}), '(target=site.save_file, daemon=True)\n', (6180, 6216), False, 'import threading\n'), ((7100, 7129), 'xlsxwriter.Workbook', 'xlsxwriter.Workbook', (['filename'], {}), '(filename)\n', (7119, 7129), False, 'import xlsxwriter\n'), ((1148, 1234), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.experiment_one', 'args': '[self.tree[node]]', 'daemon': '(True)'}), '(target=self.experiment_one, args=[self.tree[node]], daemon\n =True)\n', (1164, 1234), False, 'import threading\n'), ((3554, 3578), 'numpy.std', 'np.std', (['self.predictions'], {}), '(self.predictions)\n', (3560, 3578), True, 'import numpy as np\n'), ((3604, 3629), 'numpy.mean', 'np.mean', (['self.predictions'], {}), '(self.predictions)\n', (3611, 3629), True, 'import numpy as np\n'), ((4320, 4352), 'numpy.std', 'np.std', (['self.predictions[99:200]'], {}), '(self.predictions[99:200])\n', (4326, 4352), True, 'import numpy as np\n'), ((4377, 4410), 'numpy.mean', 'np.mean', (['self.predictions[99:200]'], {}), '(self.predictions[99:200])\n', (4384, 4410), True, 'import numpy as np\n'), ((5005, 5029), 'numpy.std', 'np.std', (['self.predictions'], {}), '(self.predictions)\n', (5011, 5029), True, 'import numpy as np\n'), ((5057, 5082), 'numpy.mean', 'np.mean', (['self.predictions'], {}), '(self.predictions)\n', (5064, 5082), True, 'import numpy as np\n')] |
import cards.CardUtils as CardUtils
from players.Player import Player
import numpy as np
class PredictorPlayer(Player):
def __init__(self, actionPredictor, stateValuePredictor):
super().__init__()
self.actionPredictor = actionPredictor
self.stateValuePredictor = stateValuePredictor
def selectCard(self, state, env):
validCards = env.getValidCards()
predictions = self.actionPredictor.predict([state])[0]
predictions = self.removeInvalidActions(predictions, validCards)
action = np.random.choice(len(predictions), p=predictions)
return action
# set the probability of selecting an invalid action to 0
# and normailze the probabilty array so that its sum equals 1
def removeInvalidActions(self, predictions, validCards):
for actionIndex in range(len(predictions)):
if not actionIndex in validCards:
predictions[actionIndex] = 0
total = np.sum(predictions)
for predictionIndex in range(len(predictions)):
predictions[predictionIndex] = predictions[predictionIndex] / total
return predictions
| [
"numpy.sum"
] | [((967, 986), 'numpy.sum', 'np.sum', (['predictions'], {}), '(predictions)\n', (973, 986), True, 'import numpy as np\n')] |
import torch
import numpy as np
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,
build_sampler, merge_aug_bboxes, merge_aug_masks,
multiclass_nms)
from mmdet.core.bbox import bbox_mapping_back
from .cascade_roi_head import CascadeRoIHead
from ..builder import HEADS, build_head, build_roi_extractor
@HEADS.register_module()
class CascadeAttrRoIHead(CascadeRoIHead):
def __init__(self, *arg, **kwargs):
super(CascadeAttrRoIHead, self).__init__(*arg, **kwargs)
def init_mask_head(self, mask_roi_extractor, mask_head):
"""Initialize ``mask_head``"""
if mask_roi_extractor is not None:
self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = build_head(mask_head)
def _attr_bbox_forward(self, stage, x, rois):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred, attr_score = bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
attr_score=attr_score,
bbox_feats=bbox_feats)
return bbox_results
def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
"""Mask head forward function used in both training and testing."""
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
assert bbox_feats is not None
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)
return mask_results
def aug_test_attr(self, x, proposal_list, img_metas, rescale=False, **kwargs):
det_bboxes, det_labels, det_attr_scores, det_scores = \
self.aug_test_bboxes(x, img_metas, proposal_list, self.test_cfg)
det_attributes = []
det_attr_scores = det_attr_scores.detach().cpu().numpy()
for det_attr_score in det_attr_scores:
det_attributes.append(np.argwhere(det_attr_score > self.test_cfg.attribute_score_thr).reshape(-1,))
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
bbox_results = bbox2result(_det_bboxes, det_labels,
self.test_cfg.num_classes)
if self.with_mask == True:
segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
det_labels)
else:
segm_results = None
attribute_results = []
det_labels_tmp = det_labels.clone().detach().cpu().numpy()
for i in range(self.test_cfg.num_classes):
ids = np.argwhere(det_labels_tmp == i).reshape(-1)
attribute_results.append([det_attributes[id] for id in ids])
results = dict(
det_results=[bbox_results],
attr_results=attribute_results,
segm_results=segm_results,
garments_bboxes=_det_bboxes,
garments_labels=det_labels,
garments_scores=det_scores,
det_attr_scores=det_attr_scores)
return results
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
aug_attr_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
ms_scores = []
if rois.shape[0] == 0:
# There is no proposal in the single image
aug_bboxes.append(rois.new_zeros(0, 4))
aug_scores.append(rois.new_zeros(0, 1))
continue
for i in range(self.num_stages):
if i < self.num_stages - 1:
bbox_results = self._bbox_forward(i, x, rois)
ms_scores.append(bbox_results['cls_score'])
cls_score = bbox_results['cls_score']
if self.bbox_head[i].custom_activation:
cls_score = self.bbox_head[i].loss_cls.get_activation(
cls_score)
bbox_label = cls_score[:, :-1].argmax(dim=1)
rois = self.bbox_head[i].regress_by_class(
rois, bbox_label, bbox_results['bbox_pred'],
img_meta[0])
else:
bbox_results = self._attr_bbox_forward(i, x, rois)
ms_scores.append(bbox_results['cls_score'])
cls_score = sum(ms_scores) / float(len(ms_scores))
bboxes, scores, attr_scores = self.bbox_head[-1].get_bboxes(
rois,
cls_score,
bbox_results['bbox_pred'],
bbox_results['attr_score'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
aug_attr_scores.append(attr_scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores, merged_attr_scores = self.merge_aug_bboxes(
aug_bboxes, aug_scores, aug_attr_scores, img_metas, rcnn_test_cfg)
if merged_bboxes.shape[0] == 0:
# There is no proposal in the single image
det_bboxes = merged_bboxes.new_zeros(0, 5)
det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long)
det_attr_scores = merged_bboxes.new_zeros(0, rcnn_test_cfg.attribute_num)
else:
det_bboxes, det_labels, inds = multiclass_nms(merged_bboxes,
merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img,
return_inds=True)
det_attr_scores = merged_attr_scores[:, None].expand(
merged_attr_scores.size(0), rcnn_test_cfg.num_classes, rcnn_test_cfg.attribute_num)
det_attr_scores = det_attr_scores.reshape(-1, rcnn_test_cfg.attribute_num)[inds]
det_scores = merged_scores[inds // rcnn_test_cfg.num_classes]
return det_bboxes, det_labels, det_attr_scores, det_scores
def merge_aug_bboxes(self, aug_bboxes, aug_scores, aug_attr_scores, img_metas, rcnn_test_cfg):
"""Merge augmented detection bboxes and scores.
Args:
aug_bboxes (list[Tensor]): shape (n, 4*#class)
aug_scores (list[Tensor] or None): shape (n, #class)
img_shapes (list[Tensor]): shape (3, ).
rcnn_test_cfg (dict): rcnn test config.
Returns:
tuple: (bboxes, scores)
"""
recovered_bboxes = []
for bboxes, img_info in zip(aug_bboxes, img_metas):
img_shape = img_info[0]['img_shape']
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
flip_direction = img_info[0]['flip_direction']
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
flip_direction)
recovered_bboxes.append(bboxes)
bboxes = torch.stack(recovered_bboxes).mean(dim=0)
if aug_scores is None:
return bboxes
else:
scores = torch.stack(aug_scores).mean(dim=0)
if aug_attr_scores is not None:
aug_attr_scores = torch.stack(aug_attr_scores).mean(dim=0)
return bboxes, scores, aug_attr_scores
| [
"mmdet.core.bbox_mapping",
"torch.stack",
"mmdet.core.bbox2roi",
"mmdet.core.bbox.bbox_mapping_back",
"mmdet.core.bbox2result",
"mmdet.core.multiclass_nms",
"numpy.argwhere"
] | [((3207, 3270), 'mmdet.core.bbox2result', 'bbox2result', (['_det_bboxes', 'det_labels', 'self.test_cfg.num_classes'], {}), '(_det_bboxes, det_labels, self.test_cfg.num_classes)\n', (3218, 3270), False, 'from mmdet.core import bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((4712, 4800), 'mmdet.core.bbox_mapping', 'bbox_mapping', (['proposal_list[0][:, :4]', 'img_shape', 'scale_factor', 'flip', 'flip_direction'], {}), '(proposal_list[0][:, :4], img_shape, scale_factor, flip,\n flip_direction)\n', (4724, 4800), False, 'from mmdet.core import bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((4853, 4874), 'mmdet.core.bbox2roi', 'bbox2roi', (['[proposals]'], {}), '([proposals])\n', (4861, 4874), False, 'from mmdet.core import bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((7110, 7247), 'mmdet.core.multiclass_nms', 'multiclass_nms', (['merged_bboxes', 'merged_scores', 'rcnn_test_cfg.score_thr', 'rcnn_test_cfg.nms', 'rcnn_test_cfg.max_per_img'], {'return_inds': '(True)'}), '(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img, return_inds=True)\n', (7124, 7247), False, 'from mmdet.core import bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((8718, 8790), 'mmdet.core.bbox.bbox_mapping_back', 'bbox_mapping_back', (['bboxes', 'img_shape', 'scale_factor', 'flip', 'flip_direction'], {}), '(bboxes, img_shape, scale_factor, flip, flip_direction)\n', (8735, 8790), False, 'from mmdet.core.bbox import bbox_mapping_back\n'), ((8888, 8917), 'torch.stack', 'torch.stack', (['recovered_bboxes'], {}), '(recovered_bboxes)\n', (8899, 8917), False, 'import torch\n'), ((3686, 3718), 'numpy.argwhere', 'np.argwhere', (['(det_labels_tmp == 
i)'], {}), '(det_labels_tmp == i)\n', (3697, 3718), True, 'import numpy as np\n'), ((9022, 9045), 'torch.stack', 'torch.stack', (['aug_scores'], {}), '(aug_scores)\n', (9033, 9045), False, 'import torch\n'), ((9128, 9156), 'torch.stack', 'torch.stack', (['aug_attr_scores'], {}), '(aug_attr_scores)\n', (9139, 9156), False, 'import torch\n'), ((2882, 2945), 'numpy.argwhere', 'np.argwhere', (['(det_attr_score > self.test_cfg.attribute_score_thr)'], {}), '(det_attr_score > self.test_cfg.attribute_score_thr)\n', (2893, 2945), True, 'import numpy as np\n')] |
import sys
import numpy as np
from mpi4py import MPI
comm = MPI.COMM_WORLD
name = MPI.Get_processor_name()
print("Hello world from processor {}, rank {} out of {} processors"\
.format(name, comm.rank, comm.size))
print("Now I will take up memory and waste computing power for demonstration purposes")
sys.stdout.flush()
nums = np.zeros((500,500,500))
nums[0,0,0] = 1.
while(1):
nums[0,0,0] *= 3.14
nums[0,0,0] /= 3.14
| [
"sys.stdout.flush",
"mpi4py.MPI.Get_processor_name",
"numpy.zeros"
] | [((84, 108), 'mpi4py.MPI.Get_processor_name', 'MPI.Get_processor_name', ([], {}), '()\n', (106, 108), False, 'from mpi4py import MPI\n'), ((307, 325), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (323, 325), False, 'import sys\n'), ((334, 359), 'numpy.zeros', 'np.zeros', (['(500, 500, 500)'], {}), '((500, 500, 500))\n', (342, 359), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import os
def noisy(noise_typ,image):
if noise_typ == "gauss":
row,col,ch= image.shape
mean = 0
var = 0.1
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
noisy = image + gauss
return noisy
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ =="speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
path_from = '/home/ml/Documents/attendance_dl/output1' # path of input folder
path_to = '/home/ml/Documents/attendance_dl/output/' # path of output folder
name = os.listdir(path_from)
for i in name:
if not os.path.exists(path_to + i):
os.mkdir(path_to + i)
k = os.listdir(path_from + i)
p = 1
for j in k:
length = 0
img = cv2.imread(path_from + i + '/' + j)
img0 = img
#print('/home/ml/Documents/attendance_dl/output/' + i + '/' + j)
img = cv2.GaussianBlur(img, (5,5), 0)
cv2.imshow('original', img)
img1 = cv2.flip(img, 1)
hsvv = cv2.cvtColor(img0,cv2.COLOR_BGR2HSV)
h,s,v = cv2.split(hsvv)
cv2.normalize(v, v, 0, 150, cv2.NORM_MINMAX)
img2 = cv2.merge((h,s,v+35))
img2 = cv2.cvtColor(img2, cv2.COLOR_HSV2BGR)
h,s,v = cv2.split(hsvv)
cv2.normalize(v, v, 150, 255, cv2.NORM_MINMAX)
img4 = cv2.merge((h,s,v-10))
img4 = cv2.cvtColor(img4, cv2.COLOR_HSV2BGR)
noisy1 = noisy('gauss', img0)
noisy2 = noisy('s&p', img0)
noisy3 = noisy('poisson', img0)
#noisy4 = noisy('speckle', img0)
M = np.float32([[1, 0, 40], [0, 1, -5]])
dst1 = cv2.warpAffine(img0, M, (img0.shape[1], img0.shape[0]))
M = np.float32([[1, 0, -4], [0, 1, -40]])
dst = cv2.warpAffine(img0, M, (img0.shape[1], img0.shape[0]))
cv2.imshow('img1', img2)
#cv2.waitKey(0)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p) + '.jpg', img1)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p+1) + '.jpg', img2)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p+2) + '.jpg', img4)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p+5) + '.jpg', img0)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p+3) + '.jpg', noisy1)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p+4) + '.jpg', dst1)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p+2) + '.jpg', dst)
cv2.imwrite(path_to + i + '/' + i + '_' + str(length + p+7) + '.jpg', noisy4)
p = p + 8
if p >= 1000:
break
| [
"cv2.normalize",
"cv2.imshow",
"os.path.exists",
"os.listdir",
"numpy.random.poisson",
"os.mkdir",
"numpy.random.normal",
"cv2.merge",
"cv2.warpAffine",
"numpy.ceil",
"cv2.cvtColor",
"cv2.split",
"numpy.log2",
"cv2.GaussianBlur",
"numpy.random.randn",
"cv2.imread",
"numpy.copy",
"c... | [((1415, 1436), 'os.listdir', 'os.listdir', (['path_from'], {}), '(path_from)\n', (1425, 1436), False, 'import os\n'), ((1530, 1555), 'os.listdir', 'os.listdir', (['(path_from + i)'], {}), '(path_from + i)\n', (1540, 1555), False, 'import os\n'), ((196, 241), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', '(row, col, ch)'], {}), '(mean, sigma, (row, col, ch))\n', (212, 241), True, 'import numpy as np\n'), ((1463, 1490), 'os.path.exists', 'os.path.exists', (['(path_to + i)'], {}), '(path_to + i)\n', (1477, 1490), False, 'import os\n'), ((1500, 1521), 'os.mkdir', 'os.mkdir', (['(path_to + i)'], {}), '(path_to + i)\n', (1508, 1521), False, 'import os\n'), ((1616, 1651), 'cv2.imread', 'cv2.imread', (["(path_from + i + '/' + j)"], {}), "(path_from + i + '/' + j)\n", (1626, 1651), False, 'import cv2\n'), ((1760, 1792), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(5, 5)', '(0)'], {}), '(img, (5, 5), 0)\n', (1776, 1792), False, 'import cv2\n'), ((1800, 1827), 'cv2.imshow', 'cv2.imshow', (['"""original"""', 'img'], {}), "('original', img)\n", (1810, 1827), False, 'import cv2\n'), ((1843, 1859), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (1851, 1859), False, 'import cv2\n'), ((1875, 1912), 'cv2.cvtColor', 'cv2.cvtColor', (['img0', 'cv2.COLOR_BGR2HSV'], {}), '(img0, cv2.COLOR_BGR2HSV)\n', (1887, 1912), False, 'import cv2\n'), ((1929, 1944), 'cv2.split', 'cv2.split', (['hsvv'], {}), '(hsvv)\n', (1938, 1944), False, 'import cv2\n'), ((1953, 1997), 'cv2.normalize', 'cv2.normalize', (['v', 'v', '(0)', '(150)', 'cv2.NORM_MINMAX'], {}), '(v, v, 0, 150, cv2.NORM_MINMAX)\n', (1966, 1997), False, 'import cv2\n'), ((2013, 2038), 'cv2.merge', 'cv2.merge', (['(h, s, v + 35)'], {}), '((h, s, v + 35))\n', (2022, 2038), False, 'import cv2\n'), ((2050, 2087), 'cv2.cvtColor', 'cv2.cvtColor', (['img2', 'cv2.COLOR_HSV2BGR'], {}), '(img2, cv2.COLOR_HSV2BGR)\n', (2062, 2087), False, 'import cv2\n'), ((2105, 2120), 'cv2.split', 'cv2.split', (['hsvv'], 
{}), '(hsvv)\n', (2114, 2120), False, 'import cv2\n'), ((2129, 2175), 'cv2.normalize', 'cv2.normalize', (['v', 'v', '(150)', '(255)', 'cv2.NORM_MINMAX'], {}), '(v, v, 150, 255, cv2.NORM_MINMAX)\n', (2142, 2175), False, 'import cv2\n'), ((2191, 2216), 'cv2.merge', 'cv2.merge', (['(h, s, v - 10)'], {}), '((h, s, v - 10))\n', (2200, 2216), False, 'import cv2\n'), ((2228, 2265), 'cv2.cvtColor', 'cv2.cvtColor', (['img4', 'cv2.COLOR_HSV2BGR'], {}), '(img4, cv2.COLOR_HSV2BGR)\n', (2240, 2265), False, 'import cv2\n'), ((2435, 2471), 'numpy.float32', 'np.float32', (['[[1, 0, 40], [0, 1, -5]]'], {}), '([[1, 0, 40], [0, 1, -5]])\n', (2445, 2471), True, 'import numpy as np\n'), ((2487, 2542), 'cv2.warpAffine', 'cv2.warpAffine', (['img0', 'M', '(img0.shape[1], img0.shape[0])'], {}), '(img0, M, (img0.shape[1], img0.shape[0]))\n', (2501, 2542), False, 'import cv2\n'), ((2556, 2593), 'numpy.float32', 'np.float32', (['[[1, 0, -4], [0, 1, -40]]'], {}), '([[1, 0, -4], [0, 1, -40]])\n', (2566, 2593), True, 'import numpy as np\n'), ((2608, 2663), 'cv2.warpAffine', 'cv2.warpAffine', (['img0', 'M', '(img0.shape[1], img0.shape[0])'], {}), '(img0, M, (img0.shape[1], img0.shape[0]))\n', (2622, 2663), False, 'import cv2\n'), ((2673, 2697), 'cv2.imshow', 'cv2.imshow', (['"""img1"""', 'img2'], {}), "('img1', img2)\n", (2683, 2697), False, 'import cv2\n'), ((436, 450), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (443, 450), True, 'import numpy as np\n'), ((486, 523), 'numpy.ceil', 'np.ceil', (['(amount * image.size * s_vs_p)'], {}), '(amount * image.size * s_vs_p)\n', (493, 523), True, 'import numpy as np\n'), ((681, 726), 'numpy.ceil', 'np.ceil', (['(amount * image.size * (1.0 - s_vs_p))'], {}), '(amount * image.size * (1.0 - s_vs_p))\n', (688, 726), True, 'import numpy as np\n'), ((910, 926), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (919, 926), True, 'import numpy as np\n'), ((983, 1014), 'numpy.random.poisson', 'np.random.poisson', (['(image * vals)'], {}), 
'(image * vals)\n', (1000, 1014), True, 'import numpy as np\n'), ((1124, 1153), 'numpy.random.randn', 'np.random.randn', (['row', 'col', 'ch'], {}), '(row, col, ch)\n', (1139, 1153), True, 'import numpy as np\n'), ((954, 967), 'numpy.log2', 'np.log2', (['vals'], {}), '(vals)\n', (961, 967), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 4 12:22:44 2017
@author: a.sancho.asensio
"""
import argparse
import base64
import json
import re, sys
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import math
import pandas as pd
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
from keras.utils import np_utils
from keras.optimizers import SGD, Adam, RMSprop
from keras.models import Model, Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Input, merge, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU, ELU
import keras.backend as K
# Fix error with Keras and TensorFlow
import tensorflow as tf
# Workaround for an old Keras/TensorFlow incompatibility (missing attribute
# on newer TF builds at the time this was written).
tf.python.control_flow_ops = tf
if os.name == 'nt': # We're on the Windows machine.
    print(" > Loading paths for the Windows machine")
    PATH = "C:/Users/a.sancho.asensio/Documents/PaperWork/nanodegree/git/simulator-windows-64/"
else: # Linux/MAC machine.
    print(" > Loading paths for the Linux machine")
    PATH = "/home/andreu/nanodegree/simulator-linux/"
g_steering = np.zeros(10, dtype="float32") # Global array containing the last steering angles.
sio = socketio.Server()      # socket.io server that talks to the simulator
app = Flask(__name__)         # Flask app, later wrapped by socketio.Middleware
model = None                  # Keras model, loaded in __main__
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
    """Handle one telemetry frame from the simulator.

    Decodes the center-camera image, predicts a steering angle with the
    global Keras ``model``, smooths it against the recent steering history
    in ``g_steering``, and sends the control command back.
    """
    global g_steering
    # Decode the base64-encoded center-camera frame into a uint8 array.
    frame = Image.open(BytesIO(base64.b64decode(data["image"])))
    pixels = np.asarray(frame, dtype="uint8")
    # Crop away sky/hood rows, then downscale to the network's input size.
    pixels = pixels[16:144, :, :]
    pixels = cv2.resize(pixels, (160, 64), interpolation=cv2.INTER_AREA)
    batch = pixels[None, :, :, :]
    prediction = float(model.predict(batch, batch_size=1))
    # The driving model only outputs steering; throttle is kept constant.
    throttle = 1.0
    # Low-pass filter: blend the fresh prediction with the running history,
    # then push the result onto the front of the history buffer.
    final_steering = 0.9 * prediction + 0.1 * np.mean(g_steering)
    g_steering = np.roll(g_steering, 1)
    g_steering[0] = final_steering
    print("{:.3f}".format(final_steering), "{:.3f}".format(throttle))
    send_control(final_steering, throttle)
@sio.on('connect')
def connect(sid, environ):
    # Reset the car controls as soon as a simulator client connects.
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' event to the simulator with the given controls.

    Both values are serialized as strings, which is the format the
    simulator expects; skip_sid=True broadcasts to all connected clients.
    """
    # Idiom fix: use the str() builtin instead of calling the __str__()
    # dunder directly (behavior is identical).
    sio.emit("steer", data={
        'steering_angle': str(steering_angle),
        'throttle': str(throttle)
    }, skip_sid=True)
if __name__ == '__main__':
    # Single positional argument: path to the model's JSON definition.
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument('model', type=str,
                        help='Path to model definition json. Model weights should be on the same path.')
    args = parser.parse_args()
    # Set the seed and load the model.
    np.random.seed(1337)
    tf.set_random_seed(1337) # Tensorflow specific.
    with open(args.model, 'r') as jfile:
        model = model_from_json(jfile.read())
    model.compile("adam", "mse")
    # Weights are expected next to the JSON file: same name, '.h5' suffix.
    weights_file = args.model.replace('json', 'h5')
    model.load_weights(weights_file)
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app) | [
"numpy.mean",
"numpy.roll",
"argparse.ArgumentParser",
"flask.Flask",
"socketio.Server",
"numpy.asarray",
"socketio.Middleware",
"eventlet.listen",
"base64.b64decode",
"numpy.zeros",
"numpy.random.seed",
"cv2.resize",
"tensorflow.set_random_seed"
] | [((1637, 1666), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': '"""float32"""'}), "(10, dtype='float32')\n", (1645, 1666), True, 'import numpy as np\n'), ((1726, 1743), 'socketio.Server', 'socketio.Server', ([], {}), '()\n', (1741, 1743), False, 'import socketio\n'), ((1751, 1766), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1756, 1766), False, 'from flask import Flask, render_template\n'), ((2296, 2328), 'numpy.asarray', 'np.asarray', (['image'], {'dtype': '"""uint8"""'}), "(image, dtype='uint8')\n", (2306, 2328), True, 'import numpy as np\n'), ((2550, 2614), 'cv2.resize', 'cv2.resize', (['image_array', '(160, 64)'], {'interpolation': 'cv2.INTER_AREA'}), '(image_array, (160, 64), interpolation=cv2.INTER_AREA)\n', (2560, 2614), False, 'import cv2\n'), ((2996, 3018), 'numpy.roll', 'np.roll', (['g_steering', '(1)'], {}), '(g_steering, 1)\n', (3003, 3018), True, 'import numpy as np\n'), ((3501, 3554), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Remote Driving"""'}), "(description='Remote Driving')\n", (3524, 3554), False, 'import argparse\n'), ((3762, 3782), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (3776, 3782), True, 'import numpy as np\n'), ((3788, 3812), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1337)'], {}), '(1337)\n', (3806, 3812), True, 'import tensorflow as tf\n'), ((4121, 4150), 'socketio.Middleware', 'socketio.Middleware', (['sio', 'app'], {}), '(sio, app)\n', (4140, 4150), False, 'import socketio\n'), ((4220, 4247), 'eventlet.listen', 'eventlet.listen', (["('', 4567)"], {}), "(('', 4567))\n", (4235, 4247), False, 'import eventlet\n'), ((2247, 2274), 'base64.b64decode', 'base64.b64decode', (['imgString'], {}), '(imgString)\n', (2263, 2274), False, 'import base64\n'), ((2958, 2977), 'numpy.mean', 'np.mean', (['g_steering'], {}), '(g_steering)\n', (2965, 2977), True, 'import numpy as np\n')] |
from __future__ import unicode_literals
from __future__ import print_function
import time
import unittest
import numpy as np
from hartigan_diptest import dip
class testModality(unittest.TestCase):
    """Timing smoke test for Hartigan's dip statistic."""

    def setUp(self):
        # A fresh unimodal Gaussian sample for every test method.
        self.data = np.random.randn(1000)

    def test_hartigan_diptest(self):
        started = time.time()
        dip(self.data)
        elapsed = time.time() - started
        print("Hartigan diptest: {}".format(elapsed))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"hartigan_diptest.dip",
"numpy.random.randn",
"time.time"
] | [((462, 477), 'unittest.main', 'unittest.main', ([], {}), '()\n', (475, 477), False, 'import unittest\n'), ((243, 264), 'numpy.random.randn', 'np.random.randn', (['(1000)'], {}), '(1000)\n', (258, 264), True, 'import numpy as np\n'), ((316, 327), 'time.time', 'time.time', ([], {}), '()\n', (325, 327), False, 'import time\n'), ((336, 350), 'hartigan_diptest.dip', 'dip', (['self.data'], {}), '(self.data)\n', (339, 350), False, 'from hartigan_diptest import dip\n'), ((364, 375), 'time.time', 'time.time', ([], {}), '()\n', (373, 375), False, 'import time\n')] |
import cv2
import sys
import numpy as np
#rectangle in Python is a tuple of (x,y,w,h)
#for rectangle
def union(a, b):
    """Return the smallest (x, y, w, h) rectangle covering both a and b."""
    x = min(a[0], b[0])
    y = min(a[1], b[1])
    w = max(a[0] + a[2], b[0] + b[2]) - x
    h = max(a[1] + a[3], b[1] + b[3]) - y
    return (x, y, w, h)


def intersection(a, b):
    """Return the overlap of rectangles a and b as (x, y, w, h).

    Returns the empty tuple () when the rectangles do not overlap.
    Rectangles that merely touch (zero-width/height overlap) still count
    as intersecting, which lets combine_boxes merge adjacent boxes.
    """
    x = max(a[0], b[0])
    y = max(a[1], b[1])
    w = min(a[0] + a[2], b[0] + b[2]) - x
    h = min(a[1] + a[3], b[1] + b[3]) - y
    if w < 0 or h < 0:
        return ()
    return (x, y, w, h)


def combine_boxes(boxes):
    """Repeatedly union overlapping rectangles until none overlap.

    Parameters
    ----------
    boxes : sequence of (x, y, w, h) rectangles (list of tuples or ndarray)

    Returns
    -------
    numpy.ndarray of int, shape (k, 4): one row per merged rectangle.
    """
    # Bug fix: the original kept `boxes` as a plain Python list whenever no
    # merge ever happened (a single box, or no overlapping pairs), so the
    # final `boxes.astype("int")` raised AttributeError. Normalizing to an
    # ndarray up front makes every code path consistent.
    boxes = np.asarray(boxes)
    done = False
    # Keep making full passes until a pass completes with no merges.
    while not done:
        done = True
        pos = 0
        while pos < len(boxes):
            merged_here = True
            # Merge boxes[pos] with overlapping rectangles until it no
            # longer intersects any of the remaining boxes.
            while merged_here and len(boxes) > 1 and pos < len(boxes):
                merged_here = False
                current = boxes[pos]
                others = np.delete(boxes, pos, 0)
                for i, other in enumerate(others):
                    if intersection(current, other):
                        # Replace the partner with the union and drop
                        # `current` by adopting the reduced array.
                        others[i] = union(current, other)
                        boxes = others
                        merged_here = True
                        done = False
                        break
            pos += 1
    return boxes.astype("int")
# Detect text regions in an image using MSER (Maximally Stable Extremal
# Regions), merge overlapping detections, and draw the surviving boxes.
mser = cv2.MSER_create(_delta = 10, _min_area=1000)
img = cv2.imread(sys.argv[1])          # input image path from the command line
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
vis = img.copy()                       # copy to draw the result on
regions, _ = mser.detectRegions(gray)
rectList = []
for region in regions:
    #fit a bounding box to the contour
    (x, y, w, h) = cv2.boundingRect(region.reshape(-1,1,2))
    #increase rect width and height, union overlapped rect
    rectList.append((x, y, w+40, h+40))
newRectList = combine_boxes(rectList)
for rect in newRectList:
    x, y, w, h = rect[0], rect[1], rect[2], rect[3]
    # Draw only sufficiently large merged boxes (area threshold filters noise).
    if w*h > 50000:
        cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 255, 0), 5)
#hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
#cv2.polylines(vis, hulls, 1, (0, 255, 0))
# Show and save the annotated image ('output2' + original filename).
vis = cv2.resize(vis, (720, 480))
cv2.imshow('image', vis)
cv2.imwrite('output2'+sys.argv[1], vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''#draw the filled in contours to an empty img in white
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
for contour in hulls:
cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
#extract only the text
text_only = cv2.bitwise_and(img, img, mask=mask)
pic2 = cv2.resize(text_only, (720, 480))
cv2.imshow('image2', pic2)
cv2.waitKey(0)
cv2.destroyAllWindows()''' | [
"cv2.rectangle",
"cv2.imwrite",
"numpy.delete",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.MSER_create",
"cv2.resize",
"cv2.imread"
] | [((1986, 2028), 'cv2.MSER_create', 'cv2.MSER_create', ([], {'_delta': '(10)', '_min_area': '(1000)'}), '(_delta=10, _min_area=1000)\n', (2001, 2028), False, 'import cv2\n'), ((2037, 2060), 'cv2.imread', 'cv2.imread', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (2047, 2060), False, 'import cv2\n'), ((2068, 2105), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2080, 2105), False, 'import cv2\n'), ((2706, 2733), 'cv2.resize', 'cv2.resize', (['vis', '(720, 480)'], {}), '(vis, (720, 480))\n', (2716, 2733), False, 'import cv2\n'), ((2736, 2760), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'vis'], {}), "('image', vis)\n", (2746, 2760), False, 'import cv2\n'), ((2761, 2802), 'cv2.imwrite', 'cv2.imwrite', (["('output2' + sys.argv[1])", 'vis'], {}), "('output2' + sys.argv[1], vis)\n", (2772, 2802), False, 'import cv2\n'), ((2801, 2815), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2812, 2815), False, 'import cv2\n'), ((2816, 2839), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2837, 2839), False, 'import cv2\n'), ((2532, 2590), 'cv2.rectangle', 'cv2.rectangle', (['vis', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(5)'], {}), '(vis, (x, y), (x + w, y + h), (0, 255, 0), 5)\n', (2545, 2590), False, 'import cv2\n'), ((1286, 1315), 'numpy.delete', 'np.delete', (['boxes', 'posIndex', '(0)'], {}), '(boxes, posIndex, 0)\n', (1295, 1315), True, 'import numpy as np\n')] |
import tensorflow as tf
import datetime
import numpy as np
import zutils.tf_math_funcs as tmf
from zutils.py_utils import *
from scipy.io import savemat
class OneEpochRunner:
    """Run a model over (at most) one epoch of a data module in a TF session.

    Per-batch outputs are collected and finally written with
    scipy.io.savemat to ``output_fn + ".mat"``.  When ``is_large`` is set,
    partial results are instead flushed to numbered ``.mat`` files every
    100 batches to bound memory usage.

    Exactly one of ``output_list`` (a nested structure of tensors fetched
    via ``sess.run``) or ``net_func`` (a callable invoked as
    ``net_func(sess)``) must be supplied.
    """

    def __init__(
            self, data_module, output_list=None,
            net_func=None, batch_axis=0, num_samples=None, disp_time_interval=2,
            output_fn=None, is_large=False):
        # data_module must provide num_samples(), reset(), iter(), epoch()
        # and num_samples_finished() (see their uses in run()).
        self.data_module = data_module
        self.num_samples = self.data_module.num_samples()
        self.batch_axis = batch_axis
        self.disp_time_interval = disp_time_interval
        self.output_fn = output_fn
        self.is_large = is_large
        if num_samples is not None:
            if self.num_samples < num_samples:
                print("specified number_samples is larger than one epoch")
            else:
                self.num_samples = num_samples
        self.use_net_func = output_list is None # otherwise use net_func
        # NOTE(review): the two assertion messages below appear to be swapped
        # relative to the conditions they guard.
        if self.use_net_func:
            assert net_func is not None, \
                "output_list and net_func should not be both specified"
            self.net_func = net_func
            # remark: net_func(sess)
        else:
            assert net_func is None, \
                "one of output_list and net_func must be specified"
            self.output_list = output_list
            # Flatten the (possibly nested) output structure into a plain list
            # for sess.run; output_wrap_func restores the original nesting.
            [self.flatten_output_list, self.output_wrap_func] = \
                recursive_flatten_with_wrap_func(
                    lambda x: tmf.is_tf_data(x), self.output_list)
        self.data_module.reset()
        self.cur_sample_end = 0  # number of samples consumed so far

    def run_single_batch(self, sess):
        """Evaluate one batch; return its outputs, or None once the epoch is done.

        The returned structure mirrors ``output_list`` (or whatever
        ``net_func`` yields); the final batch is truncated along
        ``batch_axis`` so no more than ``num_samples`` samples are emitted.
        """
        if self.cur_sample_end >= self.num_samples:
            return None
        if self.use_net_func:
            output_val = self.net_func(sess)
        else:
            output_val = sess.run(self.flatten_output_list, {})
            output_val = self.output_wrap_func(output_val)
        # Infer the batch size from the first ndarray found in the outputs.
        batch_size = first_element_apply(
            lambda x: isinstance(x, np.ndarray),
            lambda x: x.shape[self.batch_axis], output_val)
        self.batch_size = batch_size
        new_end = self.cur_sample_end + batch_size
        if new_end > self.num_samples:
            # Trim the overshooting tail of the final batch along batch_axis.
            effective_batch_size = \
                batch_size - (new_end-self.num_samples)
            slice_indexes = (slice(None),)*self.batch_axis + (slice(effective_batch_size),)
            output_val = recursive_apply(
                lambda x: isinstance(x, np.ndarray),
                lambda x: x[slice_indexes], output_val)
        self.cur_sample_end = new_end
        return output_val

    def run(self, sess):
        """Drive run_single_batch over the epoch, logging progress, saving results.

        Batch outputs are concatenated along ``batch_axis`` and saved via
        scipy.io.savemat; with ``is_large`` they are flushed periodically
        to numbered partial files instead of one final file.
        """
        # IfTimeout (from the zutils.py_utils star-import) throttles progress
        # printing to roughly one line per disp_time_interval seconds.
        disp_countdown = IfTimeout(self.disp_time_interval)
        num_samples_total = self.num_samples
        output_val_single = self.run_single_batch(sess)
        output_val = []
        while output_val_single is not None:
            output_val += [output_val_single]
            # NOTE(review): `iter` shadows the builtin of the same name.
            iter = self.data_module.iter()
            if self.data_module.epoch() == 0:
                num_samples_finished = self.data_module.num_samples_finished()
            else:
                # A full epoch has been completed at least once.
                num_samples_finished = self.num_samples
            if disp_countdown.is_timeout():
                epoch_percentage = num_samples_finished / num_samples_total * 100
                print("%s] Iter %d (%4.1f%% = %d / %d)" %
                      (datetime.datetime.now().strftime('%Y-%m/%d-%H:%M:%S.%f'),
                       iter, epoch_percentage, num_samples_finished, num_samples_total))
                disp_countdown = IfTimeout(self.disp_time_interval)
            # Large-output mode: every 100 batches (or at the very end),
            # concatenate what we have and spill it to a numbered .mat file.
            if self.is_large and (num_samples_finished % (100*self.batch_size) == 0 or num_samples_finished == self.num_samples):
                output_val = recursive_apply(
                    lambda *args: isinstance(args[0], np.ndarray),
                    lambda *args: np.concatenate(args, axis=self.batch_axis),
                    *output_val)
                # NOTE(review): `os` is not imported in this file; presumably
                # it is provided by the zutils.py_utils star-import -- verify.
                self.dir_path = os.path.dirname(self.output_fn+'_'+'%06d'%num_samples_finished)
                if not os.path.exists(self.dir_path):
                    os.makedirs(self.dir_path)
                savemat(self.output_fn+'_'+'%06d'%num_samples_finished+'.mat',output_val)
                print('Saving part of output to '+ self.output_fn+'_'+'%06d'%num_samples_finished+'.mat')
                output_val = []
            output_val_single = self.run_single_batch(sess)
        if not self.is_large:
            # Small-output mode: concatenate everything and save one file.
            output_val = recursive_apply(
                lambda *args: isinstance(args[0], np.ndarray),
                lambda *args: np.concatenate(args, axis=self.batch_axis),
                *output_val)
            savemat(self.output_fn + ".mat", output_val)
            print('Saving output to ' + self.output_fn + ".mat")
| [
"numpy.concatenate",
"datetime.datetime.now",
"scipy.io.savemat",
"zutils.tf_math_funcs.is_tf_data"
] | [((4642, 4686), 'scipy.io.savemat', 'savemat', (["(self.output_fn + '.mat')", 'output_val'], {}), "(self.output_fn + '.mat', output_val)\n", (4649, 4686), False, 'from scipy.io import savemat\n'), ((4118, 4204), 'scipy.io.savemat', 'savemat', (["(self.output_fn + '_' + '%06d' % num_samples_finished + '.mat')", 'output_val'], {}), "(self.output_fn + '_' + '%06d' % num_samples_finished + '.mat',\n output_val)\n", (4125, 4204), False, 'from scipy.io import savemat\n'), ((1455, 1472), 'zutils.tf_math_funcs.is_tf_data', 'tmf.is_tf_data', (['x'], {}), '(x)\n', (1469, 1472), True, 'import zutils.tf_math_funcs as tmf\n'), ((4557, 4599), 'numpy.concatenate', 'np.concatenate', (['args'], {'axis': 'self.batch_axis'}), '(args, axis=self.batch_axis)\n', (4571, 4599), True, 'import numpy as np\n'), ((3828, 3870), 'numpy.concatenate', 'np.concatenate', (['args'], {'axis': 'self.batch_axis'}), '(args, axis=self.batch_axis)\n', (3842, 3870), True, 'import numpy as np\n'), ((3322, 3345), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3343, 3345), False, 'import datetime\n')] |
#!/user/bin/env python
'''columnarStructureX.py
Inheritance class of ColumnarStructure
'''
__author__ = "<NAME>) Huang"
__maintainer__ = "Mars (Shih-Cheng) Huang"
__email__ = "<EMAIL>"
__version__ = "0.2.0"
__status__ = "Done"
import numpy as np
import sys
from mmtfPyspark.utils import ColumnarStructure
from sympy import Point3D
class ColumnarStructureX(ColumnarStructure):
    '''Inheritance of class ColumnarStructure with additional functions

    Attributes
    ----------
    structure : mmtfStructure
        mmtf structure
    firstModelOnly : bool
        flag to use only the first model of the structure
    '''

    def __init__(self, structure, firstModelOnly=True):
        ColumnarStructure.__init__(self, structure, firstModelOnly)
        self.normalizedbFactors = None       # cached z-scores, computed lazily
        self.clampedNormalizedbFactor = None # cached clamped z-scores

    def get_normalized_b_factors(self):
        '''Returns z-scores for B-factors (normalized B-factors).

        Critical z-score values: Confidence level Tail Area z critical
                                 90%              0.05      +- 1.645
                                 95%              0.025     +- 1.96
                                 99%              0.005     +- 2.576
        '''
        if self.normalizedbFactors is None:
            self.get_entity_types()
            self.bFactors = self.get_b_factors()
            self.entityTypes = self.get_entity_types()
            # Exclude water (entity type 'WAT', i.e. HOH/DOD) from the
            # statistics. Bug fix: compare with != rather than `is not`;
            # `is not` tests object identity and only worked by accident of
            # CPython string interning (SyntaxWarning on modern Pythons).
            stats = np.array([self.bFactors[i] for i in range(self.get_num_atoms())
                      if self.entityTypes[i] != 'WAT'])
            # z-score relative to the non-water mean/std
            normalize = lambda x: (x - stats.mean()) / stats.std()
            if stats.std() != 0:
                self.normalizedbFactors = [float(n) for n in normalize(self.bFactors)]
            else:
                # Degenerate case (all B-factors identical): mark with a
                # sentinel rather than dividing by zero.
                self.normalizedbFactors = [sys.float_info.max] * len(self.bFactors)
        return self.normalizedbFactors

    def get_clamped_normalized_b_factors(self):
        '''Returns a normalized B-factors that are clamped to the [-1,1] interval
        using the method of Liu et at. B-factors are normalized and scaled the
        90% Confidenceinterval of the B-factors to [-1,1]. Any value outside of
        the 90% confidence interval is set to either -1 or 1, whichever is closer.

        References
        ----------
        - Liu et al. BMC Bioinformatics 2014, 15(Suppl 16):S3,
          Use B-factor related features for accurate classification between
          protein binding interfaces and crystal packing contacts
          https://doi.org/10.1186/1471-2105-15-S16-S3
        '''
        if self.clampedNormalizedbFactor is None:
            self.get_normalized_b_factors()
            # Bug fix: normalizedbFactors is a Python list, so the original
            # list.copy() could neither be divided by a float nor boolean-
            # mask indexed (both raised TypeError). Work on a numpy array,
            # which is also what the masking below requires.
            clamped = np.asarray(self.normalizedbFactors, dtype=float)
            # Normalize and scale the 90% confidence interval of the B factor to [-1,1]
            clamped = clamped / 1.645
            # Set any value outside the 90% interval to either -1 or 1
            clamped[clamped < -1.0] = -1.0
            clamped[clamped > 1.0] = 1.0
            self.clampedNormalizedbFactor = clamped
        return self.clampedNormalizedbFactor

    def get_calpha_coordinates(self):
        '''Get the coordinates for Calpha atoms as an (n, 3) numpy array.'''
        self.get_calpha_atom_indices()
        x = self.get_x_coords()
        y = self.get_y_coords()
        z = self.get_z_coords()
        # NOTE: sympy Point3D is extremely slow; plain numpy rows are used
        # instead (see original TODO).
        calpha_coords_list = [np.array([x[i], y[i], z[i]]) for i in self.caIndices]
        self.calpha_coords = np.array(calpha_coords_list)
        return self.calpha_coords

    def get_calpha_atom_indices(self):
        '''Get the indices of Calpha atoms of protein residues.'''
        self.get_entity_types()
        self.get_atom_names()
        # Protein ('PRO') entities only, atom name 'CA'.
        caIndices_list = [i for i in range(self.get_num_atoms())
                          if (self.atomNames[i] == "CA"
                              and self.entityTypes[i] == "PRO")]
        self.caIndices = np.array(caIndices_list)
        return self.caIndices
return self.caIndices
| [
"numpy.array",
"mmtfPyspark.utils.ColumnarStructure.__init__"
] | [((696, 755), 'mmtfPyspark.utils.ColumnarStructure.__init__', 'ColumnarStructure.__init__', (['self', 'structure', 'firstModelOnly'], {}), '(self, structure, firstModelOnly)\n', (722, 755), False, 'from mmtfPyspark.utils import ColumnarStructure\n'), ((3669, 3697), 'numpy.array', 'np.array', (['calpha_coords_list'], {}), '(calpha_coords_list)\n', (3677, 3697), True, 'import numpy as np\n'), ((4105, 4129), 'numpy.array', 'np.array', (['caIndices_list'], {}), '(caIndices_list)\n', (4113, 4129), True, 'import numpy as np\n'), ((3586, 3614), 'numpy.array', 'np.array', (['[x[i], y[i], z[i]]'], {}), '([x[i], y[i], z[i]])\n', (3594, 3614), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.