code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os
import numpy as np
import matplotlib.pyplot as plt
import pyvips as Vips
# Lookup table mapping numpy dtypes to the equivalent pyvips band formats.
NP_DTYPE_TO_VIPS_FORMAT = {
    np.dtype('int8'): Vips.BandFormat.CHAR,
    np.dtype('uint8'): Vips.BandFormat.UCHAR,
    np.dtype('int16'): Vips.BandFormat.SHORT,
    np.dtype('uint16'): Vips.BandFormat.USHORT,
    np.dtype('int32'): Vips.BandFormat.INT,
    np.dtype('float32'): Vips.BandFormat.FLOAT,
    np.dtype('float64'): Vips.BandFormat.DOUBLE
}
# Inverse lookup table: pyvips band format -> numpy dtype.
VIPS_FORMAT_TO_NP_DTYPE = {v:k for k, v in NP_DTYPE_TO_VIPS_FORMAT.items()}
def array_vips(vips_image, verbose=False):
    """Convert a pyvips image to a numpy array.

    Parameters
    ----------
    vips_image : Vips.Image
        Image to convert; its band format must be a key of
        VIPS_FORMAT_TO_NP_DTYPE.
    verbose : bool
        If True, print the dtype and the image dimensions.

    Returns
    -------
    numpy.ndarray
        Array of shape (height, width, bands). Note the array is a
        read-only view of the copied pixel buffer; callers in this file
        only read from it.
    """
    dtype = VIPS_FORMAT_TO_NP_DTYPE[vips_image.format]
    if verbose:
        print(dtype, vips_image.height, vips_image.width, vips_image.bands)
    # np.frombuffer replaces np.fromstring, which is deprecated since
    # NumPy 1.14 and removed in NumPy 2.0.
    return (np.frombuffer(vips_image.write_to_memory(), dtype=dtype)
            .reshape(vips_image.height, vips_image.width, vips_image.bands))
def show_vips(vips_image, ax=plt, show=True, verbose=False):
    """Display a pyvips image with matplotlib.

    Returns -1 if `vips_image` is not a Vips.Image. Otherwise draws it on
    `ax` (by default the pyplot module itself) and, when `show` is True,
    calls plt.show().
    """
    if not isinstance(vips_image, Vips.Image):
        return -1
    pixels = array_vips(vips_image)
    if verbose:
        print(pixels.shape)
    band_count = vips_image.bands
    if band_count == 1:
        # Single band: scale by the max value and use a false-colour map.
        ax.imshow(pixels.squeeze() / np.max(pixels),
                  cmap=plt.get_cmap('gist_ncar'))
    elif band_count == 2:
        # Two bands: show only the second band as scaled greyscale.
        second_band = pixels[:, :, 1]
        ax.imshow(second_band / np.max(second_band),
                  cmap=plt.get_cmap('gray'))
    else:
        ax.imshow(pixels)
    if show:
        plt.show()
def image_fields_dict(im_with_fields):
    """Collect the image's metadata fields into a plain dict.

    Fields whose get_typeof() result is falsy (zero) are skipped.
    """
    fields = {}
    for field_name in im_with_fields.get_fields():
        if im_with_fields.get_typeof(field_name):
            fields[field_name] = im_with_fields.get(field_name)
    return fields
# from https://github.com/jcupitt/libvips/blob/master/doc/Examples.md
# from https://github.com/jcupitt/libvips/blob/master/doc/Examples.md
# NOTE(review): this re-definition duplicates the tables defined earlier in
# the file; kept as-is since later defs rely on whichever binding is last.
NP_DTYPE_TO_VIPS_FORMAT = {
    np.dtype('int8'): Vips.BandFormat.CHAR,
    np.dtype('uint8'): Vips.BandFormat.UCHAR,
    np.dtype('int16'): Vips.BandFormat.SHORT,
    np.dtype('uint16'): Vips.BandFormat.USHORT,
    np.dtype('int32'): Vips.BandFormat.INT,
    np.dtype('float32'): Vips.BandFormat.FLOAT,
    np.dtype('float64'): Vips.BandFormat.DOUBLE
}
# Inverse lookup table: pyvips band format -> numpy dtype.
VIPS_FORMAT_TO_NP_DTYPE = {v:k for k, v in NP_DTYPE_TO_VIPS_FORMAT.items()}
def array_vips(vips_image, verbose=False):
    """Convert a pyvips image to a numpy array, squeezing size-1 axes.

    Parameters
    ----------
    vips_image : Vips.Image
        Image to convert; its band format must be a key of
        VIPS_FORMAT_TO_NP_DTYPE.
    verbose : bool
        If True, print the dtype and the image dimensions.

    Returns
    -------
    numpy.ndarray
        Array of shape (height, width, bands) with any size-1 dimensions
        removed (so a single-band image comes back 2-D). The array is a
        read-only view of the copied pixel buffer.
    """
    dtype = VIPS_FORMAT_TO_NP_DTYPE[vips_image.format]
    if verbose:
        print(dtype, vips_image.height, vips_image.width, vips_image.bands)
    # np.frombuffer replaces np.fromstring, which is deprecated since
    # NumPy 1.14 and removed in NumPy 2.0.
    return (np.frombuffer(vips_image.write_to_memory(), dtype=dtype)
            .reshape(vips_image.height, vips_image.width, vips_image.bands)
            .squeeze())
def show_vips(vips_image, ax=plt, show=True, verbose=False):
    """Render a pyvips image via matplotlib.

    Returns -1 for non-Vips.Image inputs. One-band images are drawn with a
    false-colour map, two-band images show only the second band in grey,
    and anything else is passed straight to imshow. When `show` is True,
    plt.show() is called at the end.
    """
    if not isinstance(vips_image, Vips.Image):
        return -1
    pixels = array_vips(vips_image)
    if verbose:
        print(pixels.shape)
    band_count = vips_image.bands
    if band_count == 1:
        ax.imshow(pixels / np.max(pixels), cmap=plt.get_cmap('gist_ncar'))
    elif band_count == 2:
        pixels = pixels[:, :, 1]
        ax.imshow(pixels / np.max(pixels), cmap=plt.get_cmap('gray'))
    else:
        ax.imshow(pixels)
    if show:
        plt.show()
def image_fields_dict(im_with_fields):
    """Return a {field name: value} mapping of the image's metadata.

    Only fields with a truthy (nonzero) get_typeof() result are included.
    """
    field_names = im_with_fields.get_fields()
    return {name: im_with_fields.get(name)
            for name in field_names
            if im_with_fields.get_typeof(name)}
def save_and_tile(image_to_segment, output_dir, tile_size=1536):
    """Save an image as a Google-maps-style pyramid of JPEG tiles.

    Parameters
    ----------
    image_to_segment : Vips.Image
        Image to tile; its `.filename` attribute names the output folder.
        The name is split on '.svs', so this assumes Aperio SVS slides --
        TODO confirm behaviour for other file extensions.
    output_dir : str
        Directory under which the per-slide tile folder is created.
    tile_size : int
        Edge length of each square tile in pixels (default 1536).
    """
    basename = os.path.basename(image_to_segment.filename)
    # Strip the '.svs' extension to build the per-slide output folder name.
    base_dir_name = os.path.join(output_dir, basename.split('.svs')[0])
    if not os.path.exists(base_dir_name):
        os.makedirs(base_dir_name)
    # dzsave writes a DeepZoom-style pyramid: google layout, quality-90
    # JPEG tiles, a single depth level, with image properties preserved.
    Vips.Image.dzsave(image_to_segment, base_dir_name,
                      layout='google',
                      suffix='.jpg[Q=90]',
                      tile_size=tile_size,
                      depth='one',
                      properties=True)
return None | [
"os.path.exists",
"matplotlib.pyplot.get_cmap",
"os.makedirs",
"numpy.max",
"os.path.basename",
"pyvips.Image.dzsave",
"numpy.dtype",
"matplotlib.pyplot.show"
] | [((121, 137), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (129, 137), True, 'import numpy as np\n'), ((169, 186), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (177, 186), True, 'import numpy as np\n'), ((219, 236), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (227, 236), True, 'import numpy as np\n'), ((269, 287), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (277, 287), True, 'import numpy as np\n'), ((321, 338), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (329, 338), True, 'import numpy as np\n'), ((369, 388), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (377, 388), True, 'import numpy as np\n'), ((421, 440), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (429, 440), True, 'import numpy as np\n'), ((1769, 1785), 'numpy.dtype', 'np.dtype', (['"""int8"""'], {}), "('int8')\n", (1777, 1785), True, 'import numpy as np\n'), ((1817, 1834), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (1825, 1834), True, 'import numpy as np\n'), ((1867, 1884), 'numpy.dtype', 'np.dtype', (['"""int16"""'], {}), "('int16')\n", (1875, 1884), True, 'import numpy as np\n'), ((1917, 1935), 'numpy.dtype', 'np.dtype', (['"""uint16"""'], {}), "('uint16')\n", (1925, 1935), True, 'import numpy as np\n'), ((1969, 1986), 'numpy.dtype', 'np.dtype', (['"""int32"""'], {}), "('int32')\n", (1977, 1986), True, 'import numpy as np\n'), ((2017, 2036), 'numpy.dtype', 'np.dtype', (['"""float32"""'], {}), "('float32')\n", (2025, 2036), True, 'import numpy as np\n'), ((2069, 2088), 'numpy.dtype', 'np.dtype', (['"""float64"""'], {}), "('float64')\n", (2077, 2088), True, 'import numpy as np\n'), ((3391, 3434), 'os.path.basename', 'os.path.basename', (['image_to_segment.filename'], {}), '(image_to_segment.filename)\n', (3407, 3434), False, 'import os\n'), ((3588, 3732), 'pyvips.Image.dzsave', 'Vips.Image.dzsave', (['image_to_segment', 
'base_dir_name'], {'layout': '"""google"""', 'suffix': '""".jpg[Q=90]"""', 'tile_size': 'tile_size', 'depth': '"""one"""', 'properties': '(True)'}), "(image_to_segment, base_dir_name, layout='google', suffix=\n '.jpg[Q=90]', tile_size=tile_size, depth='one', properties=True)\n", (3605, 3732), True, 'import pyvips as Vips\n'), ((1475, 1485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1483, 1485), True, 'import matplotlib.pyplot as plt\n'), ((3123, 3133), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3131, 3133), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3547), 'os.path.exists', 'os.path.exists', (['base_dir_name'], {}), '(base_dir_name)\n', (3532, 3547), False, 'import os\n'), ((3557, 3583), 'os.makedirs', 'os.makedirs', (['base_dir_name'], {}), '(base_dir_name)\n', (3568, 3583), False, 'import os\n'), ((1245, 1258), 'numpy.max', 'np.max', (['im_np'], {}), '(im_np)\n', (1251, 1258), True, 'import numpy as np\n'), ((1265, 1290), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_ncar"""'], {}), "('gist_ncar')\n", (1277, 1290), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2906), 'numpy.max', 'np.max', (['im_np'], {}), '(im_np)\n', (2899, 2906), True, 'import numpy as np\n'), ((2913, 2938), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_ncar"""'], {}), "('gist_ncar')\n", (2925, 2938), True, 'import matplotlib.pyplot as plt\n'), ((1377, 1390), 'numpy.max', 'np.max', (['im_np'], {}), '(im_np)\n', (1383, 1390), True, 'import numpy as np\n'), ((1397, 1417), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (1409, 1417), True, 'import matplotlib.pyplot as plt\n'), ((3025, 3038), 'numpy.max', 'np.max', (['im_np'], {}), '(im_np)\n', (3031, 3038), True, 'import numpy as np\n'), ((3045, 3065), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (3057, 3065), True, 'import matplotlib.pyplot as plt\n')] |
"""
Make a compund path -- in this case two simple polygons, a rectangle
and a triangle. Use CLOSEOPOLY and MOVETO for the different parts of
the compound path
"""
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.pyplot as plt
# Build the compound path: a closed rectangle followed by a closed triangle.
# Each sub-path starts with MOVETO and ends with CLOSEPOLY, whose paired
# vertex is ignored, so (0, 0) serves as a placeholder.
# (The original `vertices = []` / `codes = []` assignments were dead code,
# immediately overwritten below, and have been removed.)
codes = [Path.MOVETO] + [Path.LINETO]*3 + [Path.CLOSEPOLY]
vertices = [(1,1), (1,2), (2, 2), (2, 1), (0,0)]
codes += [Path.MOVETO] + [Path.LINETO]*2 + [Path.CLOSEPOLY]
vertices += [(4,4), (5,5), (5, 4), (0,0)]
vertices = np.array(vertices, float)
path = Path(vertices, codes)
pathpatch = PathPatch(path, facecolor='None', edgecolor='green')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.add_patch(pathpatch)
ax.set_title('A compound path')
# add_patch() alone does not include the patch vertices in the autoscale
# calculation, so update the data limits manually before autoscaling.
ax.dataLim.update_from_data_xy(vertices)
ax.autoscale_view()
plt.show()
| [
"matplotlib.path.Path",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.patches.PathPatch",
"matplotlib.pyplot.show"
] | [((541, 566), 'numpy.array', 'np.array', (['vertices', 'float'], {}), '(vertices, float)\n', (549, 566), True, 'import numpy as np\n'), ((574, 595), 'matplotlib.path.Path', 'Path', (['vertices', 'codes'], {}), '(vertices, codes)\n', (578, 595), False, 'from matplotlib.path import Path\n'), ((609, 661), 'matplotlib.patches.PathPatch', 'PathPatch', (['path'], {'facecolor': '"""None"""', 'edgecolor': '"""green"""'}), "(path, facecolor='None', edgecolor='green')\n", (618, 661), False, 'from matplotlib.patches import PathPatch\n'), ((669, 681), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (679, 681), True, 'import matplotlib.pyplot as plt\n'), ((828, 838), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (836, 838), True, 'import matplotlib.pyplot as plt\n')] |
"""
Results generated
0: Number of hospitals
1: Mean time to thrombolysis
2: Max time to thrombolysis
3: Mean time to thrombectomy
4: Maximum time to thrombectomy
5: Minimum thrombolysis admissions to any one hospital
6: Maximum thrombolysis admissions to any one hospital
7: Minimum thrombectomy admissions to any one hospital
8: Maximum thrombectomy admissions to any one hospital
9: Proportion patients within target thrombolysis time
10: Proportion patients attending unit with target first admissions
11: Proportion patients meeting both thrombolysis targets
12: Proportion patients within target thrombectomy time
13: Proportion patients attending unit with target thrombectomy
14: Proportion patients meeting targets both thrombectomy targets
15: Proportion patients meeting all thrombolysis + thrombectomy targets
16: 95th percentile time for thrombolysis
17: 95th percentile time for thrombectomy
18: Total transfers
19: Total transfer time
20: Clinical outcome (good outcomes) with no treatment
21: Clinical outcome (good outcomes) with treatment
22: Additional good outcomes per 1000 admissions
23: Median time to thrombolysis
24: Median time to thrombectomy
25: Minimum clinical outcome
26: 5th percentile clinical outcome
27: 95th percentile clinical outcome
28: Maximum clinical outcome
"""
# Import general modules
import datetime
import time
import numpy as np
import pandas as pd
# Import class modules
from classes.data import Data
from classes.population import Pop
from classes.score import Score_population
from classes.score_with_diagnostic import Score_population_with_diagnostic
from classes.pareto import Pareto
# from classes.pareto import Pareto
class Model():
    """
    Main algorithm code: a genetic algorithm over hospital-selection
    scenarios.
    1) Initialise algorithm object and load data
    2) Build a starting population (random and/or loaded scenarios)
    3) Breed, mutate, score and Pareto-select over generations
    4) Periodically save scores, populations and admissions to CSV
    """
    def __init__(self):
        """
        Set up algorithm environment.
        Load:
            Global variables
            Underlying data for algorithm:
                List of hospitals
                Patients by LSOA
                Travel matrix from all LSOA to all hospitals
        """
        # Set up class instances (one instance per class)
        self.data = Data()
        self.pop = Pop()
        # Epoch seconds of the last interim results save (0 = never saved).
        self.time_of_last_save = 0
        return
    def initialise_population(self):
        """
        This method creates a starting population.
        This may consist of:
            a) a random population,
            b) a loaded population,
            c) both
        """
        self.pop.population = []
        # Generate random population if required
        if self.data.initial_random_population_size > 0:
            self.pop.population = self.pop.create_random_population(
                self.data.initial_random_population_size,
                self.data.hospitals,
                self.data.fix_hospital_status)
        # Process loaded population if required
        if len(self.data.loaded_population) > 0:
            # Combine random and loaded populations if required
            if len(self.pop.population) > 0:
                self.pop.population = np.vstack((self.data.loaded_population,
                                                 self.pop.population))
            else:
                self.pop.population = self.data.loaded_population
        # Fix hospital status (if required)
        if self.data.fix_hospital_status:
            self.pop.population = self.pop.fix_hospital_status(
                self.data.hospitals, self.pop.population)
        # Remove non-unique rows
        self.pop.population = np.unique(self.pop.population, axis=0)
        # Remove any rows (scenarios) with no hospitals selected at all
        check_hospitals = np.sum(self.pop.population, axis=1) > 0
        self.pop.population = self.pop.population[check_hospitals, :]
        return
    def run_algorithm(self):
        """Run the full genetic algorithm for the configured generations."""
        # Create initial population
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
              'Loading coffee and biscuits')
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
              'Starting generations')
        # Initialise population
        self.initialise_population()
        # Score first population (with or without the diagnostic pathway)
        if self.data.use_diagnostic:
            self.score = Score_population_with_diagnostic(
                self.data, self.pop.population)
        else:
            self.score = Score_population(self.data, self.pop.population)
        # Get pareto front of first population (if required)
        if self.data.apply_pareto_to_starting_population:
            self.pareto_front = Pareto(self.score,
                                      self.data.pareto_scores_used,
                                      self.data.minimum_population_size,
                                      self.data.maximum_population_size,
                                      self.pop)
        # Progress report: results column 11 = proportion meeting both
        # thrombolysis targets; column 22 = additional good outcomes/1000.
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
              'Generation: Start, Patients in target distance/admissions: '
              '%1.4f, Benefit: %2.1f, Population size %5.0f'
              % (np.max(self.score.results[:, 11]), np.max(
                  self.score.results[:, 22]), self.pop.population.shape[0]))
        for generation in range(self.data.maximum_generations):
            # Add new random population (at least one new member)
            new_population_members_required = (
                int(self.pop.population.shape[0] *
                    self.data.proportion_new_random_each_generation) + 1)
            new_population = self.pop.create_random_population(
                new_population_members_required,
                self.data.hospitals,
                self.data.fix_hospital_status)
            # Combine populations before breeding
            self.pop.population = np.vstack((self.pop.population,
                                              new_population))
            # Get new children (crossover + mutation)
            child_population = (self.pop.generate_child_population(
                self.data.max_crossover_points,
                self.data.mutation_rate,
                self.data.hospitals,
                self.data.fix_hospital_status))
            # Combine populations
            self.pop.population = np.vstack((self.pop.population,
                                              child_population))
            # Remove scenarios with no hospitals
            check_hospitals = np.sum(self.pop.population, axis=1) > 0
            self.pop.population = self.pop.population[check_hospitals, :]
            # Remove non-unique rows
            self.pop.population = np.unique(self.pop.population, axis=0)
            # Score population
            if self.data.use_diagnostic:
                self.score = Score_population_with_diagnostic(
                    self.data, self.pop.population)
            else:
                self.score = Score_population(self.data, self.pop.population)
            # Get pareto front (updates population and scores)
            self.pareto_front = Pareto(self.score,
                                      self.data.pareto_scores_used,
                                      self.data.minimum_population_size,
                                      self.data.maximum_population_size,
                                      self.pop)
            # Save latest results if more than 15 minutes since last save
            if time.time() - self.time_of_last_save > (15*60):
                self.time_of_last_save = time.time()
                self.save_results()
            print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
                  'Generation: %5.0f, Patients in target distance/admissions: '
                  '%1.4f, Benefit: %2.1f, Population size %5.0f' % (
                      generation + 1, np.max(self.score.results[:, 11]), np.max(
                          self.score.results[:, 22]),
                      self.pop.population.shape[0]))
        # Final save once all generations are complete
        self.save_results()
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), 'End\n')
        return
    def save_results(self):
        """Save scenario scores, populations and admissions as CSV files
        in the configured output directory."""
        path = self.data.output_location + '/'
        # Save results
        # Column headings for results (see module docstring for the full
        # description of each score column)
        cols = ['# hosp', 'mean_time_IVT', 'max_time_IVT', 'mean_time_ET',
                'max_time_ET', 'min_IVT_adm', 'max_IVT_adm', 'min_ET_adm',
                'max_ET_adm', 'prop_IVT_time', 'prop_IVT_adm', 'prop_both_IVT',
                'prop_ET_time', 'prop_ET_adm', 'prop_both_ET',
                'prop_all_targets', '95_cent_IVT', '95_cent_ET', 'transfers',
                'transfer_time', 'good_no_treat', 'good_treat',
                'add_good_per_1000', 'median_time_IVT', 'median_time_ET',
                'min_add_outcome', '5_cent_add_outcome',
                '95_cent_add_outcome', 'max_add_outcome']
        results_df = pd.DataFrame(self.score.results, columns=cols)
        results_df.index.name = 'scenario'
        results_df.to_csv(path+'results.csv')
        # Save population (one column per hospital, one row per scenario)
        cols = list(self.data.hospitals['Hospital_name'])
        population_df = pd.DataFrame(self.pop.population, columns=cols)
        population_df.index.name = 'scenario'
        population_df.to_csv(path+'population.csv')
        # Save per-hospital first admissions and thrombectomy admissions
        first_admissions_df = pd.DataFrame(
            self.score.hospital_first_admissions, columns=cols)
        first_admissions_df.index.name = 'scenario'
        first_admissions_df.to_csv(path+'first_hospital_admissions.csv')
        hospital_thrombectomy_admissions_df = pd.DataFrame(
            self.score.hospital_thrombectomy_admissions, columns=cols)
        hospital_thrombectomy_admissions_df.index.name = 'scenario'
        hospital_thrombectomy_admissions_df.to_csv(
            path+'thrombectomy_admissions.csv')
        return
if __name__ == '__main__':
    # Build the model and run the genetic algorithm end-to-end.
    stroke_model = Model()
    stroke_model.run_algorithm()
| [
"numpy.unique",
"pandas.DataFrame",
"numpy.max",
"classes.score.Score_population",
"numpy.sum",
"datetime.datetime.now",
"numpy.vstack",
"classes.pareto.Pareto",
"classes.data.Data",
"classes.score_with_diagnostic.Score_population_with_diagnostic",
"time.time",
"classes.population.Pop"
] | [((2174, 2180), 'classes.data.Data', 'Data', ([], {}), '()\n', (2178, 2180), False, 'from classes.data import Data\n'), ((2200, 2205), 'classes.population.Pop', 'Pop', ([], {}), '()\n', (2203, 2205), False, 'from classes.population import Pop\n'), ((8850, 8896), 'pandas.DataFrame', 'pd.DataFrame', (['self.score.results'], {'columns': 'cols'}), '(self.score.results, columns=cols)\n', (8862, 8896), True, 'import pandas as pd\n'), ((9095, 9142), 'pandas.DataFrame', 'pd.DataFrame', (['self.pop.population'], {'columns': 'cols'}), '(self.pop.population, columns=cols)\n', (9107, 9142), True, 'import pandas as pd\n'), ((9272, 9336), 'pandas.DataFrame', 'pd.DataFrame', (['self.score.hospital_first_admissions'], {'columns': 'cols'}), '(self.score.hospital_first_admissions, columns=cols)\n', (9284, 9336), True, 'import pandas as pd\n'), ((9523, 9594), 'pandas.DataFrame', 'pd.DataFrame', (['self.score.hospital_thrombectomy_admissions'], {'columns': 'cols'}), '(self.score.hospital_thrombectomy_admissions, columns=cols)\n', (9535, 9594), True, 'import pandas as pd\n'), ((3574, 3612), 'numpy.unique', 'np.unique', (['self.pop.population'], {'axis': '(0)'}), '(self.pop.population, axis=0)\n', (3583, 3612), True, 'import numpy as np\n'), ((3684, 3719), 'numpy.sum', 'np.sum', (['self.pop.population'], {'axis': '(1)'}), '(self.pop.population, axis=1)\n', (3690, 3719), True, 'import numpy as np\n'), ((4302, 4366), 'classes.score_with_diagnostic.Score_population_with_diagnostic', 'Score_population_with_diagnostic', (['self.data', 'self.pop.population'], {}), '(self.data, self.pop.population)\n', (4334, 4366), False, 'from classes.score_with_diagnostic import Score_population_with_diagnostic\n'), ((4423, 4471), 'classes.score.Score_population', 'Score_population', (['self.data', 'self.pop.population'], {}), '(self.data, self.pop.population)\n', (4439, 4471), False, 'from classes.score import Score_population\n'), ((4624, 4757), 'classes.pareto.Pareto', 'Pareto', (['self.score', 
'self.data.pareto_scores_used', 'self.data.minimum_population_size', 'self.data.maximum_population_size', 'self.pop'], {}), '(self.score, self.data.pareto_scores_used, self.data.\n minimum_population_size, self.data.maximum_population_size, self.pop)\n', (4630, 4757), False, 'from classes.pareto import Pareto\n'), ((5811, 5859), 'numpy.vstack', 'np.vstack', (['(self.pop.population, new_population)'], {}), '((self.pop.population, new_population))\n', (5820, 5859), True, 'import numpy as np\n'), ((6248, 6298), 'numpy.vstack', 'np.vstack', (['(self.pop.population, child_population)'], {}), '((self.pop.population, child_population))\n', (6257, 6298), True, 'import numpy as np\n'), ((6610, 6648), 'numpy.unique', 'np.unique', (['self.pop.population'], {'axis': '(0)'}), '(self.pop.population, axis=0)\n', (6619, 6648), True, 'import numpy as np\n'), ((7028, 7161), 'classes.pareto.Pareto', 'Pareto', (['self.score', 'self.data.pareto_scores_used', 'self.data.minimum_population_size', 'self.data.maximum_population_size', 'self.pop'], {}), '(self.score, self.data.pareto_scores_used, self.data.\n minimum_population_size, self.data.maximum_population_size, self.pop)\n', (7034, 7161), False, 'from classes.pareto import Pareto\n'), ((3082, 3143), 'numpy.vstack', 'np.vstack', (['(self.data.loaded_population, self.pop.population)'], {}), '((self.data.loaded_population, self.pop.population))\n', (3091, 3143), True, 'import numpy as np\n'), ((6424, 6459), 'numpy.sum', 'np.sum', (['self.pop.population'], {'axis': '(1)'}), '(self.pop.population, axis=1)\n', (6430, 6459), True, 'import numpy as np\n'), ((6750, 6814), 'classes.score_with_diagnostic.Score_population_with_diagnostic', 'Score_population_with_diagnostic', (['self.data', 'self.pop.population'], {}), '(self.data, self.pop.population)\n', (6782, 6814), False, 'from classes.score_with_diagnostic import Score_population_with_diagnostic\n'), ((6883, 6931), 'classes.score.Score_population', 'Score_population', (['self.data', 
'self.pop.population'], {}), '(self.data, self.pop.population)\n', (6899, 6931), False, 'from classes.score import Score_population\n'), ((7489, 7500), 'time.time', 'time.time', ([], {}), '()\n', (7498, 7500), False, 'import time\n'), ((3890, 3913), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3911, 3913), False, 'import datetime\n'), ((4001, 4024), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4022, 4024), False, 'import datetime\n'), ((4924, 4947), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4945, 4947), False, 'import datetime\n'), ((5130, 5163), 'numpy.max', 'np.max', (['self.score.results[:, 11]'], {}), '(self.score.results[:, 11])\n', (5136, 5163), True, 'import numpy as np\n'), ((5165, 5198), 'numpy.max', 'np.max', (['self.score.results[:, 22]'], {}), '(self.score.results[:, 22])\n', (5171, 5198), True, 'import numpy as np\n'), ((7400, 7411), 'time.time', 'time.time', ([], {}), '()\n', (7409, 7411), False, 'import time\n'), ((7988, 8011), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8009, 8011), False, 'import datetime\n'), ((7556, 7579), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7577, 7579), False, 'import datetime\n'), ((7795, 7828), 'numpy.max', 'np.max', (['self.score.results[:, 11]'], {}), '(self.score.results[:, 11])\n', (7801, 7828), True, 'import numpy as np\n'), ((7830, 7863), 'numpy.max', 'np.max', (['self.score.results[:, 22]'], {}), '(self.score.results[:, 22])\n', (7836, 7863), True, 'import numpy as np\n')] |
"""-----------------------------------------------------------------------------
sample.py (Last Updated: 01/26/2021)
The purpose of this script is to actually to run the sample project.
Specifically, it will initiate a call to file watcher that searches for incoming
dicom files, do some sort of analysis based on the dicom file that's been received,
and then output the answer.
The purpose of this *particular* script is to demonstrated how you can use the
various scripts, functions, etc. we have developed for your use! The functions
we will reference are in 'rt-cloud/rtCommon/'.
Finally, this script is called from the projectInterface which has a web interface
and accepts commands to 'start' or 'stop' a run. When the 'start' button is
pressed it will run this scirpt passing in whatever conifgurations have been
set in the web page as a configuration file. Note that projectInterface is
started from the script 'run-projectInterface.sh'.
-----------------------------------------------------------------------------"""
# Module-level switches: `verbose` enables diagnostic printing throughout
# this script; `useInitWatch` selects the initWatch/watchFile mechanism for
# receiving dicoms (vs. the streaming interface) later in the script.
verbose = False
useInitWatch = True
if verbose:
    # print a short introduction on the internet window
    print(""
    "-----------------------------------------------------------------------------\n"
    "The purpose of this sample project is to demonstrate different ways you can\n"
    "implement functions, structures, etc. that we have developed for your use.\n"
    "You will find some comments printed on this html browser. However, if you want\n"
    "more information about how things work please take a look at ‘sample.py’.\n"
    "Good luck!\n"
    "-----------------------------------------------------------------------------")
# import important modules
import os
import sys
import argparse
import warnings
import numpy as np
import nibabel as nib
import scipy.io as sio
if verbose:
    print(''
    '|||||||||||||||||||||||||||| IGNORE THIS WARNING ||||||||||||||||||||||||||||')
with warnings.catch_warnings():
    if not verbose:
        # Importing nibabel's dicomreaders can emit a UserWarning; silence
        # it unless the user asked for verbose output.
        warnings.filterwarnings("ignore", category=UserWarning)
    from nibabel.nicom import dicomreaders
if verbose:
    print(''
    '|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||')
# obtain full path for current directory: '.../rt-cloud/projects/sample'
currPath = os.path.dirname(os.path.realpath(__file__))
# obtain full path for root directory: '.../rt-cloud'
rootPath = os.path.dirname(os.path.dirname(currPath))
# add the path for the root directory to your python path so that you can import
# project modules from rt-cloud
sys.path.append(rootPath)
# import project modules from rt-cloud
from rtCommon.utils import loadConfigFile, stringPartialFormat
from rtCommon.clientInterface import ClientInterface
from rtCommon.imageHandling import readRetryDicomFromDataInterface, convertDicomImgToNifti
from rtCommon.dataInterface import DataInterface #added by QL
# obtain the full path for the configuration toml file
defaultConfig = os.path.join(currPath, 'conf/sample.toml')
def doRuns(cfg, dataInterface, subjInterface, webInterface):
    """
    This function is called by 'main()' below. Here, we use the 'dataInterface'
    to read in dicoms (presumably from the scanner, but here it's from a folder
    with previously collected dicom files), doing some sort of analysis in the
    cloud, and then sending the info to the web browser.
    INPUT:
    [1] cfg - configuration file with important variables)
    [2] dataInterface - this will allow this script runnin in the cloud to access
        files from the stimulus computer, which receives dicom files directly
        from the MRI Scanner console
    [3] subjInterface - this allows sending feedback (e.g. classification results)
        to a subjectService running on the presentation computer to provide
        feedback to the subject (and optionally get their response).
    [4] webInterface - this allows updating information on the experimenter webpage.
        For example to plot data points, or update status messages.
    OUTPUT:
        None.
    """
    # NOTE(review): 'verbose' and 'useInitWatch' are module-level globals set by
    # main(); calling doRuns() without main() would raise NameError on them.
    # variables we'll use throughout
    scanNum = cfg.scanNum[0]
    runNum = cfg.runNum[0]
    print(f"Doing run {runNum}, scan {scanNum}")
    """
    Before we get ahead of ourselves, we need to make sure that the necessary file
    types are allowed (meaning, we are able to read them in)... in this example,
    at the very least we need to have access to dicom and txt file types.
    use the function 'allowedFileTypes' in 'fileClient.py' to check this!
    If allowedTypes doesn't include the file types we need to use then the
    file service (scannerDataService) running at the control room computer will
    need to be restarted with the correct list of allowed types provided.
    INPUT: None
    OUTPUT:
      [1] allowedFileTypes (list of allowed file types)
    """
    allowedFileTypes = dataInterface.getAllowedFileTypes()
    if verbose:
        print(""
        "-----------------------------------------------------------------------------\n"
        "Before continuing, we need to make sure that dicoms are allowed. To verify\n"
        "this, use the 'allowedFileTypes'.\n"
        "Allowed file types: %s" %allowedFileTypes)
    # obtain the path for the directory where the subject's dicoms live
    if cfg.isSynthetic:
        cfg.dicomDir = cfg.imgDir
    else:
        subj_imgDir = "{}.{}.{}".format(cfg.datestr, cfg.subjectName, cfg.subjectName)
        cfg.dicomDir = os.path.join(cfg.imgDir, subj_imgDir)
    if verbose:
        print("Location of the subject's dicoms: \n" + cfg.dicomDir + "\n"
        "-----------------------------------------------------------------------------")
    # If a dicomNamePattern is supplied in the config file, such as
    # "001_{SCAN:06d}_{TR:06d}.dcm", then call stringPartialFormat() to
    # set the SCAN number for the series of Dicoms we will be streaming.
    dicomScanNamePattern = stringPartialFormat(cfg.dicomNamePattern, 'SCAN', scanNum)
    """
    There are several ways to receive Dicom data from the control room computer:
    1. Using `initWatch()` and 'watchFile()` commands of dataInterface or the
       helper function `readRetryDicomFromDataInterface()` which calls watchFile()
       internally.
    2. Using the streaming functions with `initScannerStream()` and `getImageData(stream)`
       which are also part of the dataInterface.
    """
    if useInitWatch is True:
        """
        Initialize a watch for the entire dicom folder using the function 'initWatch'
        of the dataInterface. (Later we will use watchFile() to look for a specific dicom)
        INPUT:
            [1] cfg.dicomDir (where the subject's dicom files live)
            [2] cfg.dicomNamePattern (the naming pattern of dicom files)
            [3] cfg.minExpectedDicomSize (a check on size to make sure we don't
                accidentally grab a dicom before it's fully acquired)
        """
        if verbose:
            print("• initalize a watch for the dicoms using 'initWatch'")
        dataInterface.initWatch(cfg.dicomDir, dicomScanNamePattern, cfg.minExpectedDicomSize)
    else: # use Stream functions
        """
        Initialize a Dicom stream by indicating the directory and dicom file pattern that
        will be streamed.
        INPUTs to initScannerStream():
            [1] cfg.dicomDir (where the subject's dicom files live)
            [2] dicomScanNamePattern (the naming pattern of dicom files)
            [3] cfg.minExpectedDicomSize (a check on size to make sure we don't
                accidentally grab a dicom before it's fully acquired)
        """
        streamId = dataInterface.initScannerStream(cfg.dicomDir,
                                                dicomScanNamePattern,
                                                cfg.minExpectedDicomSize)
    """
    We will use the function plotDataPoint in webInterface whenever we
    want to send values to the web browser so that they can be plotted in the
    --Data Plots-- tab.
    However at the start of a run we will want to clear the plot, and we can use
    clearRunPlot(runId), or clearAllPlots() also in the webInterface object.
    """
    if verbose:
        print("• clear any pre-existing plot for this run using 'clearRunPlot(runNum)'")
    webInterface.clearRunPlot(runNum)
    if verbose:
        print(""
        "-----------------------------------------------------------------------------\n"
        "In this sample project, we will retrieve the dicom file for a given TR and\n"
        "then convert the dicom file to a nifti object. **IMPORTANT: In this sample\n"
        "we won't care about the exact location of voxel data (we're only going to\n"
        "indiscriminately get the average activation value for all voxels). This\n"
        "actually isn't something you want to actually do but we'll go through the\n"
        "to get the data in the appropriate nifti format in the advanced sample\n"
        "project (amygActivation).** We are doing things in this way because it is the simplest way\n"
        "we can highlight the functionality of rt-cloud, which is the purpose of\n"
        "this sample project.\n"
        ".............................................................................\n"
        "NOTE: We will use the function readRetryDicomFromDataInterface() to retrieve\n"
        "specific dicom files from the subject's dicom folder. This function calls\n"
        "'dataInterface.watchFile' to look for the next dicom from the scanner.\n"
        "Since we're using previously collected dicom data, this functionality is\n"
        "not particularly relevant for this sample project but it is very important\n"
        "when running real-time experiments.\n"
        "-----------------------------------------------------------------------------\n")
    num_total_TRs = 10 # number of TRs to use for example 1
    if cfg.isSynthetic:
        num_total_TRs = cfg.numSynthetic
    all_avg_activations = np.zeros((num_total_TRs, 1))
    for this_TR in np.arange(num_total_TRs):
        # declare variables that are needed to use in get data requests
        timeout_file = 5 # small number because of demo, can increase for real-time
        dicomFilename = dicomScanNamePattern.format(TR=this_TR)
        if useInitWatch is True:
            """
            Use 'readRetryDicomFromDataInterface' in 'imageHandling.py' to wait for dicom
            files to be written by the scanner (uses 'watchFile' internally) and then
            reading the dicom file once it is available.
            INPUT:
                [1] dataInterface (allows a cloud script to access files from the
                    control room computer)
                [2] filename (the dicom file we're watching for and want to load)
                [3] timeout (time spent waiting for a file before timing out)
            OUTPUT:
                [1] dicomData (with class 'pydicom.dataset.FileDataset')
            """
            print(f'Processing TR {this_TR}')
            if verbose:
                print("• use 'readRetryDicomFromDataInterface' to read dicom file for",
                    "TR %d, %s" %(this_TR, dicomFilename))
            dicomData = readRetryDicomFromDataInterface(dataInterface, dicomFilename,
                timeout_file)
        else: # use Stream functions
            """
            Use dataInterface.getImageData(streamId) to query a stream, waiting for a
            dicom file to be written by the scanner and then reading the dicom file
            once it is available.
            INPUT:
                [1] dataInterface (allows a cloud script to access files from the
                    control room computer)
                [2] streamId - from initScannerStream() called above
                [3] TR number - the image volume number to retrieve
                [3] timeout (time spent waiting for a file before timing out)
            OUTPUT:
                [1] dicomData (with class 'pydicom.dataset.FileDataset')
            """
            print(f'Processing TR {this_TR}')
            if verbose:
                print("• use dataInterface.getImageData() to read dicom file for"
                    "TR %d, %s" %(this_TR, dicomFilename))
            dicomData = dataInterface.getImageData(streamId, int(this_TR), timeout_file)
        if dicomData is None:
            print('Error: getImageData returned None')
            return
        dicomData.convert_pixel_data()
        if cfg.isSynthetic:
            niftiObject = convertDicomImgToNifti(dicomData)
        else:
            # use 'dicomreaders.mosaic_to_nii' to convert the dicom data into a nifti
            # object. additional steps need to be taken to get the nifti object in
            # the correct orientation, but we will ignore those steps here. refer to
            # the advanced sample project (amygActivation) for more info about that
            if verbose:
                print("| convert dicom data into a nifti object")
            # NOTE(review): 'dicomreaders' is assumed to be imported earlier in this
            # file (nibabel.nicom.dicomreaders) — it is not imported in this section.
            niftiObject = dicomreaders.mosaic_to_nii(dicomData)
        # take the average of all the activation values
        avg_niftiData = np.mean(niftiObject.get_fdata())
        # avg_niftiData = np.round(avg_niftiData,decimals=2)
        print("| average activation value for TR %d is %f" %(this_TR, avg_niftiData))
        max_niftiData = np.amax(niftiObject.get_fdata())
        if verbose:
            print("| max activation value for TR %d is %d" %(this_TR, max_niftiData))
        """
        INPUT:
            [1] projectComm (the communication pipe)
            [2] runNum (not to be confused with the scan number)
            [3] this_TR (timepoint of interest)
            [4] value (value you want to send over to the web browser)
            ** the inputs MUST be python integers; it won't work if it's a numpy int
        here, we are clearing an already existing plot
        """
        # Now we will send the result to be used to provide feedback for the subject.
        # Using subjInterface.setResult() will send the classification result to a
        # remote subjectService that can use the value to update the display on
        # the presentation computer.
        if verbose:
            print("| send result to the presentation computer for provide subject feedback")
        subjInterface.setResult(runNum, int(this_TR), float(avg_niftiData))
        # Finally we will use use webInterface.plotDataPoint() to send the result
        # to the web browser to be plotted in the --Data Plots-- tab.
        # Each run will have its own data plot, the x-axis will the the TR vol
        # number and the y-axis will be the classification value (float).
        # IMPORTANT ** the inputs MUST be python integers or python floats;
        # it won't work if it's a numpy int or numpy float **
        if verbose:
            print("| send result to the web, plotted in the 'Data Plots' tab")
        webInterface.plotDataPoint(runNum, int(this_TR), float(avg_niftiData))
        # save the activations value info into a vector that can be saved later
        all_avg_activations[this_TR] = avg_niftiData
    # create the full path filename of where we want to save the activation values vector.
    # we're going to save things as .txt and .mat files
    output_textFilename = '/tmp/cloud_directory/tmp/avg_activations.txt'
    output_matFilename = os.path.join('/tmp/cloud_directory/tmp/avg_activations.mat')
    # NOTE(review): the two assignments below overwrite the /tmp paths above with
    # hard-coded gpfs paths, making the /tmp assignments dead code — confirm whether
    # this site-specific override is intentional.
    output_textFilename = '/gpfs/milgram/pi/turk-browne/projects/rt-cloud/projects/sample/dicomDir/20190219.0219191_faceMatching.0219191_faceMatching/avg_activations.txt'
    output_matFilename = os.path.join('/gpfs/milgram/pi/turk-browne/projects/rt-cloud/projects/sample/dicomDir/20190219.0219191_faceMatching.0219191_faceMatching/avg_activations.mat')
    # use 'putFile' from the dataInterface to save the .txt file
    # INPUT:
    #    [1] filename (full path!)
    #    [2] data (that you want to write into the file)
    if verbose:
        print(""
        "-----------------------------------------------------------------------------\n"
        "• save activation value as a text file to tmp folder")
    dataInterface.putFile(output_textFilename, str(all_avg_activations))
    # use sio.save mat from scipy to save the matlab file
    # NOTE(review): 'sio' is assumed to be imported earlier in this file
    # (import scipy.io as sio) — it is not imported in this section.
    if verbose:
        print("• save activation value as a matlab file to tmp folder")
    sio.savemat(output_matFilename, {'value':all_avg_activations})
    if verbose:
        print(""
        "-----------------------------------------------------------------------------\n"
        "REAL-TIME EXPERIMENT COMPLETE!")
    return
def main(argv=None):
    """
    This is the main function that is called when you run 'sample.py'.
    Here, you will load the configuration settings specified in the toml configuration
    file, initiate the clientInterface for communication with the projectServer (via
    its sub-interfaces: dataInterface, subjInterface, and webInterface). And then call
    the function 'doRuns' to actually start doing the experiment.

    INPUT:
        argv - optional list of command-line arguments (defaults to sys.argv)
    OUTPUT:
        0 on success
    """
    global verbose, useInitWatch
    # Some generally recommended arguments to parse for all experiment scripts
    argParser = argparse.ArgumentParser()
    argParser.add_argument('--config', '-c', default=defaultConfig, type=str,
                           help='experiment config file (.json or .toml)')
    argParser.add_argument('--runs', '-r', default=None, type=str,
                           help='Comma separated list of run numbers')
    argParser.add_argument('--scans', '-s', default=None, type=str,
                           help='Comma separated list of scan number')
    argParser.add_argument('--yesToPrompts', '-y', default=False, action='store_true',
                           help='automatically answer yes to any prompts')
    # Some additional parameters only used for this sample project
    argParser.add_argument('--useInitWatch', '-w', default=False, action='store_true',
                           help='use initWatch() functions instead of stream functions')
    argParser.add_argument('--noVerbose', '-nv', default=False, action='store_true',
                           help='suppress verbose output')
    args = argParser.parse_args(argv)
    useInitWatch = args.useInitWatch
    verbose = not args.noVerbose
    # load the experiment configuration file
    cfg = loadConfigFile(args.config)
    # override config file run and scan values if specified on the command line
    if args.runs is not None:
        print("runs: ", args.runs)
        cfg.runNum = [int(x) for x in args.runs.split(',')]
    if args.scans is not None:
        print("scans: ", args.scans)
        # BUGFIX: was 'cfg.ScanNum' (capital S), an attribute doRuns() never reads —
        # doRuns() uses cfg.scanNum, so the --scans override silently did nothing.
        cfg.scanNum = [int(x) for x in args.scans.split(',')]
    # Initialize the RPC connection to the projectInterface.
    # This will give us a dataInterface for retrieving files,
    # a subjectInterface for giving feedback, and a webInterface
    # for updating what is displayed on the experimenter's webpage.
    clientInterfaces = ClientInterface(yesToPrompts=args.yesToPrompts)
    #dataInterface = clientInterfaces.dataInterface
    subjInterface = clientInterfaces.subjInterface
    webInterface = clientInterfaces.webInterface
    ## Added by QL: use a local (non-remote) DataInterface with unrestricted
    ## directories and file types instead of the projectServer's dataInterface.
    allowedDirs = ['*']
    allowedFileTypes = ['*']
    dataInterface = DataInterface(dataRemote=False, allowedDirs=allowedDirs,
                                  allowedFileTypes=allowedFileTypes)
    # Also try the placeholder for bidsInterface (an upcoming feature)
    bidsInterface = clientInterfaces.bidsInterface
    res = bidsInterface.echo("test")
    print(res)
    # obtain paths for important directories (e.g. location of dicom files)
    if cfg.imgDir is None:
        cfg.imgDir = os.path.join(currPath, 'dicomDir')
    cfg.codeDir = currPath
    # now that we have the necessary variables, call the function 'doRuns' in order
    # to actually start reading dicoms and doing your analyses of interest!
    # INPUT:
    #   [1] cfg (configuration file with important variables)
    #   [2] dataInterface (this will allow a script from the cloud to access files
    #       from the stimulus computer that receives dicoms from the Siemens
    #       console computer)
    #   [3] subjInterface - this allows sending feedback (e.g. classification results)
    #       to a subjectService running on the presentation computer to provide
    #       feedback to the subject (and optionally get their response).
    #   [4] webInterface - this allows updating information on the experimenter webpage.
    #       For example to plot data points, or update status messages.
    doRuns(cfg, dataInterface, subjInterface, webInterface)
    return 0
if __name__ == "__main__":
    # If 'sample.py' is invoked as a program, then actually go through all of the
    # portions of this script. This branch is not taken if functions are imported
    # from another script using "from sample.py import FUNCTION".
    # BUGFIX: propagate main()'s return value as the process exit code instead of
    # discarding it and hard-coding sys.exit(0). main() currently returns 0 on
    # success, so successful runs exit with the same status as before.
    sys.exit(main())
| [
"nibabel.nicom.dicomreaders.mosaic_to_nii",
"scipy.io.savemat",
"rtCommon.imageHandling.convertDicomImgToNifti",
"argparse.ArgumentParser",
"rtCommon.imageHandling.readRetryDicomFromDataInterface",
"rtCommon.dataInterface.DataInterface",
"os.path.join",
"warnings.catch_warnings",
"os.path.realpath",... | [((2594, 2619), 'sys.path.append', 'sys.path.append', (['rootPath'], {}), '(rootPath)\n', (2609, 2619), False, 'import sys\n'), ((3000, 3042), 'os.path.join', 'os.path.join', (['currPath', '"""conf/sample.toml"""'], {}), "(currPath, 'conf/sample.toml')\n", (3012, 3042), False, 'import os\n'), ((1972, 1997), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1995, 1997), False, 'import warnings\n'), ((2342, 2368), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2358, 2368), False, 'import os\n'), ((2451, 2476), 'os.path.dirname', 'os.path.dirname', (['currPath'], {}), '(currPath)\n', (2466, 2476), False, 'import os\n'), ((6039, 6097), 'rtCommon.utils.stringPartialFormat', 'stringPartialFormat', (['cfg.dicomNamePattern', '"""SCAN"""', 'scanNum'], {}), "(cfg.dicomNamePattern, 'SCAN', scanNum)\n", (6058, 6097), False, 'from rtCommon.utils import loadConfigFile, stringPartialFormat\n'), ((10133, 10161), 'numpy.zeros', 'np.zeros', (['(num_total_TRs, 1)'], {}), '((num_total_TRs, 1))\n', (10141, 10161), True, 'import numpy as np\n'), ((10181, 10205), 'numpy.arange', 'np.arange', (['num_total_TRs'], {}), '(num_total_TRs)\n', (10190, 10205), True, 'import numpy as np\n'), ((15574, 15634), 'os.path.join', 'os.path.join', (['"""/tmp/cloud_directory/tmp/avg_activations.mat"""'], {}), "('/tmp/cloud_directory/tmp/avg_activations.mat')\n", (15586, 15634), False, 'import os\n'), ((15831, 15999), 'os.path.join', 'os.path.join', (['"""/gpfs/milgram/pi/turk-browne/projects/rt-cloud/projects/sample/dicomDir/20190219.0219191_faceMatching.0219191_faceMatching/avg_activations.mat"""'], {}), "(\n '/gpfs/milgram/pi/turk-browne/projects/rt-cloud/projects/sample/dicomDir/20190219.0219191_faceMatching.0219191_faceMatching/avg_activations.mat'\n )\n", (15843, 15999), False, 'import os\n'), ((16584, 16647), 'scipy.io.savemat', 'sio.savemat', (['output_matFilename', "{'value': all_avg_activations}"], {}), 
"(output_matFilename, {'value': all_avg_activations})\n", (16595, 16647), True, 'import scipy.io as sio\n'), ((17390, 17415), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17413, 17415), False, 'import argparse\n'), ((18560, 18587), 'rtCommon.utils.loadConfigFile', 'loadConfigFile', (['args.config'], {}), '(args.config)\n', (18574, 18587), False, 'from rtCommon.utils import loadConfigFile, stringPartialFormat\n'), ((19184, 19231), 'rtCommon.clientInterface.ClientInterface', 'ClientInterface', ([], {'yesToPrompts': 'args.yesToPrompts'}), '(yesToPrompts=args.yesToPrompts)\n', (19199, 19231), False, 'from rtCommon.clientInterface import ClientInterface\n'), ((19774, 19870), 'rtCommon.dataInterface.DataInterface', 'DataInterface', ([], {'dataRemote': '(False)', 'allowedDirs': 'allowedDirs', 'allowedFileTypes': 'allowedFileTypes'}), '(dataRemote=False, allowedDirs=allowedDirs, allowedFileTypes=\n allowedFileTypes)\n', (19787, 19870), False, 'from rtCommon.dataInterface import DataInterface\n'), ((21503, 21514), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (21511, 21514), False, 'import sys\n'), ((2027, 2082), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (2050, 2082), False, 'import warnings\n'), ((5576, 5613), 'os.path.join', 'os.path.join', (['cfg.imgDir', 'subj_imgDir'], {}), '(cfg.imgDir, subj_imgDir)\n', (5588, 5613), False, 'import os\n'), ((20208, 20242), 'os.path.join', 'os.path.join', (['currPath', '"""dicomDir"""'], {}), "(currPath, 'dicomDir')\n", (20220, 20242), False, 'import os\n'), ((11372, 11447), 'rtCommon.imageHandling.readRetryDicomFromDataInterface', 'readRetryDicomFromDataInterface', (['dataInterface', 'dicomFilename', 'timeout_file'], {}), '(dataInterface, dicomFilename, timeout_file)\n', (11403, 11447), False, 'from rtCommon.imageHandling import readRetryDicomFromDataInterface, convertDicomImgToNifti\n'), ((12703, 12736), 
'rtCommon.imageHandling.convertDicomImgToNifti', 'convertDicomImgToNifti', (['dicomData'], {}), '(dicomData)\n', (12725, 12736), False, 'from rtCommon.imageHandling import readRetryDicomFromDataInterface, convertDicomImgToNifti\n'), ((13211, 13248), 'nibabel.nicom.dicomreaders.mosaic_to_nii', 'dicomreaders.mosaic_to_nii', (['dicomData'], {}), '(dicomData)\n', (13237, 13248), False, 'from nibabel.nicom import dicomreaders\n')] |
import numpy as np
from .base_classifier import BaseClassifier
from regex.handlers import CaptureHandler
from collections import defaultdict
class CaptureClassifier(BaseClassifier):
    """Class specialized in capturing information of interest, e.g. Country of Birth.

    Each capture class is scored via its list of regexes (delegated to the
    handler); the class with the highest bias-adjusted score is returned,
    falling back to the negative label when nothing scores above the threshold.
    """

    def __init__(self, classifier_name="CaptureClassifier", regexes=None, data=None,
                 labels=None, ids=None, capture_biases=None, handler=None,
                 negative_label="None"):
        """
        Keyword Arguments:
            classifier_name {str} -- Name of classifier (default: {"CaptureClassifier"})
            regexes {dictionary} -- A dictionary of regex_name to a list of Regex objects (default: {None})
            data {list} -- List of data (default: {None})
            labels {list} -- List of labels (default: {None})
            ids {list} -- List of ids (default: {None})
            capture_biases {dictionary} -- A dictionary which maps the capture class name to a bias value (default: {None})
            handler {CaptureHandler} -- Handler used to score data; a fresh
                CaptureHandler is created when not provided (default: {None})
            negative_label {str} -- Default negative label (used in classifications which
                don't fit into any class, or as the negative class in single-class
                classification) (default: {"None"})
        """
        super().__init__(classifier_name=classifier_name, data=data, labels=labels, ids=ids)
        # BUGFIX: guard against the default regexes=None — the update() call below
        # would otherwise raise AttributeError on None.
        self.regexes = regexes if regexes is not None else {}
        # Shallow-copy the biases so we never mutate the caller's dict below.
        self.capture_biases = dict(capture_biases) if capture_biases else {}
        self.capture_biases.update({negative_label: 0})
        self.regexes.update({negative_label: []})
        self.negative_label = negative_label
        # BUGFIX: the signature default used to be 'handler=CaptureHandler()', a
        # mutable default evaluated once at class-definition time and shared by
        # every instance; build a fresh handler per instance instead.
        self.handler = handler if handler is not None else CaptureHandler()

    def classify(self, class_to_scores, threshold=0):
        """Given a dictionary of class->score, return the class with the highest score.

        Arguments:
            class_to_scores {dictionary} -- class to scores

        Keyword Arguments:
            threshold {int} -- the winning score must be strictly greater than this,
                otherwise negative_label is returned (default: {0})

        Returns:
            label {String} -- Assigned label
            score {int} -- Assigned score
        """
        # If no captures were found, return negative_label
        if len(class_to_scores) == 0:
            return self.negative_label, 0
        # Pick the class with the maximum score
        class_name, score = max(class_to_scores.items(), key=lambda i: i[1])
        # Return negative label unless the score clears the threshold
        if score > threshold:
            return class_name, score
        else:
            return self.negative_label, 0

    def run_classifier(self, sets=None, class_threshold=0, preprocess_func=None,
                       label_func=None, pwds=None, classify_func=None, **kwargs):
        """Runs the trained classifier on the given datasets.

        Note these datasets must be loaded into the self.dataset object first or
        initialized in some other manner.

        Keyword Arguments:
            sets {list} -- Datasets to run the classifier on (default: {["train", "valid"]})
            class_threshold {int} -- threshold used to classify data as classified label or negative_label (default: {0})
            preprocess_func {function} -- sentence preprocessing function (default: {None})
            label_func {function} -- function used in labelling. Mainly used to convert 1 set of labels to another (default: {None})
            pwds {dictionary} -- Personalized word dictionaries which contain dictionary_name -> list of words (default: {None})
            classify_func {function} -- optional custom classifier called as
                classify_func(class_matches, captures, capture_scores, **kwargs) (default: {None})
        """
        # BUGFIX: avoid a mutable default argument for 'sets'.
        if sets is None:
            sets = ["train", "valid"]
        print("\nRunning Classifier:", self.name)
        for data_set in sets:
            assert data_set in self.dataset, "%s not in dataset" % data_set
            print("Currently classifying {} with {} datapoints".format(data_set, len(self.dataset[data_set]["data"])))
            preds = []
            data = self.dataset[data_set]["data"]
            self.dataset[data_set]["matches"] = []
            self.dataset[data_set]["scores"] = []
            for datum in data:
                capture_scores = defaultdict(int)
                captures = {}
                class_matches = {}
                # Forced to iterate classes because self.regexes is stored as
                # {"class": [Regex objects]}
                for class_name in self.regexes:
                    matches = {}
                    # Ask handler to get capture_scores, captures, and matches.
                    # NOTE(review): the handler's return value *replaces* 'captures'
                    # and 'capture_scores' on each class iteration rather than merging
                    # them — confirm handler.score_data() aggregates across calls.
                    if len(self.regexes[class_name]) > 0:
                        matches, captures, capture_scores = self.handler.score_data(
                            datum, self.regexes[class_name],
                            pwds=pwds, preprocess_func=preprocess_func,
                            capture_convert=label_func)
                    # Storing matches in object
                    class_matches[class_name] = matches
                # Adding biases
                for bias in self.capture_biases:
                    capture_scores[bias] += self.capture_biases[bias]
                self.dataset[data_set]["matches"].append(class_matches)
                self.dataset[data_set]["scores"].append(capture_scores)
                # getting prediction
                if not classify_func:
                    preds.append(self.classify(capture_scores, threshold=class_threshold)[0])
                else:
                    preds.append(classify_func(class_matches, captures, capture_scores, **kwargs))
            preds = np.array(preds)
            self.dataset[data_set]["preds"] = preds
| [
"numpy.array",
"regex.handlers.CaptureHandler",
"collections.defaultdict"
] | [((413, 429), 'regex.handlers.CaptureHandler', 'CaptureHandler', ([], {}), '()\n', (427, 429), False, 'from regex.handlers import CaptureHandler\n'), ((5639, 5654), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (5647, 5654), True, 'import numpy as np\n'), ((4126, 4142), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4137, 4142), False, 'from collections import defaultdict\n')] |
from rllab.misc import logger
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import os.path as osp
import numpy as np
import math
import random
def line_intersect(pt1, pt2, ptA, ptB):
    """
    Taken from https://www.cs.hmc.edu/ACM/lectures/intersections.html

    Return the intersection of Line(pt1, pt2) and Line(ptA, ptB) as a tuple
    (xi, yi, valid, r, s) where:
      (xi, yi) is the intersection point,
      r is the scalar such that (xi, yi) = pt1 + r*(pt2 - pt1),
      s is the scalar such that (xi, yi) = ptA + s*(ptB - ptA),
      valid == 0 if there are 0 or infinitely many intersections (invalid),
      valid == 1 if there is a unique intersection ON the segment.
    """
    DET_TOLERANCE = 0.00000001

    # First line parameterized as pt1 + r*(dx1, dy1)
    x1, y1 = pt1
    x2, y2 = pt2
    dx1, dy1 = x2 - x1, y2 - y1

    # Second line parameterized as ptA + s*(dx, dy)
    x, y = ptA
    xB, yB = ptB
    dx, dy = xB - x, yB - y

    # We solve the 2x2 linear system
    #   [ dx1  -dx ][ r ]   [ x - x1 ]
    #   [ dy1  -dy ][ s ] = [ y - y1 ]
    # whose determinant vanishes exactly when the lines are parallel.
    DET = dy1 * dx - dx1 * dy
    if math.fabs(DET) < DET_TOLERANCE:
        return (0, 0, 0, 0, 0)

    DETinv = 1.0 / DET

    # Scalar position of the intersection along each line.
    r = DETinv * (dx * (y - y1) - dy * (x - x1))
    s = DETinv * (dx1 * (y - y1) - dy1 * (x - x1))

    # Average the two (identical up to rounding) descriptions of the point.
    xi = (x1 + r * dx1 + x + s * dx) / 2.0
    yi = (y1 + r * dy1 + y + s * dy) / 2.0
    return (xi, yi, 1, r, s)
def ray_segment_intersect(ray, segment):
    """
    Check if the ray originated from (x, y) with direction theta intersects the
    line segment (x1, y1) -- (x2, y2), and return the intersection point (xo, yo)
    if there is one, else None.

    Arguments:
        ray -- ((x, y), theta): origin point and direction angle in radians
        segment -- ((x1, y1), (x2, y2)): the segment's endpoints
    """
    (x, y), theta = ray
    # (x1, y1), (x2, y2) = segment
    pt1 = (x, y)
    # BUGFIX: renamed from 'len', which shadowed the builtin len() inside this
    # function. Any unit length works; it only fixes the ray's parameterization.
    ray_len = 1
    pt2 = (x + ray_len * math.cos(theta), y + ray_len * math.sin(theta))
    xo, yo, valid, r, s = line_intersect(pt1, pt2, *segment)
    # r >= 0 keeps only intersections in front of the ray origin;
    # 0 <= s <= 1 keeps only hits that land on the segment itself.
    if valid and r >= 0 and 0 <= s <= 1:
        return (xo, yo)
    return None
def point_distance(p1, p2):
    """Return the Euclidean distance between the points p1 and p2."""
    (x1, y1), (x2, y2) = p1, p2
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def construct_maze(maze_id=0, length=1):
    """Build and return the grid layout for the requested maze.

    Grid cells: 1 = wall, 0 = free space, 'r' = robot start, 'g' = goal.

    Keyword Arguments:
        maze_id {int} -- which maze layout to construct (default: {0})
        length {int} -- scale parameter for the resizable mazes (default: {1})

    Returns:
        list of lists -- the maze structure

    Raises:
        NotImplementedError -- if maze_id is unrecognized, or maze_id 0 is
            requested with length != 1
    """
    # BUGFIX: initialize so an unrecognized maze_id raises NotImplementedError at
    # the end instead of an UnboundLocalError on the final 'structure' check.
    structure = None
    # define the maze to use
    if maze_id == 0:
        if length != 1:
            raise NotImplementedError("Maze_id 0 only has length 1!")
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 0, 1],
            [1, 1, 1, 0, 1],
            [1, 'r', 0, 'g', 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 1:  # donuts maze: can reach the single goal by 2 equal paths
        c = length + 4
        M = np.ones((c, c))
        M[1:c - 1, (1, c - 2)] = 0
        M[(1, c - 2), 1:c - 1] = 0
        M = M.astype(int).tolist()
        M[1][c // 2] = 'r'
        M[c - 2][c // 2] = 'g'
        structure = M
    elif maze_id == 2:  # spiral maze: need to use all the keys (only makes sense for length >= 3)
        c = length + 4
        M = np.ones((c, c))
        M[1:c - 1, (1, c - 2)] = 0
        M[(1, c - 2), 1:c - 1] = 0
        M = M.astype(int).tolist()
        M[1][c // 2] = 'r'
        # now block one of the ways and put the goal on the other side
        M[1][c // 2 - 1] = 1
        M[1][c // 2 - 2] = 'g'
        structure = M
    elif maze_id == 3:  # corridor with goals at the 2 extremes
        structure = [
            [1] * (2 * length + 5),
            [1, 'g'] + [0] * length + ['r'] + [0] * length + ['g', 1],
            [1] * (2 * length + 5),
        ]
    elif 4 <= maze_id <= 7:  # cross corridor, goal in one of the 4 corners
        c = 2 * length + 5
        M = np.ones((c, c))
        # carve a 3-cell-wide diagonal corridor and its anti-diagonal twin
        M = M - np.diag(np.ones(c))
        M = M - np.diag(np.ones(c - 1), 1) - np.diag(np.ones(c - 1), -1)
        i = np.arange(c)
        j = i[::-1]
        M[i, j] = 0
        M[i[:-1], j[1:]] = 0
        M[i[1:], j[:-1]] = 0
        # re-seal the outer border
        M[np.array([0, c - 1]), :] = 1
        M[:, np.array([0, c - 1])] = 1
        M = M.astype(int).tolist()
        M[c // 2][c // 2] = 'r'
        if maze_id == 4:
            M[1][1] = 'g'
        if maze_id == 5:
            M[1][c - 2] = 'g'
        if maze_id == 6:
            M[c - 2][1] = 'g'
        if maze_id == 7:
            M[c - 2][c - 2] = 'g'
        structure = M
    elif maze_id == 8:  # reflexion of benchmark maze
        structure = [
            [1, 1, 1, 1, 1],
            [1, 'g', 0, 0, 1],
            [1, 1, 1, 0, 1],
            [1, 'r', 0, 0, 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 9:  # sym benchmark maze
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 'r', 1],
            [1, 0, 1, 1, 1],
            [1, 0, 0, 'g', 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 10:  # reflexion of sym of benchmark maze
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 'g', 1],
            [1, 0, 1, 1, 1],
            [1, 0, 0, 'r', 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 11:  # four room
        structure = [
            [1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 0, 0, 0, 1, 0, 0, 0, 1],
            [1, 0, 0, 0, 0, 0, 0, 0, 1],
            [1, 'r', 0, 'g', 1, 0, 0, 0, 1],
            [1, 1, 0, 1, 1, 0, 0, 0, 1],
            [1, 0, 0, 0, 1, 1, 0, 1, 1],
            [1, 0, 0, 0, 1, 0, 0, 0, 1],
            [1, 0, 0, 0, 0, 0, 0, 0, 1],
            [1, 0, 0, 0, 1, 0, 0, 0, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]
    if structure is not None:
        return structure
    raise NotImplementedError("The provided MazeId is not recognized")
def construct_maze_random(maze_id=0, length=1): # random start, only correct for maze_id == 8 for now!
    """Like construct_maze(), but additionally returns a pool of candidate start cells.

    Returns:
        (pool, p, structure, x_relative, y_relative) where 'pool' is a list of
        candidate start cells, 'p' their sampling probabilities, 'structure' the
        maze grid, and (x_relative, y_relative) the chosen start cell's offset
        from a fixed reference cell.

    NOTE(review): per the header comment, only some maze ids are fully supported;
    see the note before the final return about ids that raise NameError.
    """
    x_relative = y_relative = 0 # for random start position
    # define the maze to use
    if maze_id == 0:
        if length != 1:
            raise NotImplementedError("Maze_id 0 only has length 1!")
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 0, 1],
            [1, 1, 1, 0, 1],
            [1, 'r', 0, 'g', 1],
            [1, 1, 1, 1, 1],
        ]
    elif maze_id == 1: # donuts maze: can reach the single goal by 2 equal paths
        c = length + 4
        M = np.ones((c, c))
        M[1:c - 1, (1, c - 2)] = 0
        M[(1, c - 2), 1:c - 1] = 0
        M = M.astype(int).tolist()
        M[1][c // 2] = 'r'
        M[c - 2][c // 2] = 'g'
        structure = M
    elif maze_id == 2: # spiral maze: need to use all the keys (only makes sense for length >=3)
        c = length + 4
        M = np.ones((c, c))
        M[1:c - 1, (1, c - 2)] = 0
        M[(1, c - 2), 1:c - 1] = 0
        M = M.astype(int).tolist()
        M[1][c // 2] = 'r'
        # now block one of the ways and put the goal on the other side
        M[1][c // 2 - 1] = 1
        M[1][c // 2 - 2] = 'g'
        structure = M
    elif maze_id == 3: # corridor with goals at the 2 extremes
        structure = [
            [1] * (2 * length + 5),
            [1, 'g'] + [0] * length + ['r'] + [0] * length + ['g', 1],
            [1] * (2 * length + 5),
        ]
    elif 4 <= maze_id <= 7: # cross corridor, goal in
        c = 2 * length + 5
        M = np.ones((c, c))
        M = M - np.diag(np.ones(c))
        M = M - np.diag(np.ones(c - 1), 1) - np.diag(np.ones(c - 1), -1)
        i = np.arange(c)
        j = i[::-1]
        M[i, j] = 0
        M[i[:-1], j[1:]] = 0
        M[i[1:], j[:-1]] = 0
        M[np.array([0, c - 1]), :] = 1
        M[:, np.array([0, c - 1])] = 1
        M = M.astype(int).tolist()
        M[c // 2][c // 2] = 'r'
        if maze_id == 4:
            M[1][1] = 'g'
        if maze_id == 5:
            M[1][c - 2] = 'g'
        if maze_id == 6:
            M[c - 2][1] = 'g'
        if maze_id == 7:
            M[c - 2][c - 2] = 'g'
        structure = M
    elif maze_id == 8: # benchmark maze
        # not used random seed here. Not necessary?
        # random.seed(1)
        pool = [(1, 2), (1, 3), (2, 3), (3, 3), (3, 2), (3, 1)]
        random_i = np.random.choice(len(pool), p=[0.07, 0.08, 0.11, 0.14, 0.2, 0.4])
        x_r, y_r = pool[random_i]
        # (3, 1) was the initial choice!
        structure = [
            [1, 1, 1, 1, 1],
            [1, 'g', 0, 0, 1],
            [1, 1, 1, 0, 1],
            [1, 0, 0, 0, 1],
            [1, 1, 1, 1, 1],
        ]
        # NOTE(review): the randomly sampled (x_r, y_r) above is immediately
        # overwritten by the fixed start below — confirm the random draw is
        # intentionally disabled here.
        x_r, y_r = 3, 1 # fix start, only for the maze
        x_g, y_g = 1, 1 # useless for this env
        structure[x_r][y_r] = 'r'
        x_relative = x_r - 3 # the x index relative to (0, 0)
        y_relative = y_r - 1
        pool = [(1, 2), (1, 3), (2, 3), (3, 3), (3, 2), (3, 1)] # random pool
        p = [0.07, 0.08, 0.11, 0.14, 0.2, 0.4] # choice probability from pool
    elif maze_id == 9: # mirrored maze, for transfer
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 'g', 1],
            [1, 0, 1, 1, 1],
            [1, 0, 0, 0, 1],
            [1, 1, 1, 1, 1],
        ]
        x_r, y_r = 3, 1 # fix start, only for the maze
        x_g, y_g = 1, 3 # useless for this env
        structure[x_r][y_r] = 'r'
        # print("x_r, y_r", x_r, y_r)
        x_relative = x_r - 3 # the x index relative to (0, 0)
        y_relative = y_r - 1
        pool = [(1, 2), (1, 1), (2, 1), (3, 1), (3, 2), (3, 3)]
        p = [0.07, 0.08, 0.11, 0.14, 0.2, 0.4] # hand-crafted possibility, higher possibility for farther starting pt
    elif maze_id == 11:
        structure = [
            [1, 1, 1, 1, 1, 1, 1, 1, 1],
            [1, 0, 0, 0, 1, 0, 0, 0, 1],
            [1, 0, 0, 0, 0, 0, 0, 0, 1],
            [1,'r',0,'g',1, 0, 0, 0, 1],
            [1, 1, 0, 1, 1, 0, 0, 0, 1],
            [1, 0, 0, 0, 1, 1, 0, 1, 1],
            [1, 0, 0, 0, 1, 0, 0, 0, 1],
            [1, 0, 0, 0, 0, 0, 0, 0, 1],
            [1, 0, 0, 0, 1, 0, 0, 0, 1],
            [1, 1, 1, 1, 1, 1, 1, 1, 1],
        ]
        x_r, y_r = 3, 1
        x_g, y_g = 3, 3 # this is hard-coded in maze_env.py!
        x_relative = 0 # x_r, y_r relative to a fixed point
        y_relative = 0
        pool = [(1, 1), (1, 2), (1, 3), (1, 5), (1, 6), (1, 7),
                (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
                (3, 1), (3, 2), (3, 3), (3, 5), (3, 6), (3, 7),
                (4, 2), (4, 5), (4, 6), (4, 7),
                (5, 1), (5, 2), (5, 3), (5, 6),
                (6, 1), (6, 2), (6, 3), (6, 5), (6, 6), (6, 7),
                (7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6), (7, 7),
                (8, 1), (8, 2), (8, 3), (8, 5), (8, 6), (8, 7),
                ] # random pool for r or g
        p = [0.02174] * 45 + [0.0217] # choice probability from pool
    elif maze_id == 12: # a maze that deviates from the original maze a little (to test its transferability)
        structure = [ # a spiral matrix
            [1, 1, 1, 1, 1],
            [1, 0, 0, 0, 1],
            [1, 'g', 1, 0, 1],
            [1, 1, 1, 0, 1],
            [1, 0, 0, 0, 1],
            [1, 1, 1, 1, 1]
        ]
        x_r, y_r = 4, 1
        # x_g, y_g = 2, 1 # these are hard-coded in maze_env.py! need to modify!!!
        structure[x_r][y_r] = 'r'
        x_relative = 1 # x_r, y_r relative to (3, 1)
        y_relative = 0
        pool = [(1, 1), (1, 2), (1, 3), (2, 3), (3, 3), (4, 3), (4, 2), (4, 1)] # random pool for r or g
        p = [0.028, 0.056, 0.083, 0.11, 0.139, 0.17, 0.194, 0.22]
    elif maze_id == 14: # a maze that deviates from the original maze a little (to test its transferability)
        structure = [ # a mirrored spiral matrix
            [1, 1, 1, 1, 1],
            [1, 0, 0, 0, 1],
            [1, 0, 1, 'g', 1],
            [1, 0, 1, 1, 1],
            [1, 0, 0, 0, 1],
            [1, 1, 1, 1, 1]
        ]
        x_r, y_r = 4, 3
        structure[x_r][y_r] = 'r'
        x_relative = 1 # x_r, y_r relative to (3, 1)
        y_relative = 2
        pool = [(1, 3), (1, 2), (1, 1), (2, 1), (3, 1), (4, 1), (4, 2), (4, 3)] # random pool for r or g
        p = [0.028, 0.056, 0.083, 0.11, 0.139, 0.17, 0.194, 0.22]
    elif maze_id == 13: # a maze that is elongated
        structure = [
            [1, 1, 1, 1, 1, 1, 1],
            [1, 'g', 0, 0, 0, 0, 1],
            [1, 1, 1, 1, 1, 0, 1],
            [1, 1, 1, 1, 1, 0, 1],
            [1, 1, 1, 1, 1, 0, 1],
            [1, 0, 0, 0, 0, 0, 1],
            [1, 1, 1, 1, 1, 1, 1]
        ]
        x_r, y_r = 5, 1
        structure[x_r][y_r] = 'r'
        x_relative = 2 # x_r, y_r relative to a fixed point (3, 1)
        y_relative = 0
        pool = [(1, 2), (1, 3), (1, 4), (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (5, 4), (5, 3), (5, 2), (5, 1)] # random pool for r or g
        p = [0.013, 0.026, 0.028, 0.051, 0.064, 0.077, 0.090, 0.103, 0.115, 0.128, 0.141, 0.164]
    elif maze_id == 10: # reflexion of sym of benchmark maze
        structure = [
            [1, 1, 1, 1, 1],
            [1, 0, 0, 'g', 1],
            [1, 0, 1, 1, 1],
            [1, 0, 0, 'r', 1],
            [1, 1, 1, 1, 1],
        ]
    # NOTE(review): 'pool' and 'p' are only assigned for maze_id in {8, 9, 11, 12,
    # 13, 14}; for the other recognized ids (0-7, 10) the return below raises
    # UnboundLocalError, and an unrecognized id leaves 'structure' unbound as well —
    # TODO confirm only the supported ids are used with this function.
    if structure:
        return pool, p, structure, x_relative, y_relative
    else:
        raise NotImplementedError("The provided MazeId is not recognized")
    def plot_ray(self, reading, ray_idx, color='r'):
        """Plot the maze layout and overlay a single range-sensor ray.

        Parameters
        ----------
        reading : float
            normalized sensor reading; a falsy reading (0/None) is drawn as a
            near-zero-length ray.
        ray_idx : int
            index of the ray within the sensor span (0 .. _n_bins - 1).
        color : str
            matplotlib color spec used to draw the ray.
        """
        structure = self.MAZE_STRUCTURE
        size_scaling = self.MAZE_SIZE_SCALING
        # duplicate cells to plot the maze: interior cells become 2x2 blocks
        # so border walls render at half the width of inner cells
        structure_plot = np.zeros(((len(structure) - 1) * 2, (len(structure[0]) - 1) * 2))
        for i in range(len(structure)):
            for j in range(len(structure[0])):
                cell = structure[i][j]
                if type(cell) is not int:
                    # 'r' (robot) and 'g' (goal) markers get distinct gray levels
                    cell = 0.3 if cell == 'r' else 0.7
                if i == 0:
                    if j == 0:
                        structure_plot[i, j] = cell
                    elif j == len(structure[0]) - 1:
                        structure_plot[i, 2 * j - 1] = cell
                    else:
                        structure_plot[i, 2 * j - 1:2 * j + 1] = cell
                elif i == len(structure) - 1:
                    if j == 0:
                        structure_plot[2 * i - 1, j] = cell
                    elif j == len(structure[0]) - 1:
                        structure_plot[2 * i - 1, 2 * j - 1] = cell
                    else:
                        structure_plot[2 * i - 1, 2 * j - 1:2 * j + 1] = cell
                else:
                    if j == 0:
                        structure_plot[2 * i - 1:2 * i + 1, j] = cell
                    elif j == len(structure[0]) - 1:
                        structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1] = cell
                    else:
                        structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1:2 * j + 1] = cell
        fig, ax = plt.subplots()
        im = ax.pcolor(-np.array(structure_plot), cmap='gray', edgecolor='black', linestyle=':', lw=1)
        x_labels = list(range(len(structure[0])))
        y_labels = list(range(len(structure)))
        ax.grid(True)  # eliminate this to avoid inner lines
        ax.xaxis.set(ticks=2 * np.arange(len(x_labels)), ticklabels=x_labels)
        ax.yaxis.set(ticks=2 * np.arange(len(y_labels)), ticklabels=y_labels)
        robot_xy = np.array(self.wrapped_env.get_body_com("torso")[:2])  # the coordinates of this are wrt the init!!
        ori = self.get_ori()  # for Ant this is computed with atan2, which gives [-pi, pi]
        # compute origin cell i_o, j_o coordinates and center of it x_o, y_o (with 0,0 in the top-right corner of struc)
        o_xy = np.array(self._find_robot())  # this is self.init_torso_x, self.init_torso_y !!: center of the cell xy!
        o_ij = (o_xy / size_scaling).astype(int)  # this is the position in the grid (check if correct..)
        o_xy_plot = o_xy / size_scaling * 2
        robot_xy_plot = o_xy_plot + robot_xy / size_scaling * 2
        plt.scatter(*robot_xy_plot)
        # for ray_idx in range(self._n_bins):
        # falsy reading -> nothing sensed; use a tiny length so the plot call still works
        length_wall = self._sensor_range - reading * self._sensor_range if reading else 1e-6
        ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
        # wrap the ray orientation back into [-pi, pi]
        if ray_ori > math.pi:
            ray_ori -= 2 * math.pi
        elif ray_ori < - math.pi:
            ray_ori += 2 * math.pi
        # find the end point wall
        end_xy = (robot_xy + length_wall * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
        end_xy_plot = (o_ij + end_xy / size_scaling) * 2
        plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], color)
        ax.set_title('sensors debug')
        print('plotting now, close the window')
        # plt.show(fig)
        # plt.close()
    def plot_state(self, name='sensors', state=None):
        """Plot the maze, the robot position and every wall/goal sensor ray,
        then save the figure into the snapshot directory.

        Parameters
        ----------
        name : str
            figure title suffix and file-name prefix.
        state : optional
            if given, the wrapped env is reset to this state before plotting.
        """
        if state:
            self.wrapped_env.reset(state)
        structure = self.__class__.MAZE_STRUCTURE
        size_scaling = self.__class__.MAZE_SIZE_SCALING
        # duplicate cells to plot the maze
        structure_plot = np.zeros(((len(structure) - 1) * 2, (len(structure[0]) - 1) * 2))
        for i in range(len(structure)):
            for j in range(len(structure[0])):
                cell = structure[i][j]
                if type(cell) is not int:
                    # 'r' (robot) and 'g' (goal) markers get distinct gray levels
                    cell = 0.3 if cell == 'r' else 0.7
                if i == 0:
                    if j == 0:
                        structure_plot[i, j] = cell
                    elif j == len(structure[0]) - 1:
                        structure_plot[i, 2 * j - 1] = cell
                    else:
                        structure_plot[i, 2 * j - 1:2 * j + 1] = cell
                elif i == len(structure) - 1:
                    if j == 0:
                        structure_plot[2 * i - 1, j] = cell
                    elif j == len(structure[0]) - 1:
                        structure_plot[2 * i - 1, 2 * j - 1] = cell
                    else:
                        structure_plot[2 * i - 1, 2 * j - 1:2 * j + 1] = cell
                else:
                    if j == 0:
                        structure_plot[2 * i - 1:2 * i + 1, j] = cell
                    elif j == len(structure[0]) - 1:
                        structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1] = cell
                    else:
                        structure_plot[2 * i - 1:2 * i + 1, 2 * j - 1:2 * j + 1] = cell
        fig, ax = plt.subplots()
        im = ax.pcolor(-np.array(structure_plot), cmap='gray', edgecolor='black', linestyle=':', lw=1)
        x_labels = list(range(len(structure[0])))
        y_labels = list(range(len(structure)))
        ax.grid(True)  # eliminate this to avoid inner lines
        ax.xaxis.set(ticks=2 * np.arange(len(x_labels)), ticklabels=x_labels)
        ax.yaxis.set(ticks=2 * np.arange(len(y_labels)), ticklabels=y_labels)
        obs = self.get_current_maze_obs()
        robot_xy = np.array(self.wrapped_env.get_body_com("torso")[:2])  # the coordinates of this are wrt the init
        ori = self.get_ori()  # for Ant this is computed with atan2, which gives [-pi, pi]
        # compute origin cell i_o, j_o coordinates and center of it x_o, y_o (with 0,0 in the top-right corner of struc)
        o_xy = np.array(self._find_robot())  # this is self.init_torso_x, self.init_torso_y: center of the cell xy!
        o_ij = (o_xy / size_scaling).astype(int)  # this is the position in the grid
        o_xy_plot = o_xy / size_scaling * 2
        robot_xy_plot = o_xy_plot + robot_xy / size_scaling * 2
        plt.scatter(*robot_xy_plot)
        for ray_idx in range(self._n_bins):
            # first _n_bins entries of obs are wall readings; falsy -> tiny ray
            length_wall = self._sensor_range - obs[ray_idx] * self._sensor_range if obs[ray_idx] else 1e-6
            ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
            # wrap the orientation into [-pi, pi]
            if ray_ori > math.pi:
                ray_ori -= 2 * math.pi
            elif ray_ori < - math.pi:
                ray_ori += 2 * math.pi
            # find the end point wall
            end_xy = (robot_xy + length_wall * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
            end_xy_plot = (o_ij + end_xy / size_scaling) * 2
            plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], 'r')
            # second _n_bins entries of obs are goal readings
            length_goal = self._sensor_range - obs[ray_idx + self._n_bins] * self._sensor_range if obs[
                ray_idx + self._n_bins] else 1e-6
            ray_ori = ori - self._sensor_span * 0.5 + ray_idx / (self._n_bins - 1) * self._sensor_span
            # find the end point goal
            end_xy = (robot_xy + length_goal * np.array([math.cos(ray_ori), math.sin(ray_ori)]))
            end_xy_plot = (o_ij + end_xy / size_scaling) * 2
            plt.plot([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot[1]], 'g')
        log_dir = logger.get_snapshot_dir()
        ax.set_title('sensors: ' + name)
        # NOTE(review): '_sesors.png' looks like a typo for '_sensors.png';
        # left unchanged since downstream tooling may expect this exact name.
        plt.savefig(osp.join(log_dir, name + '_sesors.png'))  # this saves the current figure, here f
        plt.close()
| [
"numpy.ones",
"matplotlib.use",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.array",
"math.cos",
"rllab.misc.logger.get_snapshot_dir",
"math.fabs",
"matplotlib.pyplot.scatter",
"math.sin",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((55, 69), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (62, 69), True, 'import matplotlib as mpl\n'), ((15460, 15474), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (15472, 15474), True, 'import matplotlib.pyplot as plt\n'), ((16510, 16537), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*robot_xy_plot'], {}), '(*robot_xy_plot)\n', (16521, 16537), True, 'import matplotlib.pyplot as plt\n'), ((17059, 17151), 'matplotlib.pyplot.plot', 'plt.plot', (['[robot_xy_plot[0], end_xy_plot[0]]', '[robot_xy_plot[1], end_xy_plot[1]]', 'color'], {}), '([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot\n [1]], color)\n', (17067, 17151), True, 'import matplotlib.pyplot as plt\n'), ((18776, 18790), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18788, 18790), True, 'import matplotlib.pyplot as plt\n'), ((19840, 19867), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*robot_xy_plot'], {}), '(*robot_xy_plot)\n', (19851, 19867), True, 'import matplotlib.pyplot as plt\n'), ((21062, 21087), 'rllab.misc.logger.get_snapshot_dir', 'logger.get_snapshot_dir', ([], {}), '()\n', (21085, 21087), False, 'from rllab.misc import logger\n'), ((21228, 21239), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (21237, 21239), True, 'import matplotlib.pyplot as plt\n'), ((1516, 1530), 'math.fabs', 'math.fabs', (['DET'], {}), '(DET)\n', (1525, 1530), False, 'import math\n'), ((20437, 20527), 'matplotlib.pyplot.plot', 'plt.plot', (['[robot_xy_plot[0], end_xy_plot[0]]', '[robot_xy_plot[1], end_xy_plot[1]]', '"""r"""'], {}), "([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot\n [1]], 'r')\n", (20445, 20527), True, 'import matplotlib.pyplot as plt\n'), ((20961, 21051), 'matplotlib.pyplot.plot', 'plt.plot', (['[robot_xy_plot[0], end_xy_plot[0]]', '[robot_xy_plot[1], end_xy_plot[1]]', '"""g"""'], {}), "([robot_xy_plot[0], end_xy_plot[0]], [robot_xy_plot[1], end_xy_plot\n [1]], 'g')\n", (20969, 21051), True, 
'import matplotlib.pyplot as plt\n'), ((21142, 21181), 'os.path.join', 'osp.join', (['log_dir', "(name + '_sesors.png')"], {}), "(log_dir, name + '_sesors.png')\n", (21150, 21181), True, 'import os.path as osp\n'), ((3550, 3565), 'numpy.ones', 'np.ones', (['(c, c)'], {}), '((c, c))\n', (3557, 3565), True, 'import numpy as np\n'), ((7083, 7098), 'numpy.ones', 'np.ones', (['(c, c)'], {}), '((c, c))\n', (7090, 7098), True, 'import numpy as np\n'), ((15495, 15519), 'numpy.array', 'np.array', (['structure_plot'], {}), '(structure_plot)\n', (15503, 15519), True, 'import numpy as np\n'), ((18811, 18835), 'numpy.array', 'np.array', (['structure_plot'], {}), '(structure_plot)\n', (18819, 18835), True, 'import numpy as np\n'), ((2345, 2360), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (2353, 2360), False, 'import math\n'), ((2372, 2387), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (2380, 2387), False, 'import math\n'), ((3885, 3900), 'numpy.ones', 'np.ones', (['(c, c)'], {}), '((c, c))\n', (3892, 3900), True, 'import numpy as np\n'), ((7418, 7433), 'numpy.ones', 'np.ones', (['(c, c)'], {}), '((c, c))\n', (7425, 7433), True, 'import numpy as np\n'), ((16962, 16979), 'math.cos', 'math.cos', (['ray_ori'], {}), '(ray_ori)\n', (16970, 16979), False, 'import math\n'), ((16981, 16998), 'math.sin', 'math.sin', (['ray_ori'], {}), '(ray_ori)\n', (16989, 16998), False, 'import math\n'), ((4525, 4540), 'numpy.ones', 'np.ones', (['(c, c)'], {}), '((c, c))\n', (4532, 4540), True, 'import numpy as np\n'), ((4662, 4674), 'numpy.arange', 'np.arange', (['c'], {}), '(c)\n', (4671, 4674), True, 'import numpy as np\n'), ((8054, 8069), 'numpy.ones', 'np.ones', (['(c, c)'], {}), '((c, c))\n', (8061, 8069), True, 'import numpy as np\n'), ((8191, 8203), 'numpy.arange', 'np.arange', (['c'], {}), '(c)\n', (8200, 8203), True, 'import numpy as np\n'), ((20332, 20349), 'math.cos', 'math.cos', (['ray_ori'], {}), '(ray_ori)\n', (20340, 20349), False, 'import math\n'), ((20351, 20368), 
'math.sin', 'math.sin', (['ray_ori'], {}), '(ray_ori)\n', (20359, 20368), False, 'import math\n'), ((20856, 20873), 'math.cos', 'math.cos', (['ray_ori'], {}), '(ray_ori)\n', (20864, 20873), False, 'import math\n'), ((20875, 20892), 'math.sin', 'math.sin', (['ray_ori'], {}), '(ray_ori)\n', (20883, 20892), False, 'import math\n'), ((4565, 4575), 'numpy.ones', 'np.ones', (['c'], {}), '(c)\n', (4572, 4575), True, 'import numpy as np\n'), ((4630, 4644), 'numpy.ones', 'np.ones', (['(c - 1)'], {}), '(c - 1)\n', (4637, 4644), True, 'import numpy as np\n'), ((4783, 4803), 'numpy.array', 'np.array', (['[0, c - 1]'], {}), '([0, c - 1])\n', (4791, 4803), True, 'import numpy as np\n'), ((4825, 4845), 'numpy.array', 'np.array', (['[0, c - 1]'], {}), '([0, c - 1])\n', (4833, 4845), True, 'import numpy as np\n'), ((8094, 8104), 'numpy.ones', 'np.ones', (['c'], {}), '(c)\n', (8101, 8104), True, 'import numpy as np\n'), ((8159, 8173), 'numpy.ones', 'np.ones', (['(c - 1)'], {}), '(c - 1)\n', (8166, 8173), True, 'import numpy as np\n'), ((8312, 8332), 'numpy.array', 'np.array', (['[0, c - 1]'], {}), '([0, c - 1])\n', (8320, 8332), True, 'import numpy as np\n'), ((8354, 8374), 'numpy.array', 'np.array', (['[0, c - 1]'], {}), '([0, c - 1])\n', (8362, 8374), True, 'import numpy as np\n'), ((4601, 4615), 'numpy.ones', 'np.ones', (['(c - 1)'], {}), '(c - 1)\n', (4608, 4615), True, 'import numpy as np\n'), ((8130, 8144), 'numpy.ones', 'np.ones', (['(c - 1)'], {}), '(c - 1)\n', (8137, 8144), True, 'import numpy as np\n')] |
'''An example to show how to set up an pommerman game programmatically'''
import pommerman
from pommerman import agents
import numpy as np
from tqdm import tqdm
def main():
    """Bootstrap a Pommerman 'OneVsOne-v0' game and train a DeepQAgent.

    Plays 10 episodes against a SimpleAgent, storing
    (state, action, reward, next_state, done) transitions in the DQN
    replay memory and fitting the network once enough samples exist.
    Prints a wins0/wins1/ties tally at the end.
    """
    # Print all possible environments in the Pommerman registry
    print(pommerman.REGISTRY)
    # dqAgent = agents.DeepQAgent()
    # dqAgent.save('model')
    # Create a set of agents (exactly four)
    # agent_list = [
    #     agents.SimpleAgent(),
    #     agents.DeepQAgent().load('model')
    #     agents.SimpleAgent(),
    #     agents.RandomAgent(),
    #     agents.SimpleAgent(),
    #     agents.PlayerAgent(agent_control="arrows"), # arrows to move, space to lay bomb
    #     agents.RandomAgent(),
    #     agents.DockerAgent("pommerman/simple-agent", port=12345),
    # ]
    # Make the "Free-For-All" environment using the agent list
    # env = pommerman.make('OneVsOne-v0', agent_list)
    # Run the episodes just like OpenAI Gym
    wins0 = 0
    wins1 = 0
    ties = 0
    batch_size = 15  # replay only starts once the memory holds this many samples
    for i_episode in tqdm(range(10)):
        agent_list = [agents.SimpleAgent()]
        dqAgent = agents.DeepQAgent()
        dqAgent.initDQ(learn=False, epsilon=0.5, action_size=5)
        dqAgent.load('model_3x64_40_20_no_bombs')
        agent_list.append(dqAgent)
        # NOTE(review): a new env is created per episode but only the last one
        # is closed by env.close() below — confirm earlier envs need no cleanup.
        env = pommerman.make('OneVsOne-v0', agent_list)
        env.set_init_game_state('jsons/000.json')
        env.seed(0)
        state = env.reset()
        done = False
        while not done:
            # env.render(record_json_dir='jsons/')
            env.render()
            actions = env.act(state)
            # flatten the board into a (1, 64) feature row for the DQN
            prevState = np.ravel(state[1]['board'])
            prevState= np.reshape(prevState, [1, 64])
            # print('prev State shape:: ', prevState.shape)
            # print('Actions:: ', actions)
            state, reward, done, info = env.step(actions)
            #print('state:: ', state[1]['board'])
            nextState = np.ravel(state[1]['board'])
            nextState= np.reshape(nextState, [1, 64])
            # print('nextState shape:: ', nextState.shape)
            dqAgent.remember(prevState, actions[1], reward[1], nextState, done )
            # print('Memory:: ', len(dqAgent.memory))
            if len(dqAgent.memory) > batch_size:  # once the agent has enough experience in memory, fit its neural model
                # print('done replay')
                dqAgent.replay(batch_size)
                # print("Done replay:: memory lem", len(dqAgent.memory))
            if done:
                # print('Done:: ', reward)
                # print('info:: ', info)
                # reward of exactly 1 marks a win for that player index
                if reward[0] == 1 and reward[1] != 1:
                    wins0+=1
                elif reward[1] == 1 and reward[0] != 1:
                    wins1+=1
                else:
                    ties+=1
        # print('Episode {} finished'.format(i_episode))
    # dqAgent.save('model_3x64_40_20')
    print('Results: {}/{}/{}'.format(wins0, wins1, ties))
    env.close()
# run the demo/training loop only when executed as a script
if __name__ == '__main__':
    main()
| [
"numpy.reshape",
"pommerman.make",
"pommerman.agents.DeepQAgent",
"pommerman.agents.SimpleAgent",
"numpy.ravel"
] | [((1211, 1230), 'pommerman.agents.DeepQAgent', 'agents.DeepQAgent', ([], {}), '()\n', (1228, 1230), False, 'from pommerman import agents\n'), ((1394, 1435), 'pommerman.make', 'pommerman.make', (['"""OneVsOne-v0"""', 'agent_list'], {}), "('OneVsOne-v0', agent_list)\n", (1408, 1435), False, 'import pommerman\n'), ((1171, 1191), 'pommerman.agents.SimpleAgent', 'agents.SimpleAgent', ([], {}), '()\n', (1189, 1191), False, 'from pommerman import agents\n'), ((1725, 1752), 'numpy.ravel', 'np.ravel', (["state[1]['board']"], {}), "(state[1]['board'])\n", (1733, 1752), True, 'import numpy as np\n'), ((1776, 1806), 'numpy.reshape', 'np.reshape', (['prevState', '[1, 64]'], {}), '(prevState, [1, 64])\n', (1786, 1806), True, 'import numpy as np\n'), ((2044, 2071), 'numpy.ravel', 'np.ravel', (["state[1]['board']"], {}), "(state[1]['board'])\n", (2052, 2071), True, 'import numpy as np\n'), ((2095, 2125), 'numpy.reshape', 'np.reshape', (['nextState', '[1, 64]'], {}), '(nextState, [1, 64])\n', (2105, 2125), True, 'import numpy as np\n')] |
#Written by <NAME>
import numpy as np
import lc3asm
# 2^16 16-bit memory addresses
# NOTE(review): [0]*0xFFFF allocates 65535 words, one short of a full 2^16
# (0x10000) space; nothing below indexes 0xFFFF, but confirm the intent.
memory = np.uint16([0]*0xFFFF)
# general-purpose registers R0-R7
reg = np.uint16([0]*8)
pc = np.int16(0x0200)  # program counter
psr = 0xFFFC          # memory address of the processor status register
halt = True           # legacy flag set by trapOp; run() polls the MCR instead
# special memory-mapped device register pointers
kbsr_ptr = 0xFE00  # keyboard status
kbdr_ptr = 0xFE02  # keyboard data
dsr_ptr = 0xFE04   # display status
ddr_ptr = 0xFE06   # display data
mcr_ptr = 0xFFFE   # machine control register (bit 15 = clock enable)
#from stackoverflow
# https://stackoverflow.com/questions/32030412/twos-complement-sign-extension-python/32031543
def sign_extend(value, bits):
    """Reinterpret the low `bits` bits of `value` as a two's-complement integer.

    Bits above position `bits - 1` are ignored.
    """
    sign = 1 << (bits - 1)
    low = value & ((1 << bits) - 1)  # keep only the field of interest
    return (low ^ sign) - sign       # flip-and-subtract sign-extension trick
def logSign(num):
    """Update the PSR condition codes (N/Z/P) from a 16-bit result."""
    signed = sign_extend(num, 16)
    memory[psr] &= 0b1111111111111000  # clear N, Z and P
    if signed < 0:
        flag = 0b100  # N
    elif signed > 0:
        flag = 0b001  # P
    else:
        flag = 0b010  # Z
    memory[psr] |= flag
def getSign():
    """Read the PSR condition codes back: -1 for N, 0 for Z, 1 for P.

    Returns None when no condition code bit is set (matching the original).
    """
    cc = memory[psr]
    if cc & 0b100:
        return -1
    if cc & 0b010:
        return 0
    if cc & 0b001:
        return 1
def addOp(instruct):
    """ADD: DR <- SR1 + (SR2 or sign-extended imm5); sets condition codes."""
    sr1_val = reg[(instruct >> 6) & 0b111]
    if (instruct >> 5) & 0b1:
        operand = sign_extend(instruct & 0b11111, 5)  # immediate mode
    else:
        operand = reg[instruct & 0b111]               # register mode
    total = sr1_val + operand
    logSign(total)
    reg[(instruct >> 9) & 0b111] = total  # store wraps to 16 bits
def andOp(instruct):
    """AND: DR <- SR1 & (SR2 or sign-extended imm5); sets condition codes."""
    sr1_val = reg[(instruct >> 6) & 0b111]
    if (instruct >> 5) & 0b1:
        operand = sign_extend(instruct & 0b11111, 5)  # immediate mode
    else:
        operand = reg[instruct & 0b111]               # register mode
    masked = sr1_val & operand
    logSign(masked)
    reg[(instruct >> 9) & 0b111] = masked
def brOp(instruct):
    """BR: add the sign-extended 9-bit offset to PC when a selected
    condition flag (n/z/p) matches the current condition codes."""
    global pc
    current = getSign()
    n_bit = (instruct >> 11) & 0b1
    z_bit = (instruct >> 10) & 0b1
    p_bit = (instruct >> 9) & 0b1
    taken = ((n_bit and current == -1) or
             (z_bit and current == 0) or
             (p_bit and current == 1))
    if taken:
        pc += sign_extend(instruct & 0b111111111, 9)
def jmpOp(instruct):
    """JMP: unconditionally set PC from the base register (RET == JMP R7).

    Bug fix: the original assigned to a local ``pc``, so the jump silently
    never moved the program counter; ``global pc`` makes it take effect.
    """
    global pc
    pc = reg[(instruct >> 6) & 0b111]
def jsrOp(instruct):
    """JSR/JSRR: save the return address in R7, then jump.

    Bug fix: ``global pc`` was missing; the PC-relative (JSR) form raised
    UnboundLocalError (``pc`` read before local assignment) and neither
    form actually updated the program counter.
    """
    global pc
    reg[7] = pc
    if (instruct >> 11) & 0b1 == 0b1:
        # JSR: PC-relative, 11-bit signed offset
        pc = sign_extend(instruct & 0b11111111111, 11) + pc
    else:
        # JSRR: absolute, from base register
        pc = reg[(instruct >> 6) & 0b111]
def ldOp(instruct):
    """LD: DR <- mem[PC + SEXT(offset9)]; sets condition codes."""
    loaded = memory[pc + sign_extend(instruct & 0b111111111, 9)]
    logSign(loaded)
    reg[(instruct >> 9) & 0b111] = loaded
def ldiOp(instruct):
    """LDI: DR <- mem[mem[PC + SEXT(offset9)]]; sets condition codes."""
    pointer = memory[pc + sign_extend(instruct & 0b111111111, 9)]
    value = memory[pointer]
    logSign(value)
    reg[(instruct >> 9) & 0b111] = value
def ldrOp(instruct):
    """LDR: DR <- mem[BaseR + SEXT(offset6)]; sets condition codes."""
    base = reg[(instruct >> 6) & 0b111]
    value = memory[sign_extend(instruct & 0b111111, 6) + base]
    logSign(value)
    reg[(instruct >> 9) & 0b111] = value
def leaOp(instruct):
    """LEA: DR <- PC + SEXT(offset9); sets condition codes."""
    address = pc + sign_extend(instruct & 0b111111111, 9)
    logSign(address)
    reg[(instruct >> 9) & 0b111] = address
def notOp(instruct):
    """NOT: DR <- bitwise complement of SR; sets condition codes."""
    src = (instruct >> 6) & 0b111
    dst = (instruct >> 9) & 0b111
    inverted = ~reg[src]
    logSign(inverted)
    reg[dst] = inverted
def retOp(instruct):
    """RET: return from subroutine by restoring PC from R7.

    Bug fix: ``global pc`` was missing, so the assignment only created a
    local variable and the return never happened.
    """
    global pc
    pc = reg[7]
def rtiOp(instruct):
    """RTI: pop PC and PSR off the supervisor stack (R6)."""
    global pc, reg
    reg[6] += 2  # pop two words in one step
    new_sp = reg[6]
    memory[psr] = memory[new_sp - 1]  # restore processor status
    pc = memory[new_sp - 2]           # restore program counter
def stOp(instruct):
    """ST: mem[PC + SEXT(offset9)] <- SR."""
    address = sign_extend(instruct & 0b111111111, 9) + pc
    memory[address] = reg[(instruct >> 9) & 0b111]
def stiOp(instruct):
    """STI: mem[mem[PC + SEXT(offset9)]] <- SR."""
    pointer = memory[sign_extend(instruct & 0b111111111, 9) + pc]
    memory[pointer] = reg[(instruct >> 9) & 0b111]
def strOp(instruct):
    """STR: mem[BaseR + SEXT(offset6)] <- SR."""
    base = reg[(instruct >> 6) & 0b111]
    memory[sign_extend(instruct & 0b111111, 6) + base] = reg[(instruct >> 9) & 0b111]
def trapOp(instruct):
    """Built-in TRAP handling for OUT (x21), HALT (x25) and PUTS (x22)."""
    global halt
    vector = instruct & 0b11111111
    if vector == 0x21:
        # OUT: print the character in R0
        print(chr(reg[0]), end='')
    if vector == 0x25:
        # HALT
        halt = True
    if vector == 0x22:
        # PUTS: print the NUL-terminated string starting at mem[R0]
        ptr = reg[0]
        while memory[ptr] != 0:
            print(chr(memory[ptr]), end='')
            ptr += 1
def rTrap(instruct):
    """Real TRAP: vector through the OS trap table with a stack switch.

    If the PSR's bit 15 is set, loads R6 from the OS_SP symbol, then pushes
    the current PSR and PC onto that stack, jumps to the handler address
    stored at the trap vector, and clears bit 15.
    """
    global pc, reg
    # print("rTrap " + hex(instruct))
    if memory[psr]&0x8000 == 0x8000:
        #set OS_SP
        reg[6] = memory[lc3asm.symTable['OS_SP']]
    #push PSR and PC
    #if in user mode, set OS_SP
    #change to super mode
    reg[6]-=2
    memory[reg[6]+1] = memory[psr]
    memory[reg[6]] = pc
    # jump to the handler address stored at the trap vector (low 8 bits)
    pc = np.int16(memory[instruct&0b11111111])
    if memory[psr]&0x8000 == 0x8000:
        #goto super mode
        memory[psr]&=0x7FFF
    # print("PSR pushed: " + hex(memory[reg[6]-1]))
    # print("PC pushed: " + hex(memory[reg[6]]))
    # print("SysS:"+hex(reg[6]))
def display():
    """Service the memory-mapped display: emit any pending character and
    leave the display-ready bit (DSR bit 15) set."""
    pending = memory[ddr_ptr]
    if pending != 0:
        memory[dsr_ptr] &= 0x7FFF  # mark busy while emitting
        print(chr(pending & 0xFF), end='')
        memory[ddr_ptr] = 0
    memory[dsr_ptr] |= 0x8000  # ready again (set in both paths originally)
# Opcode -> handler dispatch table.
# Bug fix: 0b1100 appeared twice (jmpOp, then retOp); the later duplicate
# key silently overwrote jmpOp, so every JMP dispatched to retOp.  RET is
# just JMP with BaseR = R7, which jmpOp already handles, so 0b1100 now
# maps only to jmpOp.
op_codes = {
    0b0001: addOp,
    0b0101: andOp,
    0b0000: brOp,
    0b1100: jmpOp,  # also covers RET (JMP R7)
    0b0100: jsrOp,
    0b0010: ldOp,
    0b1010: ldiOp,
    0b0110: ldrOp,
    0b1110: leaOp,
    0b1001: notOp,
    0b1000: rtiOp,
    0b0011: stOp,
    0b1011: stiOp,
    0b0111: strOp,
    0b1111: trapOp
}
def parse(instruct, debug):
    """Decode one instruction word and dispatch it to its opcode handler.

    The three OS-backed trap vectors (x21, x25, x22) go through rTrap;
    unknown opcodes print "NOP".
    """
    op_code = (instruct >> 12) & 0b1111
    handler = op_codes.get(op_code)
    if handler is None:
        print("NOP")
        return
    if debug:
        print(handler)
    if instruct in (0xF025, 0xF021, 0xF022):
        rTrap(instruct)
    else:
        handler(instruct)
def loadIn():
    """Copy the assembled program image from lc3asm into emulator memory."""
    for address, word in lc3asm.out.items():
        memory[address] = word
def run(debug = True):
    """Fetch-decode-execute loop; runs while the MCR clock bit stays set.

    With debug=True the loop single-steps: it prints the PC each cycle and
    accepts commands at a prompt ('r' dump registers/PSR/MCR/PC, 'p' stop,
    'ss' dump memory from R6 up to x3000).
    """
    global pc, halt, memory
    halt = False
    memory[mcr_ptr] = 0b1000000000000000  # set the clock-enable bit
    # NOTE(review): the loop only checks the MCR; the `halt` flag set by
    # trapOp does not stop it — confirm the OS HALT routine clears the MCR.
    while memory[mcr_ptr] == 0b1000000000000000:
        pc+=1
        if debug:
            print(hex(pc))
        # pc was already advanced, so execute the instruction at pc-1
        parse(memory[pc-1], debug)
        display()
        c = input("\n>") if debug else ''
        if c == "r":
            for i in range(0, 8):
                print("R" + str(i) + ": \t" + hex(reg[i]))
            print('PSR:\t' + bin(memory[psr]))
            print('MCR:\t' + bin(memory[mcr_ptr]))
            print('PC: \t' + hex(pc))
        elif c == "p":
            return
        elif c == "ss":
            print("----------------")
            for i in range(reg[6], 0x3000):
                print(hex(i) + ":\t"+hex(memory[i]))
            print("----------------")
print("[LC3.py] Assembling LC3 OS")
lc3asm.loadFile("OS_vector_tables.asm")
lc3asm.asm()
# copy the assembled OS image into emulator memory
loadIn()
pc = np.int16(0x200)  # start execution at x0200
memory[psr] &=0x7FFF #set supervisor mode
print("[LC3.py] Starting LC3 OS")
run(debug = False)
# pc = np.int16(0x3000)
"lc3asm.loadFile",
"numpy.int16",
"lc3asm.asm",
"numpy.uint16",
"lc3asm.out.items"
] | [((101, 123), 'numpy.uint16', 'np.uint16', (['([0] * 65535)'], {}), '([0] * 65535)\n', (110, 123), True, 'import numpy as np\n'), ((142, 160), 'numpy.uint16', 'np.uint16', (['([0] * 8)'], {}), '([0] * 8)\n', (151, 160), True, 'import numpy as np\n'), ((165, 178), 'numpy.int16', 'np.int16', (['(512)'], {}), '(512)\n', (173, 178), True, 'import numpy as np\n'), ((6318, 6357), 'lc3asm.loadFile', 'lc3asm.loadFile', (['"""OS_vector_tables.asm"""'], {}), "('OS_vector_tables.asm')\n", (6333, 6357), False, 'import lc3asm\n'), ((6359, 6371), 'lc3asm.asm', 'lc3asm.asm', ([], {}), '()\n', (6369, 6371), False, 'import lc3asm\n'), ((6388, 6401), 'numpy.int16', 'np.int16', (['(512)'], {}), '(512)\n', (6396, 6401), True, 'import numpy as np\n'), ((4163, 4195), 'numpy.int16', 'np.int16', (['memory[instruct & 255]'], {}), '(memory[instruct & 255])\n', (4171, 4195), True, 'import numpy as np\n'), ((5398, 5416), 'lc3asm.out.items', 'lc3asm.out.items', ([], {}), '()\n', (5414, 5416), False, 'import lc3asm\n')] |
import pickle
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
output_file = 'model.bin'
print('Reading data')
df_raw = pd.read_csv('survey.csv')
print('Wrangling Data')
# normalize all string columns: lower-case, spaces -> underscores
string_columns = df_raw.columns[(df_raw.dtypes == 'object')]
for col in string_columns:
    df_raw[col] = df_raw[col].str.lower().str.replace(' ','_')
# drop rows with no industry
df_raw = df_raw[~df_raw.industry.isnull()]
# NOTE(review): the chained assignments below (df.col[mask] = ...) raise
# SettingWithCopyWarning in pandas and can silently fail on a copy;
# `.loc[mask, col] = ...` is the safe spelling — confirm before upgrading.
# collapse industries that appear only once into 'other'
underrepresented_industries = df_raw.industry.value_counts()[(df_raw.industry.value_counts() == 1)].index.to_list()
df_raw.industry[df_raw.industry.isin(underrepresented_industries)] = 'other'
df_raw.highest_level_of_education_completed = df_raw.highest_level_of_education_completed.fillna('None')
df_raw.job_title[df_raw.job_title == 'manager,_[department_name]'] = 'manager'
# collapse job titles that appear only once
single_represented_jobs = df_raw.job_title.value_counts()[df_raw.job_title.value_counts() == 1].index.to_list()
df_raw.job_title[ df_raw.job_title.isin(single_represented_jobs) ] = 'singular_job_title'
df_raw.gender = df_raw.gender.fillna('other_or_prefer_not_to_answer')
df_raw.gender[df_raw.gender == 'prefer_not_to_answer'] = 'other_or_prefer_not_to_answer'
# unify country spelling variants
df_raw.country[df_raw.country.isin(['united_states','usa','us','u.s.','united_states_of_america','united__states'])] = 'united_states'
df_raw.country[df_raw.country.isin(['uk','united_kingdom','england,_gb','england,_uk.','united_kindom','united_kingdom_(england)','wales_(united_kingdom)','u.k.','uk_(england)'])] = 'united_kingdom'
df_raw.country[df_raw.country.isin(['canada,_ottawa,_ontario'])] = 'canada'
df_raw.other_monetary_comp = df_raw.other_monetary_comp.fillna(0)
df_raw.currency_other = df_raw.currency_other.fillna(df_raw.currency)
# rows with an unmapped 'other' currency cannot be converted; drop them
df_raw = df_raw[df_raw.currency != 'other'].reset_index(drop=True)
df_raw.state = df_raw.state.fillna('na')
df_raw.state = df_raw.state.str.replace(',_*','')
df_raw.city = df_raw.city.fillna('none_supplied')
single_represented_cities = df_raw.city.value_counts()[df_raw.city.value_counts() == 1].index.to_list()
df_raw.city[ df_raw.city.isin(single_represented_cities) ] = 'singular_represented_city'
df_raw.race = df_raw.race.fillna('another_option_not_listed_here_or_prefer_not_to_answer')
# hard-coded exchange rates to USD used to normalize salaries
df_currency = pd.DataFrame({
    'CAD' : 0.81
    , 'GBP' : 1.37
    , 'EUR' : 1.16
    , 'AUD/NZD' : 0.74 #Rough average
    , 'CHF' : 1.10
    , 'SEK' : 0.12
    , 'JPY' : 0.0088
    , 'ZAR' : 0.065
    , 'HKD' : 0.13
    }.items(),columns=['currency','exchange_rate'])
df_currency.currency = df_currency.currency.str.lower()
df = df_raw.merge(df_currency,on='currency')
# target: total compensation converted via exchange rate
df['renumeration'] = df.annual_salary + df.other_monetary_comp
df['renumeration'] = df['renumeration'] * df['exchange_rate']
# drop columns not used as model features
del df['timestamp']
del df['currency']
del df['currency_other']
del df['additional_context_on_job_title']
del df['additional_context_on_income']
del df['exchange_rate']
del df['state']
del df['race']
categorical_columns = df.columns[df.dtypes=='object' ].values
numerical_columns = df.columns[df.dtypes=='float64'].values
print('Splitting Dataset')
seed = 1
prop_test = 0.2
df_full_train, df_test = train_test_split(df, test_size=prop_test, random_state=seed)
def setup_tensors(df):
    """Split a frame into (categorical features, log target).

    Returns the frame with all numeric columns dropped, plus
    log1p(renumeration) as the regression target.
    """
    df = df.reset_index(drop=True)
    target = np.log1p(df.renumeration.values)
    for column in numerical_columns:
        del df[column]
    return df, target
# apply the feature/target split to both partitions
df_full_train, y_full_train = setup_tensors(df_full_train)
df_test , y_test = setup_tensors(df_test )
print('Setting encoding')
# shared one-hot encoder, fitted once and reused by transform_set below
dv = DictVectorizer(sparse=False)
def transform_set(columns, df, fit=False):
    """Vectorize `columns` of `df` through the shared DictVectorizer.

    When fit=True the vectorizer vocabulary is (re)fitted first.
    Returns the row dicts and the encoded matrix.
    """
    records = df[columns].to_dict(orient='records')
    matrix = dv.fit_transform(records) if fit else dv.transform(records)
    return records, matrix
fit_columns = categorical_columns
# fit the vectorizer on the full frame so train/test share one vocabulary
_ , _ = transform_set(fit_columns, df , fit=True)
dicts_full_train, X_full_train = transform_set(fit_columns, df_full_train)
dicts_test , X_test = transform_set(fit_columns, df_test )
dv.get_feature_names()  # NOTE(review): result discarded; likely leftover debugging
dtrain_full = xgb.DMatrix(X_full_train, label=y_full_train, feature_names=dv.get_feature_names())
dtest = xgb.DMatrix(X_test , label=y_test , feature_names=dv.get_feature_names())
print('Training')
# evaluation sets reported every 5 rounds during boosting
watchlist = [(dtrain_full, 'full_train'), (dtest, 'test')]
xgb_params = { 'eta':0.2, 'max_depth':25, 'min_child_weight':1, 'objective':'reg:squarederror', 'nthread':2, 'seed':1, 'verbosity':1 }
model = xgb.train(xgb_params, dtrain_full, num_boost_round=100,verbose_eval=5,evals=watchlist)
print('Saving Model')
# persist the fitted encoder together with the trained booster
with open(output_file,'w+b') as f:
    pickle.dump((dv,model),f)
| [
"pickle.dump",
"xgboost.train",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.feature_extraction.DictVectorizer",
"numpy.log1p"
] | [((250, 275), 'pandas.read_csv', 'pd.read_csv', (['"""survey.csv"""'], {}), "('survey.csv')\n", (261, 275), True, 'import pandas as pd\n'), ((3187, 3247), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': 'prop_test', 'random_state': 'seed'}), '(df, test_size=prop_test, random_state=seed)\n', (3203, 3247), False, 'from sklearn.model_selection import train_test_split\n'), ((3571, 3599), 'sklearn.feature_extraction.DictVectorizer', 'DictVectorizer', ([], {'sparse': '(False)'}), '(sparse=False)\n', (3585, 3599), False, 'from sklearn.feature_extraction import DictVectorizer\n'), ((4514, 4606), 'xgboost.train', 'xgb.train', (['xgb_params', 'dtrain_full'], {'num_boost_round': '(100)', 'verbose_eval': '(5)', 'evals': 'watchlist'}), '(xgb_params, dtrain_full, num_boost_round=100, verbose_eval=5,\n evals=watchlist)\n', (4523, 4606), True, 'import xgboost as xgb\n'), ((3316, 3348), 'numpy.log1p', 'np.log1p', (['df.renumeration.values'], {}), '(df.renumeration.values)\n', (3324, 3348), True, 'import numpy as np\n'), ((4660, 4687), 'pickle.dump', 'pickle.dump', (['(dv, model)', 'f'], {}), '((dv, model), f)\n', (4671, 4687), False, 'import pickle\n')] |
"""
Functions and objects for working with LC-MS data
Objects
-------
Chromatogram
MSSpectrum
Roi
"""
import numpy as np
import pandas as pd
import pyopenms
from scipy.interpolate import interp1d
from typing import Optional, Iterable, Tuple, Union, List, Callable
from . import peaks
from . import validation
import bokeh.plotting
from bokeh.palettes import Set3
from collections import namedtuple
import warnings
from .utils import find_closest
ms_experiment_type = Union[pyopenms.MSExperiment, pyopenms.OnDiscMSExperiment]
def reader(path: str, on_disc: bool = True):
    """
    Load `path` file into an OnDiskExperiment. If the file is not indexed, load
    the file into memory instead.

    Parameters
    ----------
    path : str
        path to read mzML file from.
    on_disc : bool
        if True doesn't load the whole file on memory.

    Returns
    -------
    pyopenms.OnDiskMSExperiment or pyopenms.MSExperiment
    """
    if not on_disc:
        experiment = pyopenms.MSExperiment()
        pyopenms.MzMLFile().load(path, experiment)
        return experiment
    try:
        experiment = pyopenms.OnDiscMSExperiment()
        experiment.openFile(path)
        # accessing a spectrum raises on non-indexed files, which
        # triggers the in-memory fallback below
        experiment.getSpectrum(0)
    except RuntimeError:
        msg = "{} is not an indexed mzML file, switching to MSExperiment"
        warnings.warn(msg.format(path))
        experiment = pyopenms.MSExperiment()
        pyopenms.MzMLFile().load(path, experiment)
    return experiment
def make_chromatograms(ms_experiment: ms_experiment_type, mz: Iterable[float],
                       window: float = 0.005, start: Optional[int] = None,
                       end: Optional[int] = None,
                       accumulator: str = "sum"
                       ) -> Tuple[np.ndarray, np.ndarray]:
    """
    Computes extracted ion chromatograms for a list of m/z values from raw
    data.

    Parameters
    ----------
    ms_experiment : MSExp or OnDiskMSExp.
    mz : iterable[float]
        mz values used to build the EICs.
    start : int, optional
        first scan to build the chromatograms
    end : int, optional
        last scan to build the chromatograms. The scan with `number` end is not
        included in the chromatograms.
    window : positive number.
        Tolerance to build the EICs.
    accumulator : {"sum", "mean"}
        "mean" divides the intensity in the EIC using the number of points in
        the window.

    Returns
    -------
    rt : array of retention times
    eic : array with rows of EICs.
    """
    nsp = ms_experiment.getNrSpectra()
    if not isinstance(mz, np.ndarray):
        mz = np.array(mz)
    if start is None:
        start = 0
    if end is None:
        end = nsp
    # validate params
    params = {"start": start, "end": end, "window": window, "mz": mz,
              "accumulator": accumulator}
    validation.validate_make_chromatograms_params(nsp, params)
    # mz_intervals interleaves [lo0, hi0, lo1, hi1, ...] so a single
    # searchsorted call yields slice bounds compatible with add.reduceat
    mz_intervals = (np.vstack((mz - window, mz + window))
                    .T.reshape(mz.size * 2))
    eic = np.zeros((mz.size, end - start))
    rt = np.zeros(end - start)
    for ksp in range(start, end):
        # find rt, mz and intensity values of the current scan
        sp = ms_experiment.getSpectrum(ksp)
        rt[ksp - start] = sp.getRT()
        mz_sp, int_sp = sp.get_peaks()
        ind_sp = np.searchsorted(mz_sp, mz_intervals)
        # check if the slices aren't empty
        has_mz = (ind_sp[1::2] - ind_sp[::2]) > 0
        # elements added at the end of mz_sp raise IndexError
        ind_sp[ind_sp >= int_sp.size] = int_sp.size - 1
        # reduceat sums between consecutive indices; keep only the [lo, hi)
        # segment sums and zero out intervals that captured no points
        tmp_eic = np.where(has_mz, np.add.reduceat(int_sp, ind_sp)[::2], 0)
        if accumulator == "mean":
            norm = ind_sp[1::2] - ind_sp[::2]
            norm[norm == 0] = 1
            tmp_eic = tmp_eic / norm
        eic[:, ksp - start] = tmp_eic
    return rt, eic
def accumulate_spectra(ms_experiment: ms_experiment_type, start: int, end: int,
                       subtract_left: Optional[int] = None,
                       subtract_right: Optional[int] = None,
                       kind: str = "linear") -> Tuple[np.ndarray, np.ndarray]:
    """
    accumulates a spectra into a single spectrum.

    Parameters
    ----------
    ms_experiment : pyopenms.MSExperiment, pyopenms.OnDiskMSExperiment
    start : int
        start slice for scan accumulation
    end : int
        end slice for scan accumulation.
    subtract_left : int, optional
        Scans between `subtract_left` and `start` are subtracted from the
        accumulated spectrum.
    subtract_right : int, optional
        Scans between `subtract_right` and `end` are subtracted from the
        accumulated spectrum.
    kind : str
        kind of interpolator to use with scipy interp1d.

    Returns
    -------
    accumulated_mz : array of m/z values
    accumulated_int : array of cumulative intensities.
    """
    if subtract_left is None:
        subtract_left = start
    if subtract_right is None:
        subtract_right = end
    # parameter validation
    params = {"start": start, "end": end, "subtract_left": subtract_left,
              "subtract_right": subtract_right, "kind": kind}
    n_sp = ms_experiment.getNrSpectra()
    validation.validate_accumulate_spectra_params(n_sp, params)
    # creates a common mz reference value for the scans
    mz, _ = ms_experiment.getSpectrum(start).get_peaks()
    accumulated_mz = _get_uniform_mz(mz)
    accumulated_sp = np.zeros_like(accumulated_mz)
    # interpolates each scan to the reference. Removes values outside the
    # min and max of the reference.
    for scan in range(subtract_left, subtract_right):
        mz_scan, int_scan = ms_experiment.getSpectrum(scan).get_peaks()
        mz_min, mz_max = mz_scan.min(), mz_scan.max()
        min_ind, max_ind = np.searchsorted(accumulated_mz, [mz_min, mz_max])
        interpolator = interp1d(mz_scan, int_scan, kind=kind)
        tmp_sp = interpolator(accumulated_mz[min_ind:max_ind])
        # accumulate scans
        # NOTE(review): scan == end is accumulated (not subtracted) here,
        # although other functions in this module treat `end` as exclusive;
        # the `(scan > end)` test may have been intended as `>=` — confirm.
        if (scan < start) or (scan > end):
            accumulated_sp[min_ind:max_ind] -= tmp_sp
        else:
            accumulated_sp[min_ind:max_ind] += tmp_sp
    # keep only m/z points with net positive accumulated intensity
    is_positive_sp = accumulated_sp > 0
    accumulated_mz = accumulated_mz[is_positive_sp]
    accumulated_sp = accumulated_sp[is_positive_sp]
    return accumulated_mz, accumulated_sp
def _get_uniform_mz(mz: np.ndarray):
"""returns a new uniformly sampled m/z array."""
mz_min = mz.min()
mz_max = mz.max()
mz_res = np.diff(mz).min()
uniform_mz = np.arange(mz_min, mz_max, mz_res)
return uniform_mz
def make_widths_lc(mode: str) -> np.ndarray:
    """
    Create an array of widths to use in CWT peak picking of LC data.

    Parameters
    ----------
    mode : {"hplc", "uplc"}

    Returns
    -------
    widths : array

    Raises
    ------
    ValueError
        if `mode` is not one of the valid values.
    """
    # widths are denser at small scales where narrow peaks live
    if mode == "uplc":
        pieces = (np.linspace(0.25, 5, 20), np.linspace(6, 20, 8),
                  np.linspace(25, 60, 8))
    elif mode == "hplc":
        pieces = (np.linspace(1, 10, 20), np.linspace(10, 30, 8),
                  np.linspace(40, 90, 8))
    else:
        raise ValueError("Valid modes are `hplc` or `uplc`.")
    return np.hstack(pieces)
def make_widths_ms(mode: str) -> np.ndarray:
    """
    Create an array of widths to use in CWT peak picking of MS data.

    Parameters
    ----------
    mode : {"qtof", "orbitrap"}

    Returns
    -------
    widths : array

    Raises
    ------
    ValueError
        if `mode` is not one of the valid values.
    """
    # (min_width, middle, max_width) per instrument type
    width_limits = {"qtof": (0.005, 0.1, 0.2),
                    "orbitrap": (0.0005, 0.001, 0.005)}
    if mode not in width_limits:
        raise ValueError("mode must be `orbitrap` or `qtof`")
    min_width, middle, max_width = width_limits[mode]
    # [:-1] prevents repeating the middle value in both segments
    low = np.linspace(min_width, middle, 20)[:-1]
    high = np.linspace(middle, max_width, 10)
    return np.hstack((low, high))
def get_lc_cwt_params(mode: str) -> dict:
    """
    Return sane default values for performing CWT based peak picking on LC data.

    Parameters
    ----------
    mode : {"hplc", "uplc"}
        HPLC assumes typical experimental conditions for HPLC experiments:
        longer columns with particle size greater than 3 micron. UPLC is for
        data acquired with short columns with particle size lower than 3
        micron.

    Returns
    -------
    cwt_params : dict
        parameters to pass to .peak.pick_cwt function.

    Raises
    ------
    ValueError
        if `mode` is not one of the valid values.
    """
    # (min_width, max_width) per separation mode
    width_limits = {"hplc": (10, 90), "uplc": (5, 60)}
    if mode not in width_limits:
        raise ValueError("`mode` must be `hplc` or `uplc`")
    min_width, max_width = width_limits[mode]
    return {"snr": 10, "min_length": 5, "max_distance": 1,
            "gap_threshold": 1, "estimators": "default",
            "min_width": min_width, "max_width": max_width}
def get_ms_cwt_params(mode: str) -> dict:
    """
    Return sane default values for performing CWT based peak picking on MS data.

    Parameters
    ----------
    mode : {"qtof", "orbitrap"}
        qtof assumes a peak width in the range of 0.01-0.05 Da. `orbitrap`
        assumes a peak width in the range of 0.001-0.005 Da.

    Returns
    -------
    cwt_params : dict
        parameters to pass to .peak.pick_cwt function.

    Raises
    ------
    ValueError
        if `mode` is not one of the valid values.
    """
    # (min_width, max_width, max_distance) per instrument type
    mode_settings = {"qtof": (0.01, 0.2, 0.005),
                     "orbitrap": (0.0005, 0.005, 0.0025)}
    if mode not in mode_settings:
        raise ValueError("`mode` must be `qtof` or `orbitrap`")
    min_width, max_width, max_distance = mode_settings[mode]
    return {"snr": 10, "min_length": 5, "gap_threshold": 1,
            "estimators": "default", "min_width": min_width,
            "max_width": max_width, "max_distance": max_distance}
def get_roi_params(separation: str = "uplc", instrument: str = "qtof"):
    """
    Creates a dictionary with recommended parameters for the make_roi function
    in different use cases.

    Parameters
    ----------
    separation : {"uplc", "hplc"}
        Mode in which the data was acquired. Used to set minimum length of the
        roi and number of missing values.
    instrument : {"qtof", "orbitrap"}
        Type of MS instrument. Used to set the tolerance.

    Returns
    -------
    roi_parameters : dict

    Raises
    ------
    ValueError
        if `separation` or `instrument` take invalid values.
    """
    separation_settings = {"uplc": {"max_missing": 1, "min_length": 10},
                           "hplc": {"max_missing": 1, "min_length": 20}}
    instrument_tolerance = {"qtof": 0.01, "orbitrap": 0.005}
    if separation not in separation_settings:
        raise ValueError("valid `separation` are uplc and hplc")
    if instrument not in instrument_tolerance:
        raise ValueError("valid `instrument` are qtof and orbitrap")
    roi_params = {"min_intensity": 500, "multiple_match": "reduce"}
    roi_params.update(separation_settings[separation])
    roi_params["tolerance"] = instrument_tolerance[instrument]
    roi_params["mode"] = separation
    return roi_params
def _get_find_centroid_params(instrument: str):
"""
Set default parameters to find_centroid method using instrument information.
Parameters
----------
instrument : {"qtof", "orbitrap"}
Returns
-------
params : dict
"""
params = {"snr": 10}
if instrument == "qtof":
md = 0.01
else:
# valid values for instrument are qtof or orbitrap
md = 0.005
params["min_distance"] = md
return params
def _find_isotopic_distribution_aux(mz: np.ndarray, mz_ft: float,
                                    q: int, n_isotopes: int,
                                    tol: float):
    """
    Finds the isotopic distribution for a given charge state. Auxiliary
    function to _find_isotopic_distribution.

    Isotopes are searched based on the assumption that the mass difference
    is due to the presence of a 13C atom.

    Parameters
    ----------
    mz : numpy.ndarray
        List of peaks
    mz_ft : float
        Monoisotopic mass
    q : charge state of the ion
    n_isotopes : int
        Number of isotopes to search in the distribution
    tol : float
        Mass tolerance, in absolute units

    Returns
    -------
    match_ind : np.ndarray
        array of indices for the isotopic distribution.
    """
    # TODO: Remove this function when the isotope finder module is added.
    mono_index = find_closest(mz, mz_ft)
    if abs(mz[mono_index] - mz_ft) > tol:
        # no peak close enough to the monoisotopic mass
        return np.array([])
    c13_dm = 1.003355
    # theoretical positions of the isotopologues for this charge state
    expected_mz = mz[mono_index] + np.arange(n_isotopes) * c13_dm / q
    candidates = find_closest(mz, expected_mz)
    within_tol = np.abs(mz[candidates] - expected_mz) <= tol
    return candidates[np.where(within_tol)[0]]
def _find_isotopic_distribution(mz: np.ndarray, mz_mono: float,
                                q_max: int, n_isotopes: int,
                                tol: float):
    """
    Finds the isotopic distribution within charge lower than q_max.

    Isotopes are searched based on the assumption that the mass difference
    is due to the presence of a 13C atom. If multiple charge states are
    compatible with an isotopic distribution, the charge state with the
    largest number of isotopes detected is kept.

    Parameters
    ----------
    mz : numpy.ndarray
        List of peaks
    mz_mono : float
        Monoisotopic mass
    q_max : int
        max charge to analyze
    n_isotopes : int
        Number of isotopes to search in the distribution
    tol : float
        Mass tolerance, in absolute units

    Returns
    -------
    best_peaks: numpy.ndarray
    """
    # TODO: Remove this function when the isotope finder module is added.
    best_peaks = np.array([], dtype=int)
    n_peaks = 0
    for q in range(1, q_max + 1):
        tmp = _find_isotopic_distribution_aux(mz, mz_mono, q,
                                              n_isotopes, tol)
        if tmp.size > n_peaks:
            best_peaks = tmp
            # BUGFIX: track the best size found so far; previously n_peaks
            # was never updated, so the last non-empty match always won
            # instead of the charge state with the most isotopes.
            n_peaks = tmp.size
    return best_peaks
class Chromatogram:
    """
    Representation of a chromatogram. Manages plotting and peak picking.

    Attributes
    ----------
    rt : array
        retention time in each scan.
    spint : array
        intensity in each scan.
    mode : str
        used to set default parameter for peak picking.
    """

    def __init__(self, rt: np.ndarray, spint: np.ndarray,
                 mode: Optional[str] = None):
        """
        Chromatogram constructor.

        Parameters
        ----------
        spint : array of non negative numbers.
            Intensity values of each scan.
        rt : array of positive numbers.
            Retention time values.
        mode : {"uplc", "hplc"}, optional
            used to set default parameters in peak picking. Defaults to
            "uplc" when None.
        """
        self.mode = "uplc" if mode is None else mode
        self.rt = rt
        self.spint = spint
        self.peaks = None

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, value):
        valid_values = ["uplc", "hplc"]
        if value not in valid_values:
            raise ValueError("mode must be one of {}".format(valid_values))
        self._mode = value

    def find_peaks(self, cwt_params: Optional[dict] = None) -> dict:
        """
        Find peaks with the modified version of the cwt algorithm described
        in the centWave algorithm. Peaks are stored in the `peaks` attribute
        of the Chromatogram object.

        Parameters
        ----------
        cwt_params : dict
            key-value parameters overriding the defaults from the `mode`
            attribute; passed to the pick_cwt function.

        Returns
        -------
        params : dict
            dictionary of peak parameters

        See Also
        --------
        peaks.detect_peaks : peak detection using the CWT algorithm.
        lcms.get_lc_cwt_params : set default parameters for pick_cwt.
        """
        params = get_lc_cwt_params(self.mode)
        if cwt_params:
            params.update(cwt_params)
        widths = make_widths_lc(self.mode)
        self.peaks, peak_params = peaks.detect_peaks(self.rt, self.spint,
                                                     widths, **params)
        return peak_params

    def plot(self, draw: bool = True, fig_params: Optional[dict] = None,
             line_params: Optional[dict] = None) -> bokeh.plotting.Figure:
        """
        Plot the chromatogram.

        Parameters
        ----------
        draw : bool, optional
            if True run bokeh show function.
        fig_params : dict
            key-value parameters to pass into bokeh figure function.
        line_params : dict
            key-value parameters to pass into bokeh line function.

        Returns
        -------
        bokeh Figure
        """
        line_defaults = {"line_width": 1, "line_color": "black",
                         "alpha": 0.8}
        if line_params is not None:
            line_defaults.update(line_params)
        fig_defaults = {"aspect_ratio": 1.5}
        if fig_params is not None:
            fig_defaults.update(fig_params)
        fig = bokeh.plotting.figure(**fig_defaults)
        fig.line(self.rt, self.spint, **line_defaults)
        if self.peaks:
            cmap = Set3[12]
            for k, peak in enumerate(self.peaks):
                # k % 12 cycles over the colormap
                fig.varea(self.rt[peak.start:(peak.end + 1)],
                          self.spint[peak.start:(peak.end + 1)], 0,
                          fill_alpha=0.8, fill_color=cmap[k % 12])
        # figure appearance
        fig.xaxis.axis_label = "Rt [s]"
        fig.xaxis.axis_label_text_font_style = "bold"
        fig.yaxis.axis_label = "intensity [au]"
        fig.yaxis.axis_label_text_font_style = "bold"
        fig.yaxis.formatter.precision = 2
        if draw:
            bokeh.plotting.show(fig)
        return fig
class MSSpectrum:
    """
    Representation of a Mass Spectrum in profile mode. Manages conversion to
    centroids and plotting of data.

    Attributes
    ----------
    mz : array of m/z values
    spint : array of intensity values.
    mode : str
        MS instrument type. Used to set default values in peak picking.
    """

    def __init__(self, mz: np.ndarray, spint: np.ndarray,
                 mode: Optional[str] = None):
        """
        MSSpectrum constructor.

        Parameters
        ----------
        mz: array
            m/z values.
        spint: array
            intensity values.
        mode: {"qtof", "orbitrap"}, optional
            instrument type; defaults to "qtof" when None.
        """
        self.mz = mz
        self.spint = spint
        self.peaks = None
        self.mode = "qtof" if mode is None else mode

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, value):
        valid_values = ["qtof", "orbitrap"]
        if value not in valid_values:
            raise ValueError("mode must be one of {}".format(valid_values))
        self._mode = value

    def find_centroids(self, snr: Optional[float] = None,
                       min_distance: Optional[float] = None
                       ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        r"""
        Find centroids in the spectrum.

        Centroids are found as local maxima above a noise value. See notes
        for implementation details.

        Parameters
        ----------
        snr : positive number, optional
            Minimum signal to noise ratio of the peaks. Overwrites values
            set by mode. Default value is 10
        min_distance : positive number, optional
            Minimum distance between consecutive peaks. If None, sets the
            value to 0.01 if the `mode` attribute is qtof. If the `mode` is
            orbitrap, sets the value to 0.005

        Returns
        -------
        centroids : array of peak centroids
        area : array of peak area
        centroid_index : index of the centroids in `mz`

        Notes
        -----
        Peaks are found as local maxima in the signal. To remove low
        intensity values, a baseline and noise is estimated assuming that y
        has additive contributions from signal, baseline and noise:

        .. math::
            y[n] = s[n] + b[n] + \epsilon

        Where :math:`\epsilon \sim N(0, \sigma)`. A peak is valid only if

        .. math::
            \frac{y[n_{peak}] - b[n_{peak}]}{\sigma} \geq SNR

        The extension of the peak is computed as the closest minimum to the
        peak. If two peaks are closer than `min_distance`, the peaks are
        merged.
        """
        params = _get_find_centroid_params(self.mode)
        if snr is not None:
            params["snr"] = snr
        if min_distance is not None:
            params["min_distance"] = min_distance
        return peaks.find_centroids(self.mz, self.spint, **params)

    def plot(self, draw: bool = True, fig_params: Optional[dict] = None,
             line_params: Optional[dict] = None) -> bokeh.plotting.Figure:
        """
        Plot the spectrum.

        Parameters
        ----------
        draw : bool, optional
            if True run bokeh show function.
        fig_params : dict
            key-value parameters to pass into bokeh figure function.
        line_params : dict
            key-value parameters to pass into bokeh line function.

        Returns
        -------
        bokeh Figure
        """
        line_defaults = {"line_width": 1, "line_color": "black",
                         "alpha": 0.8}
        if line_params is not None:
            line_defaults.update(line_params)
        fig_defaults = {"aspect_ratio": 1.5}
        if fig_params is not None:
            fig_defaults.update(fig_params)
        fig = bokeh.plotting.figure(**fig_defaults)
        fig.line(self.mz, self.spint, **line_defaults)
        if self.peaks:
            cmap = Set3[12]
            for k, peak in enumerate(self.peaks):
                # k % 12 cycles over the colormap
                fig.varea(self.mz[peak.start:(peak.end + 1)],
                          self.spint[peak.start:(peak.end + 1)], 0,
                          fill_alpha=0.8, fill_color=cmap[k % 12])
        # figure appearance
        fig.xaxis.axis_label = "m/z"
        fig.xaxis.axis_label_text_font_style = "bold"
        fig.yaxis.axis_label = "intensity [au]"
        fig.yaxis.axis_label_text_font_style = "bold"
        fig.yaxis.formatter.precision = 2
        if draw:
            bokeh.plotting.show(fig)
        return fig
_TempRoi = namedtuple("TempRoi", ["mz", "sp", "scan"])
def _make_empty_temp_roi():
return _TempRoi(mz=list(), sp=list(), scan=list())
class Roi(Chromatogram):
    """
    mz traces where a chromatographic peak may be found. Subclassed from
    Chromatogram. To be used with the detect_features method of MSData.

    Attributes
    ----------
    rt : array
        retention time in each scan.
    spint : array
        intensity in each scan.
    mz : array
        m/z in each scan.
    first_scan : int
        first scan in the raw data where the ROI was detected.
    """

    def __init__(self, spint: np.ndarray, mz: np.ndarray, rt: np.ndarray,
                 first_scan: int, mode: Optional[str] = None):
        super().__init__(rt, spint, mode=mode)
        self.mz = mz
        self.first_scan = first_scan

    def fill_nan(self):
        """
        Fill missing intensity values using linear interpolation.
        """
        # missing endpoints are set to zero: interp1d cannot extrapolate and
        # peak picking works better with closed boundaries
        for edge in (0, -1):
            if np.isnan(self.spint[edge]):
                self.spint[edge] = 0
        missing = np.isnan(self.spint)
        interpolator = interp1d(self.rt[~missing], self.spint[~missing])
        # missing m/z values are replaced by the mean m/z of the trace
        self.mz[missing] = np.nanmean(self.mz)
        self.spint[missing] = interpolator(self.rt[missing])

    def get_peaks_mz(self):
        """
        Computes the weighted mean of the m/z for each peak and the m/z
        standard deviation.

        Returns
        -------
        mean_mz : array
        mz_std : array
        """
        n_peaks = len(self.peaks)
        mz_mean = np.zeros(n_peaks)
        mz_std = np.zeros(n_peaks)
        for k, peak in enumerate(self.peaks):
            peak_slice = slice(peak.start, peak.end + 1)
            mz_mean[k] = np.average(self.mz[peak_slice],
                                    weights=self.spint[peak_slice])
            mz_std[k] = self.mz[peak_slice].std()
        return mz_mean, mz_std
class _RoiProcessor:
    """
    Class used by make_roi function to generate Roi instances.
    Attributes
    ----------
    mz_mean: numpy.ndarray
        mean value of mz for a given row in mz_array. Used to add new values
        based on a tolerance. its updated after adding a new column
    n_missing: numpy.ndarray
        number of consecutive missing values. Used to detect finished rois
    roi: list[ROI]
    """
    def __init__(self, mz_seed: np.ndarray, max_missing: int = 1,
                 min_length: int = 5, min_intensity: float = 0,
                 tolerance: float = 0.005, multiple_match: str = "closest",
                 mz_reduce: Union[str, Callable] = "mean",
                 sp_reduce: Union[str, Callable] = "sum",
                 mode: Optional[str] = None):
        """
        Parameters
        ----------
        mz_seed: numpy.ndarray
            initial values to build rois
        max_missing: int
            maximum number of missing consecutive values. when a row surpass
            this number the roi is flagged as finished.
        min_length: int
            The minimum length of a finished roi to be considered valid before
            being added to the roi list.
        min_intensity: float
        tolerance: float
            mz tolerance used to connect values.
        multiple_match: {"closest", "reduce"}
            how to match peaks when there is more than one match. If mode is
            `closest`, then the closest peak is assigned as a match and the
            others are assigned to no match. If mode is `reduce`, then a unique
            mz and intensity value is generated using the reduce function in
            `mz_reduce` and `spint_reduce` respectively.
        mz_reduce: str or callable
            function used to reduce mz values. Can be a function accepting
            numpy arrays and returning numbers. Only used when `multiple_match`
            is reduce. See the following prototype:
            def mz_reduce(mz_match: np.ndarray) -> float:
                pass
        sp_reduce: str or callable
            function used to reduce spint values. Can be a function accepting
            numpy arrays and returning numbers. Only used when `multiple_match`
            is reduce. To use custom functions see the prototype shown on
            `mz_reduce`.
        mode: str, optional
            Mode used to create ROI.
        """
        if len(mz_seed.shape) != 1:
            msg = "array must be a vector"
            raise ValueError(msg)
        if multiple_match not in ["closest", "reduce"]:
            msg = "Valid modes are closest or reduce"
            raise ValueError(msg)
        # resolve the reduce functions: strings map to numpy reductions,
        # anything else is assumed to be a user supplied callable
        if mz_reduce == "mean":
            self._mz_reduce = np.mean
        else:
            self._mz_reduce = mz_reduce
        if sp_reduce == "mean":
            self._spint_reduce = np.mean
        elif sp_reduce == "sum":
            self._spint_reduce = np.sum
        else:
            self._spint_reduce = sp_reduce
        # per-ROI state arrays; all of them stay aligned with mz_mean, which
        # is kept sorted by m/z (see extend)
        self.mz_mean = mz_seed.copy()
        self.roi_index = np.arange(mz_seed.size)
        self.n_missing = np.zeros_like(mz_seed, dtype=int)
        self.max_intensity = np.zeros_like(mz_seed)
        self.length = np.zeros_like(mz_seed, dtype=int)
        # index is the current scan number, incremented on each add() call
        self.index = 0
        self.temp_roi_dict = {x: _make_empty_temp_roi() for x in self.roi_index}
        self.roi = list()
        self.min_intensity = min_intensity
        self.max_missing = max_missing
        self.min_length = min_length
        self.tolerance = tolerance
        self.multiple_match = multiple_match
        self.mode = mode
    def add(self, mz: np.ndarray, sp: np.ndarray, targeted: bool = False):
        """
        Adds new mz and spint values to temporal roi.
        """
        # find matching values
        match_index, mz_match, sp_match, mz_no_match, sp_no_match = \
            _match_mz(self.mz_mean, mz, sp, self.tolerance,
                      self.multiple_match, self._mz_reduce, self._spint_reduce)
        # append each matched value to its temporary ROI
        for k, k_mz, k_sp in zip(match_index, mz_match, sp_match):
            k_temp_roi = self.temp_roi_dict[self.roi_index[k]]
            k_temp_roi.mz.append(k_mz)
            k_temp_roi.sp.append(k_sp)
            k_temp_roi.scan.append(self.index)
        # update mz_mean and missing values
        # incremental running mean: (old_mean * n + new_value) / (n + 1)
        updated_mean = ((self.mz_mean[match_index] * self.length[match_index]
                         + mz_match) / (self.length[match_index] + 1))
        self.length[match_index] += 1
        # n_missing grows for every ROI and is reset only on the matched ones
        self.n_missing += 1
        self.n_missing[match_index] = 0
        self.max_intensity[match_index] = \
            np.maximum(self.max_intensity[match_index], sp_match)
        if not targeted:
            # in targeted mode the m/z reference values are kept fixed
            self.mz_mean[match_index] = updated_mean
        # values without a match seed new ROIs
        self.extend(mz_no_match, sp_no_match)
        self.index += 1
    def append_to_roi(self, rt: np.ndarray, targeted: bool = False):
        """
        Remove completed ROI. Valid ROI are appended to the roi attribute.
        """
        # check completed rois
        is_completed = self.n_missing > self.max_missing
        # the most common case are short rois that must be discarded
        is_valid_roi = ((self.length >= self.min_length) &
                        (self.max_intensity >= self.min_intensity))
        # add completed roi
        completed_index = np.where(is_completed)[0]
        for ind in completed_index:
            roi_ind = self.roi_index[ind]
            finished_roi = self.temp_roi_dict.pop(roi_ind)
            if is_valid_roi[ind]:
                roi = tmp_roi_to_roi(finished_roi, rt, mode=self.mode)
                self.roi.append(roi)
        if targeted:
            # targeted mode: keep the same m/z rows, reset their state and
            # assign fresh roi indices pointing to empty temporary ROIs
            self.n_missing[is_completed] = 0
            self.length[is_completed] = 0
            self.max_intensity[is_completed] = 0
            max_roi_ind = self.roi_index.max()
            n_completed = is_completed.sum()
            new_indices = np.arange(max_roi_ind + 1,
                                    max_roi_ind + 1 + n_completed)
            self.roi_index[is_completed] = new_indices
            new_tmp_roi = {k: _make_empty_temp_roi() for k in new_indices}
            self.temp_roi_dict.update(new_tmp_roi)
        else:
            # untargeted mode: drop the completed rows from all state arrays
            self.mz_mean = self.mz_mean[~is_completed]
            self.n_missing = self.n_missing[~is_completed]
            self.length = self.length[~is_completed]
            self.roi_index = self.roi_index[~is_completed]
            self.max_intensity = self.max_intensity[~is_completed]
    def extend(self, mz: np.ndarray, sp: np.ndarray):
        """adds new mz values to mz_mean"""
        # new ROIs get indices above the current maximum
        max_index = self.roi_index.max()
        new_indices = np.arange(mz.size) + max_index + 1
        mz_mean_tmp = np.hstack((self.mz_mean, mz))
        roi_index_tmp = np.hstack((self.roi_index, new_indices))
        # sorting keeps mz_mean ordered, which _match_mz relies on; every
        # state array is reordered with the same permutation
        sorted_index = np.argsort(mz_mean_tmp)
        n_missing_tmp = np.zeros_like(new_indices, dtype=int)
        n_missing_tmp = np.hstack((self.n_missing, n_missing_tmp))
        length_tmp = np.ones_like(new_indices, dtype=int)
        length_tmp = np.hstack((self.length, length_tmp))
        max_int_tmp = np.zeros_like(new_indices, dtype=float)
        max_int_tmp = np.hstack((self.max_intensity, max_int_tmp))
        for k_index, k_mz, k_sp in zip(new_indices, mz, sp):
            new_roi = _TempRoi(mz=[k_mz], sp=[k_sp], scan=[self.index])
            self.temp_roi_dict[k_index] = new_roi
        self.mz_mean = mz_mean_tmp[sorted_index]
        self.roi_index = roi_index_tmp[sorted_index]
        self.n_missing = n_missing_tmp[sorted_index]
        self.length = length_tmp[sorted_index]
        self.max_intensity = max_int_tmp[sorted_index]
    def flag_as_completed(self):
        # mark every ROI as completed; used to flush open ROIs after the
        # last scan has been processed
        self.n_missing[:] = self.max_missing + 1
def _compare_max(old: np.ndarray, new: np.ndarray) -> np.ndarray:
"""
returns the element-wise maximum between old and new
Parameters
----------
old: numpy.ndarray
new: numpy.ndarray
can have nan
Returns
-------
numpy.ndarray
"""
new[np.isnan(new)] = 0
return np.maximum(old, new)
def _match_mz(mz1: np.ndarray, mz2: np.ndarray, sp2: np.ndarray,
              tolerance: float, mode: str, mz_reduce: Callable,
              sp_reduce: Callable):
    """
    aux function to add method in _RoiProcessor. Find matched values.

    Parameters
    ----------
    mz1: numpy.ndarray
        _RoiProcessor mz_mean
    mz2: numpy.ndarray
        mz values to match
    sp2: numpy.ndarray
        intensity values associated to mz2
    tolerance: float
        tolerance used to match values
    mode: {"closest", "reduce"}
        Behaviour when more than one peak in mz2 matches with a given peak
        in mz1. If mode is `closest`, then the closest peak is assigned as a
        match and the others are assigned to no match. If mode is `reduce`,
        then a unique mz and intensity value is generated using `mz_reduce`
        and `sp_reduce`.

    Returns
    ------
    match_index: numpy.ndarray
        indices of the matched peaks in mz1.
    mz_match: numpy.ndarray
        values of mz2 that matches with mz1
    sp_match: numpy.ndarray
        values of sp2 that matches with mz1
    mz_no_match: numpy.ndarray
    sp_no_match: numpy.ndarray

    Raises
    ------
    ValueError
        if `mode` is not a valid value.
    """
    closest_index = find_closest(mz1, mz2)
    # distance of each mz2 value to its closest mz1 value
    dmz = np.abs(mz1[closest_index] - mz2)
    match_mask = (dmz <= tolerance)
    no_match_mask = ~match_mask
    match_index = closest_index[match_mask]
    # check multiple_matches
    unique, first_index, count_index = np.unique(match_index,
                                                 return_counts=True,
                                                 return_index=True)
    # set match values
    match_index = unique
    sp_match = sp2[match_mask][first_index]
    mz_match = mz2[match_mask][first_index]
    # compute matches for duplicates
    multiple_match_mask = count_index > 1
    first_index = first_index[multiple_match_mask]
    if first_index.size > 0:
        first_index_index = np.where(count_index > 1)[0]
        count_index = count_index[multiple_match_mask]
        iterator = zip(first_index_index, first_index, count_index)
        if mode == "closest":
            rm_index = list()   # list of duplicate index to remove
            mz_replace = list()
            spint_replace = list()
            for first_ind, index, count in iterator:
                # check which of the duplicate is closest, the rest are removed
                closest = \
                    np.argmin(dmz[match_mask][index:(index + count)]) + index
                mz_replace.append(mz2[match_mask][closest])
                spint_replace.append(sp2[match_mask][closest])
                remove = np.arange(index, index + count)
                remove = np.setdiff1d(remove, closest)
                rm_index.extend(remove)
            # fix rm_index to full mz2 size
            rm_index = np.where(match_mask)[0][rm_index]
            no_match_mask[rm_index] = True
            mz_match[first_index_index] = mz_replace
            sp_match[first_index_index] = spint_replace
        elif mode == "reduce":
            for first_ind, index, count in iterator:
                # combine all duplicates into a single mz/intensity value
                mz_multiple_match = mz2[match_mask][index:(index + count)]
                sp_multiple_match = sp2[match_mask][index:(index + count)]
                mz_match[first_ind] = mz_reduce(mz_multiple_match)
                sp_match[first_ind] = sp_reduce(sp_multiple_match)
        else:
            # BUGFIX: the implemented/validated modes are "closest" and
            # "reduce" (see _RoiProcessor); the message previously said
            # "merge".
            msg = "mode must be `closest` or `reduce`"
            raise ValueError(msg)
    mz_no_match = mz2[no_match_mask]
    sp_no_match = sp2[no_match_mask]
    return match_index, mz_match, sp_match, mz_no_match, sp_no_match
def tmp_roi_to_roi(tmp_roi: _TempRoi, rt: np.ndarray,
                   mode: Optional[str] = None) -> Roi:
    """
    Convert a temporary ROI into a Roi object.

    Scans between the first and last matched scan where no value was found
    are filled with NaN, so the resulting arrays are contiguous in scan
    number.

    Parameters
    ----------
    tmp_roi : _TempRoi
    rt : np.ndarray
        retention time of each scan in the experiment.
    mode : str, optional
        mode passed to the Roi constructor.

    Returns
    -------
    Roi
    """
    first_scan, last_scan = tmp_roi.scan[0], tmp_roi.scan[-1]
    n_scans = last_scan - first_scan + 1
    mz_tmp = np.full(n_scans, np.nan)
    spint_tmp = np.full(n_scans, np.nan)
    # positions of the matched scans relative to the first scan
    scan_index = np.array(tmp_roi.scan) - first_scan
    mz_tmp[scan_index] = tmp_roi.mz
    spint_tmp[scan_index] = tmp_roi.sp
    roi_rt = rt[first_scan:(last_scan + 1)]
    return Roi(spint_tmp, mz_tmp, roi_rt, first_scan, mode=mode)
def make_roi(ms_experiment: ms_experiment_type, tolerance: float,
             max_missing: int, min_length: int, min_intensity: float,
             multiple_match: str, targeted_mz: Optional[np.ndarray] = None,
             start: Optional[int] = None, end: Optional[int] = None,
             mz_reduce: Union[str, Callable] = "mean",
             sp_reduce: Union[str, Callable] = "sum",
             mode: Optional[str] = None
             ) -> List[Roi]:
    """
    Make Region of interest from MS data in centroid mode.
    Used by MSData as the first step of the centWave algorithm.

    Parameters
    ----------
    ms_experiment: pyopenms.MSExperiment
    tolerance : float
        mz tolerance to connect values across scans
    max_missing : int
        maximum number of missing consecutive values. when a row surpass this
        number the roi is considered as finished and is added to the roi list
        if it meets the length and intensity criteria.
    min_length : int
        The minimum length of a roi to be considered valid.
    min_intensity : float
        Minimum intensity in a roi to be considered valid.
    multiple_match : {"closest", "reduce"}
        How to match peaks when there is more than one match. If mode is
        `closest`, then the closest peak is assigned as a match and the
        others are assigned to no match. If mode is `reduce`, then unique
        mz and intensity values are generated using the reduce function in
        `mz_reduce` and `sp_reduce` respectively.
    targeted_mz : numpy.ndarray, optional
        if a list of mz is provided, roi are searched only using this list.
    start : int, optional
        First scan to analyze. If None starts at scan 0
    end : int, optional
        Last scan to analyze. If None, uses the last scan number.
    mz_reduce : "mean" or Callable
        function used to reduce mz values. Can be a function accepting
        numpy arrays and returning numbers. Only used when `multiple_match`
        is reduce. See the following prototype:

        .. code-block:: python

            def mz_reduce(mz_match: np.ndarray) -> float:
                pass

    sp_reduce : {"mean", "sum"} or Callable
        function used to reduce intensity values. Can be a function accepting
        numpy arrays and returning numbers. Only used when `multiple_match`
        is reduce. To use custom functions see the prototype shown on
        `mz_reduce`.
    mode : str, optional
        mode used to create Roi objects.

    Returns
    -------
    roi: list[Roi]

    Notes
    -----
    To create a ROI, m/z values in consecutive scans are connected if they are
    within the tolerance. If there's more than one possible m/z value to
    connect in the next scan, two different strategies are available, using
    the `multiple_match` parameter: If "closest" is used, then m/z values are
    matched to the closest ones, and the others are used to create new ROI.
    If "reduce" is used, then all values within the tolerance are combined.
    m/z and intensity values are combined using the `mz_reduce` and
    `sp_reduce` parameters respectively. If no matching value has be found in
    a scan, a NaN is added to the ROI. If no matching values are found in
    `max_missing` consecutive scans the ROI is flagged as finished. In this
    stage, two checks are made before the ROI is considered valid:

    1.  The number of non missing values must be higher than `min_length`.
    2.  The maximum intensity value in the ROI must be higher than
        `min_intensity`.

    If the two conditions are meet, the ROI is added to the list of valid ROI.
    """
    if start is None:
        start = 0
    if end is None:
        end = ms_experiment.getNrSpectra()
    targeted = targeted_mz is not None
    if targeted:
        mz_seed = targeted_mz
    else:
        # seed the processor with the m/z values of the first analyzed scan
        mz_seed, _ = ms_experiment.getSpectrum(start).get_peaks()
    rt = np.zeros(end - start)
    processor = _RoiProcessor(mz_seed, max_missing=max_missing,
                              min_length=min_length,
                              min_intensity=min_intensity,
                              tolerance=tolerance,
                              multiple_match=multiple_match,
                              mz_reduce=mz_reduce, sp_reduce=sp_reduce,
                              mode=mode)
    for k_scan in range(start, end):
        sp = ms_experiment.getSpectrum(k_scan)
        rt[k_scan - start] = sp.getRT()
        scan_mz, scan_sp = sp.get_peaks()
        processor.add(scan_mz, scan_sp, targeted=targeted)
        processor.append_to_roi(rt, targeted=targeted)
    # flush roi that were still open after the last scan
    processor.flag_as_completed()
    processor.append_to_roi(rt)
    return processor.roi
def detect_roi_peaks(roi: List[Roi],
                     cwt_params: Optional[dict] = None) -> pd.DataFrame:
    """
    Perform peak picking on each ROI and collect the results in a table.

    Parameters
    ----------
    roi : List[Roi]
        list of ROI to process. Each ROI is modified in place: missing
        values are interpolated and found peaks are stored in it.
    cwt_params : dict, optional
        key-value parameters passed to the find_peaks method of each ROI.

    Returns
    -------
    pd.DataFrame
        one row per detected peak, with "mz", "mz std", "roi index" and
        "peak index" columns added and the "loc" column renamed to "rt".
        Rows with missing values are dropped.
    """
    cwt_params = dict() if cwt_params is None else cwt_params
    peak_params = []
    roi_indices = []
    peak_indices = []
    mz_means = []
    mz_stds = []
    for k, k_roi in enumerate(roi):
        k_roi.fill_nan()
        k_params = k_roi.find_peaks(cwt_params=cwt_params)
        peak_params.extend(k_params)
        k_mz_mean, k_mz_std = k_roi.get_peaks_mz()
        n_peaks = len(k_params)
        roi_indices.append([k] * n_peaks)
        peak_indices.append(range(n_peaks))
        mz_means.append(k_mz_mean)
        mz_stds.append(k_mz_std)
    result = pd.DataFrame(data=peak_params).rename(columns={"loc": "rt"})
    result["mz"] = np.hstack(mz_means)
    result["mz std"] = np.hstack(mz_stds)
    result["roi index"] = np.hstack(roi_indices)
    result["peak index"] = np.hstack(peak_indices)
    return result.dropna(axis=0)
# TODO: test ROI, test roi processor, test make roi
| [
"numpy.hstack",
"scipy.interpolate.interp1d",
"numpy.argsort",
"numpy.array",
"numpy.nanmean",
"pyopenms.OnDiscMSExperiment",
"numpy.arange",
"pyopenms.MzMLFile",
"pyopenms.MSExperiment",
"numpy.searchsorted",
"numpy.where",
"numpy.diff",
"numpy.linspace",
"numpy.vstack",
"pandas.DataFra... | [((23814, 23857), 'collections.namedtuple', 'namedtuple', (['"""TempRoi"""', "['mz', 'sp', 'scan']"], {}), "('TempRoi', ['mz', 'sp', 'scan'])\n", (23824, 23857), False, 'from collections import namedtuple\n'), ((3203, 3235), 'numpy.zeros', 'np.zeros', (['(mz.size, end - start)'], {}), '((mz.size, end - start))\n', (3211, 3235), True, 'import numpy as np\n'), ((3245, 3266), 'numpy.zeros', 'np.zeros', (['(end - start)'], {}), '(end - start)\n', (3253, 3266), True, 'import numpy as np\n'), ((5701, 5730), 'numpy.zeros_like', 'np.zeros_like', (['accumulated_mz'], {}), '(accumulated_mz)\n', (5714, 5730), True, 'import numpy as np\n'), ((6787, 6820), 'numpy.arange', 'np.arange', (['mz_min', 'mz_max', 'mz_res'], {}), '(mz_min, mz_max, mz_res)\n', (6796, 6820), True, 'import numpy as np\n'), ((14128, 14151), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (14136, 14151), True, 'import numpy as np\n'), ((34031, 34051), 'numpy.maximum', 'np.maximum', (['old', 'new'], {}), '(old, new)\n', (34041, 34051), True, 'import numpy as np\n'), ((35307, 35339), 'numpy.abs', 'np.abs', (['(mz1[closest_index] - mz2)'], {}), '(mz1[closest_index] - mz2)\n', (35313, 35339), True, 'import numpy as np\n'), ((35521, 35582), 'numpy.unique', 'np.unique', (['match_index'], {'return_counts': '(True)', 'return_index': '(True)'}), '(match_index, return_counts=True, return_index=True)\n', (35530, 35582), True, 'import numpy as np\n'), ((42278, 42292), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (42286, 42292), True, 'import numpy as np\n'), ((43824, 43849), 'numpy.hstack', 'np.hstack', (['roi_index_list'], {}), '(roi_index_list)\n', (43833, 43849), True, 'import numpy as np\n'), ((43872, 43898), 'numpy.hstack', 'np.hstack', (['peak_index_list'], {}), '(peak_index_list)\n', (43881, 43898), True, 'import numpy as np\n'), ((43918, 43941), 'numpy.hstack', 'np.hstack', (['mz_mean_list'], {}), '(mz_mean_list)\n', (43927, 43941), True, 'import numpy 
as np\n'), ((43960, 43982), 'numpy.hstack', 'np.hstack', (['mz_std_list'], {}), '(mz_std_list)\n', (43969, 43982), True, 'import numpy as np\n'), ((44002, 44032), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'peak_params'}), '(data=peak_params)\n', (44014, 44032), True, 'import pandas as pd\n'), ((1474, 1497), 'pyopenms.MSExperiment', 'pyopenms.MSExperiment', ([], {}), '()\n', (1495, 1497), False, 'import pyopenms\n'), ((2731, 2743), 'numpy.array', 'np.array', (['mz'], {}), '(mz)\n', (2739, 2743), True, 'import numpy as np\n'), ((3501, 3537), 'numpy.searchsorted', 'np.searchsorted', (['mz_sp', 'mz_intervals'], {}), '(mz_sp, mz_intervals)\n', (3516, 3537), True, 'import numpy as np\n'), ((6049, 6098), 'numpy.searchsorted', 'np.searchsorted', (['accumulated_mz', '[mz_min, mz_max]'], {}), '(accumulated_mz, [mz_min, mz_max])\n', (6064, 6098), True, 'import numpy as np\n'), ((6122, 6160), 'scipy.interpolate.interp1d', 'interp1d', (['mz_scan', 'int_scan'], {'kind': 'kind'}), '(mz_scan, int_scan, kind=kind)\n', (6130, 6160), False, 'from scipy.interpolate import interp1d\n'), ((7225, 7242), 'numpy.hstack', 'np.hstack', (['widths'], {}), '(widths)\n', (7234, 7242), True, 'import numpy as np\n'), ((12848, 12860), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12856, 12860), True, 'import numpy as np\n'), ((24956, 24979), 'numpy.isnan', 'np.isnan', (['self.spint[0]'], {}), '(self.spint[0])\n', (24964, 24979), True, 'import numpy as np\n'), ((25022, 25046), 'numpy.isnan', 'np.isnan', (['self.spint[-1]'], {}), '(self.spint[-1])\n', (25030, 25046), True, 'import numpy as np\n'), ((25098, 25118), 'numpy.isnan', 'np.isnan', (['self.spint'], {}), '(self.spint)\n', (25106, 25118), True, 'import numpy as np\n'), ((25142, 25191), 'scipy.interpolate.interp1d', 'interp1d', (['self.rt[~missing]', 'self.spint[~missing]'], {}), '(self.rt[~missing], self.spint[~missing])\n', (25150, 25191), False, 'from scipy.interpolate import interp1d\n'), ((25210, 25229), 'numpy.nanmean', 
'np.nanmean', (['self.mz'], {}), '(self.mz)\n', (25220, 25229), True, 'import numpy as np\n'), ((29023, 29046), 'numpy.arange', 'np.arange', (['mz_seed.size'], {}), '(mz_seed.size)\n', (29032, 29046), True, 'import numpy as np\n'), ((29072, 29105), 'numpy.zeros_like', 'np.zeros_like', (['mz_seed'], {'dtype': 'int'}), '(mz_seed, dtype=int)\n', (29085, 29105), True, 'import numpy as np\n'), ((29135, 29157), 'numpy.zeros_like', 'np.zeros_like', (['mz_seed'], {}), '(mz_seed)\n', (29148, 29157), True, 'import numpy as np\n'), ((29180, 29213), 'numpy.zeros_like', 'np.zeros_like', (['mz_seed'], {'dtype': 'int'}), '(mz_seed, dtype=int)\n', (29193, 29213), True, 'import numpy as np\n'), ((30577, 30630), 'numpy.maximum', 'np.maximum', (['self.max_intensity[match_index]', 'sp_match'], {}), '(self.max_intensity[match_index], sp_match)\n', (30587, 30630), True, 'import numpy as np\n'), ((32671, 32700), 'numpy.hstack', 'np.hstack', (['(self.mz_mean, mz)'], {}), '((self.mz_mean, mz))\n', (32680, 32700), True, 'import numpy as np\n'), ((32725, 32765), 'numpy.hstack', 'np.hstack', (['(self.roi_index, new_indices)'], {}), '((self.roi_index, new_indices))\n', (32734, 32765), True, 'import numpy as np\n'), ((32789, 32812), 'numpy.argsort', 'np.argsort', (['mz_mean_tmp'], {}), '(mz_mean_tmp)\n', (32799, 32812), True, 'import numpy as np\n'), ((32837, 32874), 'numpy.zeros_like', 'np.zeros_like', (['new_indices'], {'dtype': 'int'}), '(new_indices, dtype=int)\n', (32850, 32874), True, 'import numpy as np\n'), ((32899, 32941), 'numpy.hstack', 'np.hstack', (['(self.n_missing, n_missing_tmp)'], {}), '((self.n_missing, n_missing_tmp))\n', (32908, 32941), True, 'import numpy as np\n'), ((32963, 32999), 'numpy.ones_like', 'np.ones_like', (['new_indices'], {'dtype': 'int'}), '(new_indices, dtype=int)\n', (32975, 32999), True, 'import numpy as np\n'), ((33021, 33057), 'numpy.hstack', 'np.hstack', (['(self.length, length_tmp)'], {}), '((self.length, length_tmp))\n', (33030, 33057), True, 'import 
numpy as np\n'), ((33080, 33119), 'numpy.zeros_like', 'np.zeros_like', (['new_indices'], {'dtype': 'float'}), '(new_indices, dtype=float)\n', (33093, 33119), True, 'import numpy as np\n'), ((33142, 33186), 'numpy.hstack', 'np.hstack', (['(self.max_intensity, max_int_tmp)'], {}), '((self.max_intensity, max_int_tmp))\n', (33151, 33186), True, 'import numpy as np\n'), ((34001, 34014), 'numpy.isnan', 'np.isnan', (['new'], {}), '(new)\n', (34009, 34014), True, 'import numpy as np\n'), ((37990, 38003), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (37997, 38003), True, 'import numpy as np\n'), ((38059, 38081), 'numpy.array', 'np.array', (['tmp_roi.scan'], {}), '(tmp_roi.scan)\n', (38067, 38081), True, 'import numpy as np\n'), ((980, 1009), 'pyopenms.OnDiscMSExperiment', 'pyopenms.OnDiscMSExperiment', ([], {}), '()\n', (1007, 1009), False, 'import pyopenms\n'), ((6752, 6763), 'numpy.diff', 'np.diff', (['mz'], {}), '(mz)\n', (6759, 6763), True, 'import numpy as np\n'), ((7117, 7141), 'numpy.linspace', 'np.linspace', (['(0.25)', '(5)', '(20)'], {}), '(0.25, 5, 20)\n', (7128, 7141), True, 'import numpy as np\n'), ((7143, 7164), 'numpy.linspace', 'np.linspace', (['(6)', '(20)', '(8)'], {}), '(6, 20, 8)\n', (7154, 7164), True, 'import numpy as np\n'), ((7184, 7206), 'numpy.linspace', 'np.linspace', (['(25)', '(60)', '(8)'], {}), '(25, 60, 8)\n', (7195, 7206), True, 'import numpy as np\n'), ((7393, 7410), 'numpy.hstack', 'np.hstack', (['widths'], {}), '(widths)\n', (7402, 7410), True, 'import numpy as np\n'), ((8172, 8206), 'numpy.linspace', 'np.linspace', (['middle', 'max_width', '(10)'], {}), '(middle, max_width, 10)\n', (8183, 8206), True, 'import numpy as np\n'), ((25832, 25871), 'numpy.average', 'np.average', (['peak_mz'], {'weights': 'peak_spint'}), '(peak_mz, weights=peak_spint)\n', (25842, 25871), True, 'import numpy as np\n'), ((31290, 31312), 'numpy.where', 'np.where', (['is_completed'], {}), '(is_completed)\n', (31298, 31312), True, 'import numpy as np\n'), 
((31870, 31927), 'numpy.arange', 'np.arange', (['(max_roi_ind + 1)', '(max_roi_ind + 1 + n_completed)'], {}), '(max_roi_ind + 1, max_roi_ind + 1 + n_completed)\n', (31879, 31927), True, 'import numpy as np\n'), ((36006, 36031), 'numpy.where', 'np.where', (['(count_index > 1)'], {}), '(count_index > 1)\n', (36014, 36031), True, 'import numpy as np\n'), ((1364, 1387), 'pyopenms.MSExperiment', 'pyopenms.MSExperiment', ([], {}), '()\n', (1385, 1387), False, 'import pyopenms\n'), ((1506, 1525), 'pyopenms.MzMLFile', 'pyopenms.MzMLFile', ([], {}), '()\n', (1523, 1525), False, 'import pyopenms\n'), ((3109, 3146), 'numpy.vstack', 'np.vstack', (['(mz - window, mz + window)'], {}), '((mz - window, mz + window))\n', (3118, 3146), True, 'import numpy as np\n'), ((3857, 3888), 'numpy.add.reduceat', 'np.add.reduceat', (['int_sp', 'ind_sp'], {}), '(int_sp, ind_sp)\n', (3872, 3888), True, 'import numpy as np\n'), ((7286, 7308), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(20)'], {}), '(1, 10, 20)\n', (7297, 7308), True, 'import numpy as np\n'), ((7310, 7332), 'numpy.linspace', 'np.linspace', (['(10)', '(30)', '(8)'], {}), '(10, 30, 8)\n', (7321, 7332), True, 'import numpy as np\n'), ((7352, 7374), 'numpy.linspace', 'np.linspace', (['(40)', '(90)', '(8)'], {}), '(40, 90, 8)\n', (7363, 7374), True, 'import numpy as np\n'), ((8107, 8141), 'numpy.linspace', 'np.linspace', (['min_width', 'middle', '(20)'], {}), '(min_width, middle, 20)\n', (8118, 8141), True, 'import numpy as np\n'), ((32614, 32632), 'numpy.arange', 'np.arange', (['mz.size'], {}), '(mz.size)\n', (32623, 32632), True, 'import numpy as np\n'), ((36710, 36741), 'numpy.arange', 'np.arange', (['index', '(index + count)'], {}), '(index, index + count)\n', (36719, 36741), True, 'import numpy as np\n'), ((36767, 36796), 'numpy.setdiff1d', 'np.setdiff1d', (['remove', 'closest'], {}), '(remove, closest)\n', (36779, 36796), True, 'import numpy as np\n'), ((12926, 12947), 'numpy.arange', 'np.arange', (['n_isotopes'], {}), 
'(n_isotopes)\n', (12935, 12947), True, 'import numpy as np\n'), ((13039, 13077), 'numpy.abs', 'np.abs', (['(mz[closest_ind] - mz_theoretic)'], {}), '(mz[closest_ind] - mz_theoretic)\n', (13045, 13077), True, 'import numpy as np\n'), ((36504, 36551), 'numpy.argmin', 'np.argmin', (['dmz[match_mask][index:index + count]'], {}), '(dmz[match_mask][index:index + count])\n', (36513, 36551), True, 'import numpy as np\n'), ((36904, 36924), 'numpy.where', 'np.where', (['match_mask'], {}), '(match_mask)\n', (36912, 36924), True, 'import numpy as np\n'), ((1400, 1419), 'pyopenms.MzMLFile', 'pyopenms.MzMLFile', ([], {}), '()\n', (1417, 1419), False, 'import pyopenms\n')] |
#!/usr/bin/env python3
"""Visual smoke test for the GMM implementation on four Gaussian blobs."""
import matplotlib.pyplot as plt
import numpy as np
gmm = __import__('11-gmm').gmm

if __name__ == '__main__':
    # Fixed seed so the sampled clusters are reproducible.
    np.random.seed(11)
    # Four 2-D Gaussian clusters with different sizes and covariances.
    blob_large = np.random.multivariate_normal([30, 40], [[75, 5], [5, 75]], size=10000)
    blob_low = np.random.multivariate_normal([5, 25], [[16, 10], [10, 16]], size=750)
    blob_right = np.random.multivariate_normal([60, 30], [[16, 0], [0, 16]], size=750)
    blob_top = np.random.multivariate_normal([20, 70], [[35, 10], [10, 35]], size=1000)
    X = np.concatenate((blob_large, blob_low, blob_right, blob_top), axis=0)
    np.random.shuffle(X)
    # Fit a 4-component mixture and report its parameters and BIC.
    pi, m, S, clss, bic = gmm(X, 4)
    print(pi)
    print(m)
    print(S)
    print(bic)
    # Points coloured by cluster assignment; stars mark the fitted means.
    plt.scatter(X[:, 0], X[:, 1], s=10, c=clss)
    plt.scatter(m[:, 0], m[:, 1], s=50, marker='*', c=list(range(4)))
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.random.multivariate_normal",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.concatenate",
"numpy.random.shuffle"
] | [((137, 155), 'numpy.random.seed', 'np.random.seed', (['(11)'], {}), '(11)\n', (151, 155), True, 'import numpy as np\n'), ((164, 235), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[30, 40]', '[[75, 5], [5, 75]]'], {'size': '(10000)'}), '([30, 40], [[75, 5], [5, 75]], size=10000)\n', (193, 235), True, 'import numpy as np\n'), ((244, 314), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[5, 25]', '[[16, 10], [10, 16]]'], {'size': '(750)'}), '([5, 25], [[16, 10], [10, 16]], size=750)\n', (273, 314), True, 'import numpy as np\n'), ((323, 392), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[60, 30]', '[[16, 0], [0, 16]]'], {'size': '(750)'}), '([60, 30], [[16, 0], [0, 16]], size=750)\n', (352, 392), True, 'import numpy as np\n'), ((401, 473), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[20, 70]', '[[35, 10], [10, 35]]'], {'size': '(1000)'}), '([20, 70], [[35, 10], [10, 35]], size=1000)\n', (430, 473), True, 'import numpy as np\n'), ((482, 518), 'numpy.concatenate', 'np.concatenate', (['(a, b, c, d)'], {'axis': '(0)'}), '((a, b, c, d), axis=0)\n', (496, 518), True, 'import numpy as np\n'), ((523, 543), 'numpy.random.shuffle', 'np.random.shuffle', (['X'], {}), '(X)\n', (540, 543), True, 'import numpy as np\n'), ((640, 683), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'s': '(10)', 'c': 'clss'}), '(X[:, 0], X[:, 1], s=10, c=clss)\n', (651, 683), True, 'import matplotlib.pyplot as plt\n'), ((758, 768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (766, 768), True, 'import matplotlib.pyplot as plt\n')] |
import argparse
import copy
import math
import numpy as np
import pygame
from pygame.locals import *
from timeit import default_timer as timer
import traceback
import os
from minos.lib import common
from minos.config.sim_args import parse_sim_args
from minos.lib.Simulator import Simulator
from minos.lib.util.ActionTraces import ActionTraces
from minos.lib.util.StateSet import StateSet
from minos.lib.util.VideoWriter import VideoWriter
import random,time
# Supported replay strategies: re-apply recorded actions, or jump to recorded positions.
REPLAY_MODES = ['actions', 'positions']
# Module-level video writer; created in main() when --save_video is requested.
VIDEO_WRITER = None
# Cache of intermediate pygame Surfaces keyed by surf_key, reused by blit_img_to_surf().
TMP_SURFS = {}
def blit_img_to_surf(img, surf, position=(0, 0), surf_key='*'):
    """Blit a numpy image onto a pygame surface via a cached temp surface.

    Args:
        img: HxW grayscale or HxWx4 BGRA image array.
        surf: destination pygame Surface.
        position: (x, y) destination offset on `surf`.
        surf_key: key into the module-level TMP_SURFS cache so repeated blits
            of the same size reuse one intermediate Surface.
    """
    global TMP_SURFS
    if len(img.shape) == 2:  # gray (y)
        # Expand the single channel to an opaque yyy1 (RGBA) image.
        img = np.dstack([img, img, img, np.ones(img.shape, dtype=np.uint8)*255])  # y -> yyy1
    else:
        img = img[:, :, [2, 1, 0, 3]]  # bgra -> rgba
    img_shape = (img.shape[0], img.shape[1])
    TMP_SURF = TMP_SURFS.get(surf_key)
    if not TMP_SURF or TMP_SURF.get_size() != img_shape:
        # (Re)create the intermediate surface when the cached one is missing
        # or its size no longer matches the incoming image.
        TMP_SURF = pygame.Surface(img_shape, 0, 32)
        TMP_SURFS[surf_key] = TMP_SURF
    bv = TMP_SURF.get_view("0")
    # ndarray.tostring() was deprecated and removed in NumPy 2.0;
    # tobytes() is the exact byte-for-byte replacement.
    bv.write(img.tobytes())
    del bv
    surf.blit(TMP_SURF, position)
def display_episode_info(episode_info, display_surf, camera_outputs, show_goals=False):
    """Print the episode info once and optionally render its goal observation.

    The 'displayed' counter stored inside episode_info ensures the summary is
    printed (and the goal drawn) only on the first call for this episode.
    """
    times_shown = episode_info.get('displayed', 0)
    if times_shown < 1:
        summary = {key: episode_info[key] for key in episode_info if key != 'goalObservations'}
        print('episode_info', summary)
        if show_goals and 'goalObservations' in episode_info:
            # NOTE: There can be multiple goals with separate goal observations
            # for each. We currently just handle one.
            goal_obs = episode_info['goalObservations']
            if len(goal_obs) > 0:
                # Draw the first goal but do not record it to video.
                display_response(goal_obs[0], display_surf, camera_outputs, print_observation=False, write_video=False)
    episode_info['displayed'] = times_shown + 1
def draw_forces(forces, display_surf, area):
    """Draw one small circle per force sensor, laid out on a ring around the agent.

    A sensor that reports contact is drawn filled; an idle one as an outline.
    """
    radius = 5
    ring = round(0.45 * min(area.width, area.height) - radius)
    cx, cy = area.center
    # Black background for the widget area.
    pygame.draw.rect(display_surf, (0, 0, 0), area, 0)
    # Sensors are assumed evenly spaced radially, starting at the top (pi/2)
    # and proceeding clockwise.
    # TODO: Actual get force sensor positions and visualize them
    step = -2 * math.pi / forces.shape[0]
    angle = math.pi / 2
    for value in forces:
        px = round(cx + math.cos(angle) * radius_offset) if False else round(cx + math.cos(angle) * ring)
        py = round(cy + math.sin(angle) * ring)
        fill = 0 if value else 1
        pygame.draw.circle(display_surf, (255, 255, 0), (px, py), radius, fill)
        angle += step
def draw_offset(offset, display_surf, area, color=(0, 0, 255)):
    """Draw a compass-style direction-to-goal widget.

    Uses the horizontal (x, z) components of *offset*: a white disc with a
    line and dot pointing toward the goal; the vertical component is ignored.
    """
    vx, vy = offset[0], offset[2]
    norm = math.sqrt(vx * vx + vy * vy)
    if norm:
        vx, vy = vx / norm, vy / norm
    ring = round(0.45 * min(area.width, area.height))
    cx, cy = area.center
    tip = (round(cx + vx * ring), round(cy + vy * ring))
    # Clear the widget, draw the compass disc, then the pointer line and dot.
    pygame.draw.rect(display_surf, (0, 0, 0), area, 0)
    pygame.draw.circle(display_surf, (255, 255, 255), (cx, cy), ring, 0)
    pygame.draw.line(display_surf, color, (cx, cy), tip, 1)
    pygame.draw.circle(display_surf, color, tip, 4, 0)
def display_response(response, display_surf, camera_outputs, print_observation=False, write_video=False):
    """Render one simulator response: sensor images, goal offset, forces, audio.

    Optionally dumps the observation to stdout (print_observation) and
    appends the color frame to the module-level VIDEO_WRITER (write_video).
    """
    global VIDEO_WRITER
    observation = response.get('observation')
    sensor_data = observation.get('sensors')
    measurements = observation.get('measurements')
    # Values that can be printed compactly (skips raw image/byte buffers).
    def printable(x): return type(x) is not bytearray and type(x) is not np.ndarray
    if observation is not None and print_observation:
        simple_observations = {k: v for k, v in observation.items() if k not in ['measurements', 'sensors']}
        dicts = [simple_observations, observation.get('measurements'), observation.get('sensors')]
        for d in dicts:
            for k, v in d.items():
                if type(v) is not dict:
                    # Truncate long entries to ~75 chars with a '..' marker.
                    info = '%s: %s' % (k,v)
                    print(info[:75] + (info[75:] and '..'))
                else:
                    print('%s: %s' % (k, str({i: v[i] for i in v if printable(v[i])})))
        if 'forces' in sensor_data:
            print('forces: %s' % str(sensor_data['forces']['data']))
    if 'info' in response:
        print('info: %s' % str(response['info']))
    if 'offset' in camera_outputs:
        draw_offset(measurements.get('offset_to_goal'), display_surf, camera_outputs['offset']['area'])
    # Draw every configured camera modality that is present in the sensor data.
    for obs, config in camera_outputs.items():
        if obs not in sensor_data:
            continue
        if obs == 'forces':
            draw_forces(sensor_data['forces']['data'], display_surf, config['area'])
            continue
        img = sensor_data[obs]['data']
        img_viz = sensor_data[obs].get('data_viz')
        if obs == 'depth':
            # NOTE(review): in-place scaling mutates the sensor buffer, and
            # img.max() == 0 would divide by zero -- confirm upstream guarantees.
            img *= (255.0 / img.max())  # naive rescaling for visualization
            img = img.astype(np.uint8)
        elif img_viz is not None:
            img = img_viz
        blit_img_to_surf(img, display_surf, config.get('position'))
        # TODO: consider support for writing to video of all camera modalities together
        if obs == 'color':
            if write_video and VIDEO_WRITER:
                if len(img.shape) == 2:
                    VIDEO_WRITER.add_frame(np.dstack([img, img, img])) # yyy
                else:
                    VIDEO_WRITER.add_frame(img[:, :, :-1]) # rgb (drop alpha)
    if 'audio' in sensor_data:
        audio_data = sensor_data['audio']['data']
        pygame.sndarray.make_sound(audio_data).play()
        # pygame.mixer.Sound(audio_data).play()
def write_text(display_surf, text, position, font=None, fontname='monospace', fontsize=12, color=(255,255,224), align=None):
    """Render *text* onto *display_surf* at *position*.

    Args:
        display_surf: destination pygame surface.
        text: string to render.
        position: (x, y) anchor for the text.
        font: pre-built pygame Font; when None one is created from
            *fontname*/*fontsize*.
        color: RGB tuple in pygame's color format.
        align: None blits at *position* directly; 'center' centers the text
            rect on *position*; any other value anchors its top-left there.
    """
    if font is None:
        font = pygame.font.SysFont(fontname, fontsize)
    rendered = font.render(text, True, color)
    if align is None:
        display_surf.blit(rendered, position)
        return
    rect = rendered.get_rect()
    if align == 'center':
        rect.center = (position[0], position[1])
    else:
        rect.topleft = position
    display_surf.blit(rendered, rect)
# --- Module state shared by the scripted controller below ---
previous_action = 119   # last key code chosen by generate_key_press()
collision_counter = 10  # presumably a collision budget; never read here -- TODO confirm
final_time = 0          # timestamp of the previous fps measurement (see interactive_loop)
count=0                 # consecutive random-recovery steps taken in interactive_loop
def get_angle(x_vector, y_vector):
    """Return the direction-to-goal heading in degrees, flipped by 180.

    atan2 yields an angle in (-180, 180]; subtracting/adding 180 flips the
    heading to point the opposite way while staying in (-180, 180].
    Returns 0.0 when x_vector is exactly zero (degenerate direction).
    """
    if not abs(x_vector) > 0:
        return 0.0
    raw_degrees = math.atan2(y_vector, x_vector) * 180 / math.pi
    return raw_degrees - 180 if raw_degrees > 0 else raw_degrees + 180
def get_random_action():
    """Return a random movement key code.

    Candidates are the pygame key codes for W/S/A/D and the left/right
    arrow keys: 119, 115, 97, 100, 276, 275.
    """
    # random.randint(0, 5) always indexed within the 6-element list, so the
    # original try/except IndexError was dead code; pick directly instead.
    actions = (119, 115, 97, 100, 276, 275)
    return random.choice(actions)
def classifier():
    """Run the external single-image classifier and map its verdict to a key code.

    Executes the classifier script, reads its text verdict from
    /home/romi/abc2.txt, and translates it into a pygame key code string.
    Returns None when the verdict is empty or unrecognized.
    """
    print("Running Classifier")
    os.system('/home/romi/SingleImageClassifier.py')
    # Close the file promptly instead of leaking the handle (the original
    # never closed it).
    with open("/home/romi/abc2.txt", 'r') as result_file:
        text = result_file.read()
    # Verdict -> key code string (W/S, arrow keys, strafe keys).
    verdict_to_key = {
        "Forward": "119",
        "Back": "115",
        "CW": "275",
        "CCW": "276",
        "Right": "100",
        "Left": "97",
    }
    return verdict_to_key.get(text)
normal_counter = 0  # call parity for generate_key_press(): even calls just move forward
def generate_key_press(has_collided, direction, distance):
    """Build a synthetic pygame key-state tuple steering the agent toward the goal.

    Alternates (via the global normal_counter) between blindly moving forward
    and angle-correcting steps, taking a random action after a collision.
    Returns a 323-entry tuple with a 1 at the chosen key code, indexable like
    pygame.key.get_pressed().
    """
    global previous_action, collision_counter, normal_counter
    time.sleep(0.8)
    # Every other call simply moves forward; otherwise steer by goal angle.
    if normal_counter%2 == 0:
        next_action = 119
    else:
        angle = get_angle(x_vector=direction[2], y_vector=direction[0])
        #print('Angle: ', angle)
        if abs(angle) < 13:
            # The angle difference is very Small (Keep Moving Unless You Collide)
            next_action = 119
            if has_collided:
                print('Collision Detected: Taking Random Action')
                next_action = get_random_action()
            else:
                print('No Collision: Moving Forward')
                next_action = 119
        else:
            # Need to Adjust Angle
            print('Adjusting Angle')
            if angle > 0:
                # Turn Left
                next_action = 276
            else:
                # Turn Right
                next_action = 275
    if distance < 0.3:
        print("Goal Reached")
        time.sleep(3)
        # NOTE(review): this branch reads `scene_index` before assigning it
        # (UnboundLocalError) and references `args`, `sim` and `scene_dataset`,
        # none of which exist in this scope -- it will raise if ever reached.
        # The scene-advance logic belongs in interactive_loop; confirm intent.
        scene_index = (scene_index + 1) % len(args.scene_ids)
        scene_id = args.scene_ids[scene_index]
        id = scene_dataset + '.' + scene_id
        print('next_scene loading %s ...' % id)
        sim.set_scene(id)
        sim.episode_info = sim.start()
    normal_counter = normal_counter + 1
    previous_action = next_action
    empty_keys = np.zeros(323, dtype='i')
    empty_keys[next_action] = 1
    return tuple(empty_keys)
def interactive_loop(sim, args):
    """Main pygame loop: lay out the display, then step the simulator until quit.

    Reads tracking-status text files written by an external SLAM process
    (/home/romi/abc.txt) to decide between human keyboard control and the
    scripted generate_key_press() controller.
    """
    # initialize
    pygame.mixer.pre_init(frequency=8000, channels=1)
    pygame.init()
    pygame.key.set_repeat(500, 50)  # delay, interval
    clock = pygame.time.Clock()
    # Set up display
    font_spacing = 20
    display_height = args.height + font_spacing*3
    all_camera_observations = ['color', 'depth', 'normal', 'objectId', 'objectType', 'roomId', 'roomType']
    label_positions = {
        'curr': {},
        'goal': {}
    }
    camera_outputs = {
        'curr': {},
        'goal': {}
    }
    # row with observations and goals
    nimages = 0
    for obs in all_camera_observations:
        if args.observations.get(obs):
            label_positions['curr'][obs] = (args.width*nimages, font_spacing*2)
            camera_outputs['curr'][obs] = { 'position': (args.width*nimages, font_spacing*3) }
            if args.show_goals:
                label_positions['goal'][obs] = (args.width*nimages, display_height + font_spacing*2)
                camera_outputs['goal'][obs] = { 'position': (args.width*nimages, display_height + font_spacing*3) }
            nimages += 1
    global final_time
    global count
    if args.show_goals:
        display_height += args.height + font_spacing*3
    # Row with offset and map
    plot_size = max(min(args.height, 128), 64)
    display_height += font_spacing
    label_positions['curr']['offset'] = (0, display_height)
    camera_outputs['curr']['offset'] = { 'area': pygame.Rect(0, display_height + font_spacing, plot_size, plot_size)}
    next_start_x = plot_size
    if args.observations.get('forces'):
        label_positions['curr']['forces'] = (next_start_x, display_height)
        camera_outputs['curr']['forces'] = { 'area': pygame.Rect(next_start_x, display_height + font_spacing, plot_size, plot_size)}
        next_start_x += plot_size
    if args.observations.get('map'):
        label_positions['map'] = (next_start_x, display_height)
        camera_outputs['map'] = { 'position': (next_start_x, display_height + font_spacing) }
        display_height += font_spacing
    display_height += plot_size
    display_shape = [max(args.width * nimages, next_start_x), display_height]
    display_surf = pygame.display.set_mode((display_shape[0], display_shape[1]), pygame.RESIZABLE | pygame.DOUBLEBUF)
    # Write text
    label_positions['title'] = (display_shape[0]/2, font_spacing/2)
    write_text(display_surf, 'MINOS', fontsize=20, position = label_positions['title'], align='center')
    write_text(display_surf, 'dir_to_goal', position = label_positions['curr']['offset'])
    if args.observations.get('forces'):
        write_text(display_surf, 'forces', position = label_positions['curr']['forces'])
    if args.observations.get('map'):
        write_text(display_surf, 'map', position = label_positions['map'])
    write_text(display_surf, 'observations | controls: WASD+Arrows', position = (0, font_spacing))
    if args.show_goals:
        write_text(display_surf, 'goal', position = (0, args.height + font_spacing*3 + font_spacing))
    for obs in all_camera_observations:
        if args.observations.get(obs):
            write_text(display_surf, obs, position = label_positions['curr'][obs])
            if args.show_goals:
                write_text(display_surf, obs, position = label_positions['goal'][obs])
    # Other initialization
    scene_index = 0
    scene_dataset = args.scene.dataset
    init_time = timer()
    num_frames = 0
    prev_key = ''
    replay = args.replay
    action_traces = args.action_traces
    action_trace = action_traces.curr_trace() if action_traces is not None else None
    replay_auto = False
    replay_mode = args.replay_mode
    replay_mode_index = REPLAY_MODES.index(replay_mode)
    print('***\n***')
    print('CONTROLS: WASD+Arrows = move agent, R = respawn, N = next state/scene, O = print observation, Q = quit')
    if replay:
        print('P = toggle auto replay, E = toggle replay using %s '
              % str([m + ('*' if m == replay_mode else '') for m in REPLAY_MODES]))
    print('***\n***')
    has_collided=False
    direction=[0.0,0.0,0.0]
    distance=0.0
    total_frames=0
    # Hard-coded spawn point; alternatives are listed in the string below.
    pos= [10.8726722805803, 3.17766, -1.0168381949239895]
    ang=4.88692 #angle in radian
    tilt=0 #tilt angle in radian (keep it zero)
    print('\nMoving to Starting Point\t',pos,ang)
    sim.move_to(pos,ang,tilt) #define starting point here by pressing 'v'v
    '''
    House 17DRP5sb8fy
    Point A:
    [1.3307827641472185, 0.53861988, -10.146044853235205]
    [3.5180930438444764, 0.566234, -1.8922092359314986] dining room'
    [-1.4967893299263657, 0.5536211000000001, -9.28489025596529]
    Point B:
    [2.4621904181013585, 0.5086211, 1.905232439043702]
    [1.5371551074831127, 0.566234, -7.069940355796163] bedroom
    [2.614761534032291, 0.5443598460000001, 2.082176819455741]
    House JeFG25nYj2p
    Point A:
    [-6.74225409821948, 0.55074358, 9.639630625521972] lounge
    [-4.808781002856764, 0.579616, -3.2566622905687566]
    A: [6.161385541472788, 0.55074358, 1.0423787109815281]
    [-9.969136013947448, 0.5890924000000001, 4.443266425597109]
    [-3.8024725023701844, 0.5667730000000001, -3.897040174086085] hallway
    Point B: (Set in env_config file)
    [5.324160089316311, 0.5293569, 1.0104915805896062]
    [0.09539571149637265, 0.579616, 8.280566401485888] familyroom/lounge
    [3.234156347243452, 0.54744289, 5.132335702885289] kitchen
    B: [-5.547085140683584, 0.55074358, -5.553243897709698]
    House ZMojNkEp431
    Point A:
    [-1.648939148401732, -0.316777, 21.77250735917539]
    [-7.257402680352993, -0.316777, 22.62233552621379]
    Point B:
    [1.5515085926612524, -0.316777, 27.586585491887465]
    House q9vSo1VnCiC
    Point A:
    [-9.295046451025712, 0.54650591, 9.32940161221944]
    Point B:
    [-2.93651621783526, 0.5278996, -8.961261587263957]
    House YVUC4YcDtcY (prob)
    Point A:
    [-22.444534161392085, 0.54291507, -16.989977211158624]
    Point B:
    [-30.20203545489092, 0.54291507, -7.346167078006564]
    House qoiz87JEwZ2
    Point A:
    [4.309296503924577, 0.69781, -2.2073896572614493]
    Point B:
    [12.669347833403256, -2.7127, -0.9483520109703518]
    '''
    while sim.running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sim.running = False
        # read keys
        # NOTE(review): this handle is opened every frame and never closed.
        fd = "/home/romi/abc.txt"
        file = open(fd, 'r')
        text = file.read()
        text3="Tracking"
        text2=""
        text1="Track is lost"
        #print("Text :", text)
        #print("Count : " ,count)
        # fps accounting against the global final_time (0 on the first frame).
        time_taken = timer() - final_time
        print(' fps=%f' % ( num_frames / time_taken))
        total_frames=total_frames+num_frames
        num_frames=0
        final_time=timer()
        # NOTE(review): if the first branch below is taken on the very first
        # iteration, `keys` is never assigned and keys[K_q] raises NameError.
        if text==text3 and count >2:
            count=0
        elif text==text2 or text==text3 or count >=6:
            if count>=10:
                print('Failed Case, please recover tracks manually\n')
                keys = pygame.key.get_pressed()
            else:
                #keys = pygame.key.get_pressed()
                keys = generate_key_press(has_collided, direction,distance)
                #print('key pressed',action['name'])
                #time.sleep(0.5)
                open('/home/romi/abc2.txt', 'w').close()
                open('/home/romi/abc.txt', 'w').close()
        elif text==text1:
            open('/home/romi/abc2.txt', 'w').close()
            open('/home/romi/abc.txt', 'w').close()
            print("\nTaking Random Steps to Recover\n")
            text4 = generate_key_press(has_collided, direction,distance)
            open('/home/romi/abc2.txt', 'w').close()
            open('/home/romi/abc.txt', 'w').close()
            empty_keys = np.zeros(323, dtype='i')
            # NOTE(review): text4 is already a 323-tuple returned by
            # generate_key_press(); indexing empty_keys with it raises
            # IndexError. Probably `keys = text4` was intended -- confirm.
            empty_keys[text4] = 1
            keys= tuple(empty_keys)
            count+=1
            print("\nNumber of Random Movements done : ",count)
        #keys = pygame.key.get_pressed()
        print_next_observation = False
        if keys[K_q]:
            break
        if keys[K_o]:
            print_next_observation = True
        elif keys[K_n]:
            # NOTE(review): `is not 'n'` compares string identity, not
            # equality; it happens to work via CPython interning but should
            # be `!=` / `==` throughout this block.
            prev_key = 'n' if prev_key is not 'n' else ''
            if 'state_set' in args and prev_key is 'n':
                state = args.state_set.get_next_state()
                if not state: # roll over to beginning
                    print('Restarting from beginning of states file...')
                    state = args.state_set.get_next_state()
                id = scene_dataset + '.' + state['scene_id']
                print('next_scene loading %s ...' % id)
                sim.set_scene(id)
                sim.move_to(state['start']['position'], state['start']['angle'])
                sim.episode_info = sim.start()
            elif prev_key is 'n':
                scene_index = (scene_index + 1) % len(args.scene_ids)
                scene_id = args.scene_ids[scene_index]
                id = scene_dataset + '.' + scene_id
                print('next_scene loading %s ...' % id)
                sim.set_scene(id)
                sim.episode_info = sim.start()
        elif keys[K_v]:
            prev_key = 'v' if prev_key is not 'v' else ''
            if prev_key is 'v':
                pos=[0.37536343739988054, 0.49121938, 1.7367364232544902]
                ang=4.88692 #angle in radian
                tilt=0 #tilt angle in radian (keep it zero)
                print('\nMoving to Starting Point\t',pos,ang)
                sim.move_to(pos,ang,tilt) #define starting point here by pressing 'v'v
        elif keys[K_r]:
            prev_key = 'r' if prev_key is not 'r' else ''
            if prev_key is 'r':
                sim.episode_info = sim.reset()
        else:
            # Figure out action
            action = {'name': 'idle', 'strength': 1, 'angle': math.radians(5)}
            actions = []
            if replay:
                unprocessed_keypressed = any(keys)
                if keys[K_p]:
                    prev_key = 'p' if prev_key is not 'p' else ''
                    if prev_key == 'p':
                        replay_auto = not replay_auto
                        unprocessed_keypressed = False
                elif keys[K_e]:
                    prev_key = 'e' if prev_key is not 'e' else ''
                    if prev_key == 'e':
                        replay_mode_index = (replay_mode_index + 1) % len(REPLAY_MODES)
                        replay_mode = REPLAY_MODES[replay_mode_index]
                        unprocessed_keypressed = False
                        print('Replay using %s' % replay_mode)
                if replay_auto or unprocessed_keypressed:
                    # get next action and do it
                    rec = action_trace.next_action_record()
                    if rec is None:
                        # go to next trace
                        action_trace = action_traces.next_trace()
                        start_state = action_trace.start_state()
                        print('start_state', start_state)
                        sim.configure(start_state)
                        sim.episode_info = sim.start()
                    else:
                        if replay_mode == 'actions':
                            actnames = rec['actions'].split('+')
                            for actname in actnames:
                                if actname != 'reset':
                                    act = copy.copy(action)
                                    act['name'] = actname
                                    actions.append(act)
                        elif replay_mode == 'positions':
                            sim.move_to([rec['px'], rec['py'], rec['pz']], rec['rotation'])
            else:
                # Map the pressed key to a simulator action name.
                if keys[K_w]:
                    action['name'] = 'forwards'
                    print('Action: Forward')
                elif keys[K_s]:
                    action['name'] = 'backwards'
                    print('Action: Backward')
                elif keys[K_LEFT]:
                    # ASCII Code 276
                    action['name'] = 'turnLeft'
                    print('Action: Rotate Left')
                elif keys[K_RIGHT]:
                    # ASCII Code 275
                    action['name'] = 'turnRight'
                    print('Action: Rotate Right')
                elif keys[K_a]:
                    action['name'] = 'strafeLeft'
                    print('Action: Strafe Left')
                elif keys[K_d]:
                    action['name'] = 'strafeRight'
                    print('Action: Strafe Right')
                elif keys[K_UP]:
                    action['name'] = 'lookUp'
                elif keys[K_DOWN]:
                    action['name'] = 'lookDown'
                else:
                    action['name'] = 'idle'
                actions = [action]
        # step simulator and get observation
        response = sim.step(actions, 1)
        if response is None:
            break
        display_episode_info(sim.episode_info, display_surf, camera_outputs['goal'], show_goals=args.show_goals)
        # Handle map
        observation = response.get('observation')
        direction = observation['measurements']['shortest_path_to_goal']['direction']
        distance = observation['measurements']['shortest_path_to_goal']['distance']
        has_collided = observation['collision']
        map = observation.get('map')
        print('Total Distance Remaining ',distance)
        if map is not None:
            # TODO: handle multiple maps
            if isinstance(map, list):
                map = map[0]
            config = camera_outputs['map']
            img = map['data']
            rw = map['shape'][0] + config.get('position')[0]
            rh = map['shape'][1] + config.get('position')[1]
            w = display_surf.get_width()
            h = display_surf.get_height()
            if w < rw or h < rh:
                # Resize display (copying old stuff over)
                old_display_surf = display_surf.convert()
                display_surf = pygame.display.set_mode((max(rw,w), max(rh,h)), pygame.RESIZABLE | pygame.DOUBLEBUF)
                display_surf.blit(old_display_surf, (0,0))
                write_text(display_surf, 'map', position = label_positions['map'])
            blit_img_to_surf(img, display_surf, config.get('position'), surf_key='map')
        # Handle other response
        display_response(response, display_surf, camera_outputs['curr'], print_observation=print_next_observation, write_video=True)
        pygame.display.flip()
        num_frames += 1
        clock.tick(30)  # constraint to max 30 fps
    # NOTE: log_action_trace handled by javascript side
    # if args.log_action_trace:
    #     trace = sim.get_action_trace()
    #     print(trace['data'])
    # cleanup and quit
    time_taken = timer() - init_time
    print('time=%f sec, fps=%f' % (time_taken, total_frames / time_taken))
    print('Thank you for playing - Goodbye!')
    pygame.quit()
def main():
    """Parse CLI options, start the Simulator, run the interactive loop, clean up.

    Sets the module-level VIDEO_WRITER when --save_video is requested so that
    display_response() can record color frames.
    """
    global VIDEO_WRITER
    parser = argparse.ArgumentParser(description='Interactive interface to Simulator')
    parser.add_argument('--navmap', action='store_true',
                        default=False,
                        help='Use navigation map')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--state_set_file',
                       help='State set file')
    group.add_argument('--replay',
                       help='Load and replay action trace from file')
    group.add_argument('--replay_mode',
                       choices=REPLAY_MODES,
                       default='positions',
                       help='Use actions or positions for replay')
    group.add_argument('--show_goals', action='store_true',
                       default=False,
                       help='show goal observations')
    args = parse_sim_args(parser)
    args.visualize_sensors = True
    sim = Simulator(vars(args))
    common.attach_exit_handler(sim)
    if 'state_set_file' in args and args.state_set_file is not None:
        args.state_set = StateSet(args.state_set_file, 1)
    if 'save_video' in args and len(args.save_video):
        # Fall back to a default filename when --save_video is not a string.
        filename = args.save_video if isinstance(args.save_video, str) else 'out.mp4'
        is_rgb = args.color_encoding == 'rgba'
        VIDEO_WRITER = VideoWriter(filename, framerate=24, resolution=(args.width, args.height), rgb=is_rgb)
    if 'replay' in args and args.replay is not None:
        print('Initializing simulator using action traces %s...' % args.replay)
        args.action_traces = ActionTraces(args.replay)
        action_trace = args.action_traces.next_trace()
        sim.init()
        start_state = action_trace.start_state()
        print('start_state', start_state)
        sim.configure(start_state)
    else:
        args.action_traces = None
        args.replay = None
    try:
        print('Starting simulator...')
        ep_info = sim.start()
        if ep_info:
            print('observation_space', sim.get_observation_space())
            sim.episode_info = ep_info
        print('Simulator started.')
        interactive_loop(sim, args)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; those now propagate after cleanup.
        traceback.print_exc()
        print('Error running simulator. Aborting.')
    finally:
        # Always release the simulator and any open video file.
        if sim is not None:
            sim.kill()
            del sim
        if VIDEO_WRITER is not None:
            VIDEO_WRITER.close()
# Script entry point (run interactively only when executed directly, not on import).
if __name__ == "__main__":
    main()
| [
"minos.lib.util.VideoWriter.VideoWriter",
"pygame.init",
"pygame.quit",
"math.sqrt",
"time.sleep",
"math.cos",
"copy.copy",
"argparse.ArgumentParser",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.draw.rect",
"minos.lib.util.StateSet.StateSet",
"minos.lib.common.attach_exit_handl... | [((2154, 2204), 'pygame.draw.rect', 'pygame.draw.rect', (['display_surf', '(0, 0, 0)', 'area', '(0)'], {}), '(display_surf, (0, 0, 0), area, 0)\n', (2170, 2204), False, 'import pygame\n'), ((2785, 2829), 'math.sqrt', 'math.sqrt', (['(dir[0] * dir[0] + dir[1] * dir[1])'], {}), '(dir[0] * dir[0] + dir[1] * dir[1])\n', (2794, 2829), False, 'import math\n'), ((3032, 3082), 'pygame.draw.rect', 'pygame.draw.rect', (['display_surf', '(0, 0, 0)', 'area', '(0)'], {}), '(display_surf, (0, 0, 0), area, 0)\n', (3048, 3082), False, 'import pygame\n'), ((3104, 3170), 'pygame.draw.circle', 'pygame.draw.circle', (['display_surf', '(255, 255, 255)', 'center', 'size', '(0)'], {}), '(display_surf, (255, 255, 255), center, size, 0)\n', (3122, 3170), False, 'import pygame\n'), ((3173, 3229), 'pygame.draw.line', 'pygame.draw.line', (['display_surf', 'color', 'center', 'target', '(1)'], {}), '(display_surf, color, center, target, 1)\n', (3189, 3229), False, 'import pygame\n'), ((3234, 3287), 'pygame.draw.circle', 'pygame.draw.circle', (['display_surf', 'color', 'target', '(4)', '(0)'], {}), '(display_surf, color, target, 4, 0)\n', (3252, 3287), False, 'import pygame\n'), ((7214, 7262), 'os.system', 'os.system', (['"""/home/romi/SingleImageClassifier.py"""'], {}), "('/home/romi/SingleImageClassifier.py')\n", (7223, 7262), False, 'import os\n'), ((7757, 7772), 'time.sleep', 'time.sleep', (['(0.8)'], {}), '(0.8)\n', (7767, 7772), False, 'import random, time\n'), ((9063, 9087), 'numpy.zeros', 'np.zeros', (['(323)'], {'dtype': '"""i"""'}), "(323, dtype='i')\n", (9071, 9087), True, 'import numpy as np\n'), ((9205, 9254), 'pygame.mixer.pre_init', 'pygame.mixer.pre_init', ([], {'frequency': '(8000)', 'channels': '(1)'}), '(frequency=8000, channels=1)\n', (9226, 9254), False, 'import pygame\n'), ((9259, 9272), 'pygame.init', 'pygame.init', ([], {}), '()\n', (9270, 9272), False, 'import pygame\n'), ((9277, 9307), 'pygame.key.set_repeat', 
'pygame.key.set_repeat', (['(500)', '(50)'], {}), '(500, 50)\n', (9298, 9307), False, 'import pygame\n'), ((9339, 9358), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (9356, 9358), False, 'import pygame\n'), ((11363, 11466), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(display_shape[0], display_shape[1])', '(pygame.RESIZABLE | pygame.DOUBLEBUF)'], {}), '((display_shape[0], display_shape[1]), pygame.\n RESIZABLE | pygame.DOUBLEBUF)\n', (11386, 11466), False, 'import pygame\n'), ((12593, 12600), 'timeit.default_timer', 'timer', ([], {}), '()\n', (12598, 12600), True, 'from timeit import default_timer as timer\n'), ((24346, 24359), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (24357, 24359), False, 'import pygame\n'), ((24411, 24484), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Interactive interface to Simulator"""'}), "(description='Interactive interface to Simulator')\n", (24434, 24484), False, 'import argparse\n'), ((25236, 25258), 'minos.config.sim_args.parse_sim_args', 'parse_sim_args', (['parser'], {}), '(parser)\n', (25250, 25258), False, 'from minos.config.sim_args import parse_sim_args\n'), ((25329, 25360), 'minos.lib.common.attach_exit_handler', 'common.attach_exit_handler', (['sim'], {}), '(sim)\n', (25355, 25360), False, 'from minos.lib import common\n'), ((1034, 1066), 'pygame.Surface', 'pygame.Surface', (['img_shape', '(0)', '(32)'], {}), '(img_shape, 0, 32)\n', (1048, 1066), False, 'import pygame\n'), ((2596, 2661), 'pygame.draw.circle', 'pygame.draw.circle', (['display_surf', '(255, 255, 0)', '(x, y)', 'r', 'width'], {}), '(display_surf, (255, 255, 0), (x, y), r, width)\n', (2614, 2661), False, 'import pygame\n'), ((6124, 6163), 'pygame.font.SysFont', 'pygame.font.SysFont', (['fontname', 'fontsize'], {}), '(fontname, fontsize)\n', (6143, 6163), False, 'import pygame\n'), ((7020, 7040), 'random.randint', 'random.randint', (['(0)', '(5)'], {}), '(0, 5)\n', (7034, 7040), False, 'import 
random, time\n'), ((10620, 10687), 'pygame.Rect', 'pygame.Rect', (['(0)', '(display_height + font_spacing)', 'plot_size', 'plot_size'], {}), '(0, display_height + font_spacing, plot_size, plot_size)\n', (10631, 10687), False, 'import pygame\n'), ((15481, 15499), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (15497, 15499), False, 'import pygame\n'), ((16012, 16019), 'timeit.default_timer', 'timer', ([], {}), '()\n', (16017, 16019), True, 'from timeit import default_timer as timer\n'), ((23902, 23923), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (23921, 23923), False, 'import pygame\n'), ((24201, 24208), 'timeit.default_timer', 'timer', ([], {}), '()\n', (24206, 24208), True, 'from timeit import default_timer as timer\n'), ((25456, 25488), 'minos.lib.util.StateSet.StateSet', 'StateSet', (['args.state_set_file', '(1)'], {}), '(args.state_set_file, 1)\n', (25464, 25488), False, 'from minos.lib.util.StateSet import StateSet\n'), ((25695, 25784), 'minos.lib.util.VideoWriter.VideoWriter', 'VideoWriter', (['filename'], {'framerate': '(24)', 'resolution': '(args.width, args.height)', 'rgb': 'is_rgb'}), '(filename, framerate=24, resolution=(args.width, args.height),\n rgb=is_rgb)\n', (25706, 25784), False, 'from minos.lib.util.VideoWriter import VideoWriter\n'), ((25943, 25968), 'minos.lib.util.ActionTraces.ActionTraces', 'ActionTraces', (['args.replay'], {}), '(args.replay)\n', (25955, 25968), False, 'from minos.lib.util.ActionTraces import ActionTraces\n'), ((8666, 8679), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (8676, 8679), False, 'import random, time\n'), ((10887, 10965), 'pygame.Rect', 'pygame.Rect', (['next_start_x', '(display_height + font_spacing)', 'plot_size', 'plot_size'], {}), '(next_start_x, display_height + font_spacing, plot_size, plot_size)\n', (10898, 10965), False, 'import pygame\n'), ((15851, 15858), 'timeit.default_timer', 'timer', ([], {}), '()\n', (15856, 15858), True, 'from timeit import default_timer as 
timer\n'), ((26546, 26567), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (26565, 26567), False, 'import traceback\n'), ((5606, 5644), 'pygame.sndarray.make_sound', 'pygame.sndarray.make_sound', (['audio_data'], {}), '(audio_data)\n', (5632, 5644), False, 'import pygame\n'), ((6726, 6756), 'math.atan2', 'math.atan2', (['y_vector', 'x_vector'], {}), '(y_vector, x_vector)\n', (6736, 6756), False, 'import math\n'), ((703, 737), 'numpy.ones', 'np.ones', (['img.shape'], {'dtype': 'np.uint8'}), '(img.shape, dtype=np.uint8)\n', (710, 737), True, 'import numpy as np\n'), ((2476, 2491), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (2484, 2491), False, 'import math\n'), ((2528, 2543), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (2536, 2543), False, 'import math\n'), ((16255, 16279), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (16277, 16279), False, 'import pygame\n'), ((17021, 17045), 'numpy.zeros', 'np.zeros', (['(323)'], {'dtype': '"""i"""'}), "(323, dtype='i')\n", (17029, 17045), True, 'import numpy as np\n'), ((5393, 5419), 'numpy.dstack', 'np.dstack', (['[img, img, img]'], {}), '([img, img, img])\n', (5402, 5419), True, 'import numpy as np\n'), ((19154, 19169), 'math.radians', 'math.radians', (['(5)'], {}), '(5)\n', (19166, 19169), False, 'import math\n'), ((20764, 20781), 'copy.copy', 'copy.copy', (['action'], {}), '(action)\n', (20773, 20781), False, 'import copy\n')] |
from abc import abstractmethod, abstractproperty
import os
from pathlib import Path
import collections.abc
import logging
import pkg_resources
import uuid
from urllib.parse import urlparse
from typing import Set, List
import threading
import numpy as np
from frozendict import frozendict
import pyarrow as pa
import vaex
import vaex.execution
import vaex.settings
import vaex.utils
from vaex.array_types import data_type
from .column import Column, ColumnIndexed, supported_column_types
from . import array_types
from vaex import encoding
logger = logging.getLogger('vaex.dataset')
# Lazily-populated cache of dataset opener plugins; filled on first open() call.
opener_classes = []
# Version tag stored inside hash_info dicts so stale cached hashes can be detected.
HASH_VERSION = "1"
HASH_VERSION_KEY = "version"
# Default chunk size in rows (2**20), overridable via vaex settings.
chunk_size_default = vaex.settings.main.chunk.size or 1024**2
_dataset_types = {}
lock = threading.Lock()
def register(cls, name=None):
name = name or getattr(cls, 'snake_name') or cls.__name__
_dataset_types[name] = cls
return cls
@encoding.register('dataset')
class dataset_encoding:
    """(De)serialization hooks for Dataset objects under the 'dataset' tag."""

    @staticmethod
    def encode(encoding, dataset):
        # Datasets know how to encode themselves; simply delegate.
        return dataset.encode(encoding)

    @staticmethod
    def decode(encoding, dataset_spec):
        # Work on a copy so the caller's spec dict is left untouched.
        spec = dict(dataset_spec)
        dataset_cls = _dataset_types[spec.pop('dataset_type')]
        return dataset_cls.decode(encoding, spec)
def open(path, fs_options={}, fs=None, *args, **kwargs):
    """Open *path* as a Dataset by trying all registered 'vaex.dataset.opener' plugins.

    NOTE(review): shadows the builtin ``open`` and uses a mutable default for
    ``fs_options`` (read-only here, so harmless) — kept for API compatibility.

    :raises IOError: when no opener can handle the path (collected failures are included).
    """
    failures = []
    with lock:  # opener_classes is a lazily-filled module-level cache; guard it across threads
        if not opener_classes:
            # Discover openers once, via setuptools entry points.
            for entry in pkg_resources.iter_entry_points(group='vaex.dataset.opener'):
                logger.debug('trying opener: ' + entry.name)
                try:
                    opener = entry.load()
                    opener_classes.append(opener)
                except Exception as e:
                    logger.exception('issue loading ' + entry.name)
                    failures.append((e, entry))
    # first the quick path: a cheap test (e.g. extension check) before the full can_open
    for opener in opener_classes:
        if opener.quick_test(path, fs_options=fs_options, fs=fs):
            if opener.can_open(path, fs_options=fs_options, fs=fs, *args, **kwargs):
                return opener.open(path, fs_options=fs_options, fs=fs, *args, **kwargs)
    # otherwise try all openers, collecting their errors for the final message
    for opener in opener_classes:
        try:
            if opener.can_open(path, fs_options=fs_options, fs=fs, *args, **kwargs):
                return opener.open(path, fs_options=fs_options, fs=fs, *args, **kwargs)
        except Exception as e:
            failures.append((e, opener))
    failures = "\n".join([f'\n-----{who}-----\n:' + vaex.utils.format_exception_trace(e) for e, who in failures])
    if failures:
        raise IOError(f'Cannot open {path}, failures: {failures}.')
    else:
        raise IOError(f'Cannot open {path} nobody knows how to read it.')
def _to_bytes(ar):
try:
return ar.view(np.uint8)
except ValueError:
return ar.copy().view(np.uint8)
def hash_combine(*hashes):
    """Fold any number of hex-digest strings into a single combined digest."""
    hasher = vaex.utils.create_hasher(large_data=False)
    for hexdigest in hashes:
        # Feed each digest string as bytes, in order.
        hasher.update(hexdigest.encode())
    return hasher.hexdigest()
def hash_slice(hash, start, end):
    """Derive a digest for the [start, end) slice of the data identified by *hash*."""
    # Seed with the parent hash, then mix in the slice bounds as raw int64 bytes
    # so different slices of the same data get different digests.
    bounds = np.array([start, end], dtype=np.int64)
    hasher = vaex.utils.create_hasher(hash.encode(), large_data=False)
    hasher.update(_to_bytes(bounds))
    return hasher.hexdigest()
def hash_array_data(ar):
    """Hash the raw data buffers of *ar* — the expensive part of array hashing.

    Returns a descriptor dict keyed by array kind:
      numpy  -> {"type": "numpy", "data": hex, "mask": hex or None}
      arrow  -> {"type": "arrow", "buffers": [hex or None, ...]}
      column -> {"type": "column", "fingerprint": ...}
    """
    # this function should stay consistent with all future versions
    # since this is the expensive part of the hashing
    if isinstance(ar, np.ndarray):
        ar = ar.ravel()
        if ar.dtype == np.object_:
            # Object arrays cannot be hashed by buffer: use a fresh uuid so the
            # "hash" never matches anything (i.e. object data is never cached).
            return {"type": "numpy", "data": str(uuid.uuid4()), "mask": None}
        if np.ma.isMaskedArray(ar):
            data_byte_ar = _to_bytes(ar.data)
            hasher = vaex.utils.create_hasher(data_byte_ar, large_data=True)
            hash_data = {"type": "numpy", "data": hasher.hexdigest(), "mask": None}
            # Only hash the mask when it is a real array, not the scalar True/False.
            if ar.mask is not True and ar.mask is not False and ar.mask is not np.True_ and ar.mask is not np.False_:
                mask_byte_ar = _to_bytes(ar.mask)
                hasher = vaex.utils.create_hasher(mask_byte_ar, large_data=True)
                hash_data["mask"] = hasher.hexdigest()
            return hash_data
        else:
            try:
                byte_ar = _to_bytes(ar)
            except ValueError:
                byte_ar = ar.copy().view(np.uint8)
            hasher = vaex.utils.create_hasher(byte_ar, large_data=True)
            hash_data = {"type": "numpy", "data": hasher.hexdigest(), "mask": None}
    elif isinstance(ar, (pa.Array, pa.ChunkedArray)):
        hasher = vaex.utils.create_hasher(large_data=True)
        buffer_hashes = []
        hash_data = {"type": "arrow", "buffers": buffer_hashes}
        if isinstance(ar, pa.ChunkedArray):
            chunks = ar.chunks
        else:
            chunks = [ar]
        for chunk in chunks:
            for buffer in chunk.buffers():
                if buffer is not None:
                    # NOTE(review): a single hasher accumulates across buffers, so each
                    # entry is a running digest rather than a per-buffer digest — confirm intended.
                    hasher.update(memoryview(buffer))
                    buffer_hashes.append(hasher.hexdigest())
                else:
                    buffer_hashes.append(None)
    elif isinstance(ar, vaex.column.Column):
        hash_data = {"type": "column", "fingerprint": ar.fingerprint()}
    else:
        raise TypeError
    return hash_data
def hash_array(ar, hash_info=None, return_info=False):
    """Compute the cheap, versioned hash of *ar*, reusing *hash_info* when still valid.

    Builds on top of hash_array_data() (the expensive buffer hashing); passing a
    compatible ``hash_info`` descriptor avoids re-hashing the buffers.
    With ``return_info=True`` returns ``(hash, hash_info)``, otherwise just the hash.
    """
    # this function can change over time, as it builds on top of the expensive part
    # (hash_array_data), so we can cheaply calculate new hashes if we pass on hash_info
    if hash_info is None:
        hash_info = hash_array_data(ar)
    if hash_info.get(HASH_VERSION_KEY) == HASH_VERSION:  # TODO: semver check?
        # NOTE(review): returns the (hash, info) tuple here even when
        # return_info=False — confirm callers handle both shapes.
        return hash_info['hash'], hash_info
    if isinstance(ar, np.ndarray):
        if ar.dtype == np.object_:
            # NOTE(review): returns just the uuid string, never a tuple — confirm.
            return hash_info['data']  # uuid, so always unique
        if np.ma.isMaskedArray(ar):
            # Re-hash buffers if the cached descriptor is incomplete for masked data.
            if not (hash_info['type'] == 'numpy' and hash_info['data'] and hash_info['mask']):
                hash_info = hash_array_data(ar)
        else:
            if not (hash_info['type'] == 'numpy' and hash_info['data']):
                hash_info = hash_array_data(ar)
        keys = [HASH_VERSION, hash_info['type'], hash_info['data']]
        if hash_info['mask']:
            keys.append(hash_info['mask'])
    elif isinstance(ar, vaex.array_types.supported_arrow_array_types):
        if not (hash_info['type'] == 'arrow' and hash_info['buffers']):
            hash_info = hash_array_data(ar)
        keys = [HASH_VERSION]
        keys.extend(["NO_BUFFER" if not b else b for b in hash_info['buffers']])
    elif isinstance(ar, vaex.column.Column):
        if not (hash_info['type'] == 'column'):
            hash_info = hash_array_data(ar)
        keys = [HASH_VERSION]
        keys.append(hash_info['fingerprint'])
    # NOTE(review): for any other array type, `keys` is unbound here -> NameError.
    hasher = vaex.utils.create_hasher(large_data=False)  # small amounts of data
    for key in keys:
        hasher.update(key.encode('ascii'))
    hash = hasher.hexdigest()
    if return_info:
        hash_info['hash'] = hash
        hash_info[HASH_VERSION_KEY] = HASH_VERSION
        return hash, hash_info
    else:
        return hash
def to_supported_array(ar):
    """Coerce *ar* into one of the column types vaex supports.

    Unicode numpy arrays become lazily-cast arrow strings; object arrays are
    inspected element-wise to decide between arrow strings and float64
    (the all-missing case).
    """
    if not isinstance(ar, supported_column_types):
        ar = np.asanyarray(ar)
    if isinstance(ar, np.ndarray) and ar.dtype.kind == 'U':
        ar = vaex.column.ColumnArrowLazyCast(ar, pa.string())
    elif isinstance(ar, np.ndarray) and ar.dtype.kind == 'O':
        ar_data = ar
        if np.ma.isMaskedArray(ar):
            ar_data = ar.data
        try:
            # "k != k" is a way to detect NaN's and NaT's
            types = list({type(k) for k in ar_data if k is not None and k == k})
        except ValueError:
            # If there is an array value in the column, Numpy throws a ValueError
            # "The truth value of an array with more than one element is ambiguous".
            # We don't handle this by default as it is a bit slower.
            def is_missing(k):
                if k is None:
                    return True
                try:
                    # a way to detect NaN's and NaT
                    return not (k == k)
                except ValueError:
                    # if a value is an array, this will fail, and it is a non-missing
                    return False
            # BUG FIX: was `k is not is_missing(k)` — an identity comparison between
            # a value and a bool, which is True for almost everything (including None),
            # so missing values leaked into the type set. Use boolean negation.
            types = list({type(k) for k in ar_data if not is_missing(k)})
        if len(types) == 1 and issubclass(types[0], str):
            # TODO: how do we know it should not be large_string?
            # self._dtypes_override[valid_name] = pa.string()
            ar = vaex.column.ColumnArrowLazyCast(ar, pa.string())
        if len(types) == 0:  # can only be if all nan right?
            ar = ar.astype(np.float64)
    return ar
def _concat_chunk_list(list_of_chunks):
    """Merge a list of {name: array} chunk dicts into one dict of concatenated arrays.

    Arrow ChunkedArrays are flattened into their individual chunks before concatenation.
    """
    per_column = collections.defaultdict(list)
    for chunk_dict in list_of_chunks:
        for name, array in chunk_dict.items():
            if isinstance(array, pa.ChunkedArray):
                per_column[name].extend(array.chunks)
            else:
                per_column[name].append(array)
    return {name: vaex.array_types.concat(arrays) for name, arrays in per_column.items()}
def _slice_of_chunks(chunks_ready_list, chunk_size):
    """Pop chunk dicts off the front of *chunks_ready_list* until ~chunk_size rows are collected.

    Mutates ``chunks_ready_list`` in place.  When the last popped chunk overshoots
    ``chunk_size`` it is split: the head is included in the result and the tail is
    pushed back to the front of the list.

    :return: (list of chunk dicts, row count — equals chunk_size unless input was exhausted)
    """
    current_row_count = 0
    chunks_current_list = []
    while current_row_count < chunk_size and chunks_ready_list:
        chunks_current = chunks_ready_list.pop(0)
        # All columns in a chunk dict share one length; inspect an arbitrary one.
        chunk = list(chunks_current.values())[0]
        if current_row_count + len(chunk) > chunk_size:
            # Chunk overshoots: keep only what is needed, push the remainder back.
            # (Removed a dead `strict = True` toggle whose else-branch was unreachable.)
            needed_length = chunk_size - current_row_count
            current_row_count += needed_length
            assert current_row_count == chunk_size
            chunks_head = {name: vaex.array_types.slice(chunk, 0, needed_length) for name, chunk in chunks_current.items()}
            chunks_current_list.append(chunks_head)
            chunks_extra = {name: vaex.array_types.slice(chunk, needed_length) for name, chunk in chunks_current.items()}
            chunks_ready_list.insert(0, chunks_extra)  # put back the extra in front
        else:
            current_row_count += len(chunk)
            chunks_current_list.append(chunks_current)
    return chunks_current_list, current_row_count
def chunk_rechunk(chunk_iter, chunk_size):
    """Re-batch an (i1, i2, chunks) stream into batches of exactly chunk_size rows.

    Input chunks are buffered until at least chunk_size rows are available, then
    sliced/concatenated; the final batch may be smaller.  Yielded (i1, i2) offsets
    restart at 0 relative to this stream.
    """
    chunks_ready_list = []
    i1 = i2 = 0
    for _, _, chunks in chunk_iter:
        chunks_ready_list.append(chunks)
        # NOTE(review): the buffered total is recomputed from scratch every
        # iteration — quadratic in the number of buffered chunks; a running
        # total would do.
        total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])
        if total_row_count > chunk_size:
            chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
            i2 += current_row_count
            chunks = vaex.dataset._concat_chunk_list(chunks_current_list)
            yield i1, i2, chunks
            i1 = i2
    # flush whatever is buffered (possibly several full batches plus a remainder)
    while chunks_ready_list:
        chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
        i2 += current_row_count
        chunks = vaex.dataset._concat_chunk_list(chunks_current_list)
        yield i1, i2, chunks
        i1 = i2
def _rechunk(chunk_iter, chunk_size):
    """Adapt an iterator of bare chunk dicts into (i1, i2, chunks) triples and rechunk it."""
    def with_offsets():
        # Attach running row offsets so chunk_rechunk can consume the stream.
        start = 0
        for chunk_dict in chunk_iter:
            stop = start + len(list(chunk_dict.values())[0])
            yield start, stop, chunk_dict
            start = stop
    yield from chunk_rechunk(with_offsets(), chunk_size)
def empty_chunk_iterator(start, end, chunk_size):
    """Yield (i1, i2, {}) triples covering ``end - start`` rows in steps of chunk_size.

    Used when no columns are requested: the offsets are relative to *start*
    (they begin at 0) and the chunk payload is always an empty dict.
    """
    length = end - start
    lower = 0
    while lower < length:
        upper = min(length, lower + chunk_size)
        yield lower, upper, {}
        lower = upper
class Dataset(collections.abc.Mapping):
    """Abstract, immutable mapping of column name -> column data.

    Subclasses provide chunked data access, slicing, hashing and (de)serialization.
    Cross-process identity is the content-based ``fingerprint``; ``_ids`` maps
    column names to per-column hashes (filled in by ``hashed()`` implementations).
    """
    def __init__(self):
        super().__init__()
        self._columns = frozendict()
        self._row_count = None
        # NOTE(review): _id is a per-process uuid, yet the public `id` property
        # returns the fingerprint instead — confirm _id is still needed.
        self._id = str(uuid.uuid4())
        self._cached_fingerprint = None
    def __repr__(self):
        import yaml
        data = self.__repr_data__()
        return yaml.dump(data, sort_keys=False, indent=4)
    def __repr_data__(self):
        # Build a plain-python structure (suitable for yaml.dump) from the state.
        state = self.__getstate__()
        def normalize(v):
            if isinstance(v, Dataset):
                return v.__repr_data__()
            if isinstance(v, frozendict):
                return dict(v)
            if isinstance(v, vaex.dataframe.DataFrame):
                return {'type': 'dataframe', 'repr': repr(v)}
            if isinstance(v, np.ndarray):
                return v.tolist()
            return v
        return {'type': self.snake_name, **{k: normalize(v) for k, v in state.items() if not k.startswith('_')}}
    @property
    def id(self):
        '''id that uniquely identifies a dataset at runtime'''
        return self.fingerprint
    @property
    def fingerprint(self):
        '''id that uniquely identifies a dataset cross runtime, might be more expensive and require hasing'''
        if self._cached_fingerprint is None:
            self._cached_fingerprint = self._fingerprint
        return self._cached_fingerprint
    @abstractproperty
    def _fingerprint(self):
        # Subclasses compute the (possibly expensive) stable fingerprint here.
        pass
    def encode(self, encoding):
        # Store the full spec once per id; return a lightweight reference to it.
        if not encoding.has_object_spec(self.id):
            spec = self._encode(encoding)
            encoding.set_object_spec(self.id, spec)
        return {'dataset_type': self.snake_name, 'object-id': self.id}
    @classmethod
    def decode(cls, encoding, spec):
        # Counterpart of encode(): materialize the object at most once per id.
        id = spec['object-id']
        if not encoding.has_object(id):
            spec = encoding.get_object_spec(id)
            ds = cls._decode(encoding, spec)
            encoding.set_object(id, ds)
        return encoding.get_object(id)
    @abstractmethod
    def _create_columns(self):
        # Fill self._columns (called on construction and after unpickling).
        pass
    @property
    def name(self):
        # TODO: in the future, we might want to use self.fingerprint or self.id
        return "no-name"
    def __getstate__(self):
        # Columns (and the cached fingerprint) are rebuilt on unpickle, not serialized.
        state = self.__dict__.copy()
        del state['_columns']
        del state['_cached_fingerprint']
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        self._cached_fingerprint = None
        self._create_columns()
    def schema(self, array_type=None):
        """Return a mapping of column name -> data type."""
        return {name: vaex.array_types.data_type(col) for name, col in self.items()}
    def shapes(self):
        """Return a mapping of column name -> shape."""
        return {name: self.shape(name) for name, col in self.items()}
    def _set_row_count(self):
        # Derive the row count from the first column; all columns must agree.
        if not self._columns:
            return
        values = list(self._columns.values())
        self._row_count = len(values[0])
        for name, value in list(self._columns.items())[1:]:
            if len(value) != self._row_count:
                raise ValueError(f'First columns has length {self._row_count}, while column {name} has length {len(value)}')
    @property
    def row_count(self):
        return self._row_count
    def project(self, *names):
        """Return a dataset containing only the given columns."""
        all = set(self)
        drop = all - set(names)
        # we want a deterministic order for fingerprints
        drop = list(drop)
        drop.sort()
        return self.dropped(*list(drop))
    def concat(self, *others, resolver='flexible'):
        """Concatenate this dataset with others row-wise, flattening nested concats."""
        datasets = []
        if isinstance(self, DatasetConcatenated):
            datasets.extend(self.datasets)
        else:
            datasets.extend([self])
        for other in others:
            if isinstance(other, DatasetConcatenated):
                datasets.extend(other.datasets)
            else:
                datasets.extend([other])
        return DatasetConcatenated(datasets, resolver=resolver)
    def take(self, indices, masked=False):
        return DatasetTake(self, indices, masked=masked)
    def renamed(self, renaming):
        return DatasetRenamed(self, renaming)
    def merged(self, rhs):
        return DatasetMerged(self, rhs)
    def dropped(self, *names):
        return DatasetDropped(self, names)
    def __getitem__(self, item):
        # A slice (step 1 only) returns a sliced dataset; a name returns the column.
        if isinstance(item, slice):
            assert item.step in [1, None]
            return self.slice(item.start or 0, item.stop or self.row_count)
        return self._columns[item]
    def __len__(self):
        return len(self._columns)
    def __iter__(self):
        return iter(self._columns)
    def get_data(self, i1, i2, names):
        raise NotImplementedError
    def __eq__(self, rhs):
        if not isinstance(rhs, Dataset):
            return NotImplemented
        # simple case, if fingerprints are equal, the data is equal
        if self.fingerprint == rhs.fingerprint:
            return True
        # but no the other way around
        keys = set(self)
        keys_hashed = set(self._ids)
        missing = keys ^ keys_hashed
        if missing:
            return self.fingerprint == rhs.fingerprint
        keys = set(rhs)
        keys_hashed = set(rhs._ids)
        missing = keys ^ keys_hashed
        if missing:
            return self.fingerprint == rhs.fingerprint
        return self._ids == rhs._ids
    def __hash__(self):
        keys = set(self)
        keys_hashed = set(self._ids)
        missing = keys ^ keys_hashed
        if missing:
            # if we don't have hashes for all columns, we just use the fingerprint
            return hash(self.fingerprint)
        return hash(tuple(self._ids.items()))
    def _default_lazy_chunk_iterator(self, array_map, columns, chunk_size, reverse=False):
        """Yield (i1, i2, reader) triples; calling reader() materializes that chunk."""
        chunk_size = chunk_size or 1024**2
        chunk_count = (self.row_count + chunk_size - 1) // chunk_size
        chunks = range(chunk_count)
        if reverse:
            chunks = reversed(chunks)
        for i in chunks:
            i1 = i * chunk_size
            i2 = min((i + 1) * chunk_size, self.row_count)
            def reader(i1=i1, i2=i2):
                # i1/i2 bound as defaults to freeze the loop variables per closure
                chunks = {k: array_map[k][i1:i2] for k in columns}
                length = i2 - i1
                for name, chunk in chunks.items():
                    assert len(chunk) == length, f'Oops, got a chunk ({name}) of length {len(chunk)} while it is expected to be of length {length} (at {i1}-{i2}'
                return chunks
            yield i1, i2, reader
    def _default_chunk_iterator(self, array_map, columns, chunk_size, reverse=False):
        # Eager variant of the lazy iterator above.
        for i1, i2, reader in self._default_lazy_chunk_iterator(array_map, columns, chunk_size, reverse):
            yield i1, i2, reader()
    @abstractmethod
    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        pass
    @abstractmethod
    def is_masked(self, column):
        pass
    @abstractmethod
    def shape(self, column):
        pass
    @abstractmethod
    def close(self):
        '''Close file handles or other resources, the DataFrame will not be in a usable state afterwards.'''
        pass
    @abstractmethod
    def slice(self, start, end):
        pass
    @abstractmethod
    def hashed(self):
        pass
    @abstractmethod
    def leafs(self) -> List["Dataset"]:
        pass
class DatasetDecorator(Dataset):
    """Base class for datasets that wrap another dataset and delegate to it."""
    def __init__(self, original):
        super().__init__()
        self.original = original
    def leafs(self) -> List[Dataset]:
        # Decorators are transparent: the leafs are those of the wrapped dataset.
        return self.original.leafs()
    def close(self):
        self.original.close()
    def is_masked(self, column):
        return self.original.is_masked(column)
    def shape(self, column):
        return self.original.shape(column)
class ColumnProxy(vaex.column.Column):
    '''Lazy column view over a Dataset column; gives Dataset._columns useful containers for debugging.'''
    ds: Dataset
    def __init__(self, ds, name, type):
        self.ds = ds
        self.name = name
        self.dtype = type
    def _fingerprint(self):
        fp = vaex.cache.fingerprint(self.ds.fingerprint, self.name)
        return f'column-proxy-{fp}'
    def __len__(self):
        return self.ds.row_count
    def to_numpy(self):
        # Materialize the whole column, then convert.
        values = self[:]
        return np.array(values)
    def __getitem__(self, item):
        # Only slice access is supported; it materializes via the chunk iterator.
        if isinstance(item, slice):
            array_chunks = []
            ds = self.ds.__getitem__(item)
            for chunk_start, chunk_end, chunks in ds.chunk_iterator([self.name]):
                ar = chunks[self.name]
                if isinstance(ar, pa.ChunkedArray):
                    array_chunks.extend(ar.chunks)
                else:
                    array_chunks.append(ar)
            if len(array_chunks) == 1:
                return array_chunks[0]
            if len(array_chunks) == 0:
                # Empty slice: produce an empty array of the right dtype.
                return vaex.dtype(self.dtype).create_array([])
            return vaex.array_types.concat(array_chunks)
        else:
            raise NotImplementedError
@register
class DatasetRenamed(DatasetDecorator):
    """Decorator dataset exposing the original's columns under new names.

    ``renaming`` maps old name -> new name; ``reverse`` maps new -> old.
    """
    snake_name = 'rename'
    def __init__(self, original, renaming):
        super().__init__(original)
        self.renaming = renaming
        self.reverse = {v: k for k, v in renaming.items()}
        self._create_columns()
        self._ids = frozendict({renaming.get(name, name): ar for name, ar in original._ids.items()})
        self._set_row_count()
    def renamed(self, renaming):
        """Collapse chained renames into a single DatasetRenamed over the original."""
        # {'a': 'x', 'b': 'y'} and {'x': 'a', 'b': 'z', 'c': 'q'} -> {'b': 'z', 'c': 'q'}
        resulting = {}
        renaming = renaming.copy()  # we'll modify in place
        for old, new in self.renaming.items():
            if new in renaming:
                if old == renaming[new]:
                    pass  # e.g. x->a->x
                else:
                    resulting[old] = renaming[new]
                del renaming[new]  # we already covered this
            else:
                # e.g. x->a->a
                resulting[old] = new
        # e.g. x->x->a
        resulting.update(renaming)
        return DatasetRenamed(self.original, resulting)
    @property
    def _fingerprint(self):
        # BUG FIX: the fingerprint that includes the renaming was computed but
        # discarded, and the original's fingerprint was returned instead — so
        # two different renamings of the same dataset got identical fingerprints.
        fp = vaex.cache.fingerprint(self.original.fingerprint, self.renaming)
        return f'dataset-{self.snake_name}-{fp}'
    def _create_columns(self):
        self._columns = frozendict({self.renaming.get(name, name): ar for name, ar in self.original.items()})
    def _encode(self, encoding):
        dataset_spec = encoding.encode('dataset', self.original)
        return {'renaming': dict(self.renaming), 'dataset': dataset_spec}
    @classmethod
    def _decode(cls, encoding, spec):
        dataset = encoding.decode('dataset', spec['dataset'])
        return cls(dataset, spec['renaming'])
    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        # Columns must be requested by their *new* names; give a helpful error otherwise.
        for name in columns:
            if name in self.renaming:
                rename = self.renaming[name]
                raise KeyError(f'Oops, you tried to get column {name}, but you renamed it to {rename}')
        columns = [self.reverse.get(name, name) for name in columns]
        for i1, i2, chunks in self.original.chunk_iterator(columns, chunk_size, reverse=reverse):
            yield i1, i2, {self.renaming.get(name, name): ar for name, ar in chunks.items()}
    def is_masked(self, column):
        return self.original.is_masked(self.reverse.get(column, column))
    def shape(self, column):
        return self.original.shape(self.reverse.get(column, column))
    def slice(self, start, end):
        if start == 0 and end == self.row_count:
            return self
        return type(self)(self.original.slice(start, end), self.renaming)
    def hashed(self):
        if set(self._ids) == set(self):
            return self
        return type(self)(self.original.hashed(), self.renaming)
@register
class DatasetConcatenated(Dataset):
snake_name = "concat"
def __init__(self, datasets, resolver):
super().__init__()
self.datasets = datasets
self.resolver = resolver
if self.resolver == 'strict':
for dataset in datasets[1:]:
if set(dataset) != set(datasets[0]):
l = set(dataset)
r = set(datasets[0])
diff = l ^ r
raise NameError(f'Concatenating datasets with different names: {l} and {r} (difference: {diff})')
self._schema = datasets[0].schema()
self._shapes = datasets[0].shapes()
for dataset in datasets[1:]:
if dataset.shapes() != self._shapes:
raise ValueError(f'Cannot concatenate with different shapes: {self._shapes} != {dataset.shapes()}')
for dataset in datasets[1:]:
schema = dataset.schema()
if dataset.schema() != self._schema:
raise ValueError(f'Cannot concatenate with different schemas: {self._shapes} != {dataset.shapes()}')
elif self.resolver == 'flexible':
schemas = [ds.schema() for ds in datasets]
shapes = [ds.shapes() for ds in datasets]
# try to keep the order of the original dataset
schema_list_map = {}
for schema in schemas:
for name, type in schema.items():
if name not in schema_list_map:
schema_list_map[name] = []
for name, type_list in schema_list_map.items():
for schema in schemas:
# None means it is means the column is missing
type_list.append(schema.get(name))
from .schema import resolver_flexible
# shapes
shape_list_map = {}
for shape in shapes:
for name, type in shape.items():
if name not in shape_list_map:
shape_list_map[name] = []
for name, shape_list in shape_list_map.items():
for shapes_ in shapes:
# None means it is means the column is missing
shape_list.append(shapes_.get(name))
self._schema = {}
self._shapes = {}
for name in shape_list_map:
self._schema[name], self._shapes[name] = resolver_flexible.resolve(schema_list_map[name], shape_list_map[name])
else:
raise ValueError(f'Invalid resolver {resolver}, choose between "strict" or "flexible"')
self._create_columns()
self._set_row_count()
@property
def _fingerprint(self):
ids = [ds.fingerprint for ds in self.datasets]
id = vaex.cache.fingerprint(*ids)
return f'dataset-{self.snake_name}-{id}'
def _create_columns(self):
columns = {}
hashes = {}
for name in self._schema:
columns[name] = ColumnProxy(self, name, self._schema[name])
if all(name in ds._ids for ds in self.datasets):
hashes[name] = hash_combine(*[ds._ids[name] for ds in self.datasets])
self._columns = frozendict(columns)
self._ids = frozendict(hashes)
def _encode(self, encoding, skip=set()):
datasets = encoding.encode_list('dataset', self.datasets)
spec = {'dataset_type': self.snake_name, 'datasets': datasets, 'resolver': self.resolver}
return spec
@classmethod
def _decode(cls, encoding, spec):
datasets = encoding.decode_list('dataset', spec['datasets'])
ds = cls(datasets, spec['resolver'])
return ds
def is_masked(self, column):
for dataset in self.datasets:
if column not in dataset:
return True
return any(k.is_masked(column) for k in self.datasets)
def shape(self, column):
return self._shapes[column]
def _set_row_count(self):
self._row_count = sum(ds.row_count for ds in self.datasets)
def schema(self, array_type=None):
return self._schema.copy()
def _chunk_iterator_non_strict(self, columns, chunk_size=None, reverse=False, start=0, end=None):
    """Yield chunk dicts for rows [start, end) across the concatenated datasets.

    "Non strict" means yielded chunks are NOT guaranteed to have ``chunk_size``
    rows each (chunks at dataset/window boundaries are trimmed); callers wrap
    this with ``_rechunk`` to restore uniform chunk sizes. Note that unlike
    ``chunk_iterator`` this yields only the chunk dict, without (i1, i2) indices.
    """
    end = self.row_count if end is None else end
    # offset: global row index of the first row of the dataset/chunk being visited
    offset = 0
    for dataset in self.datasets:
        # only request columns this child dataset actually has; missing ones
        # are filled in below by resolver_flexible.align
        present = [k for k in columns if k in dataset]
        # skip over whole datasets
        if start >= offset + dataset.row_count:
            offset += dataset.row_count
            continue
        # we are past the end
        if end <= offset:
            break
        for i1, i2, chunks in dataset.chunk_iterator(present, chunk_size=chunk_size, reverse=reverse):
            # chunks = {name: vaex.array_types.to_arrow(ar) for name, ar in chunks.items()}
            length = i2 - i1
            # global row range covered by this chunk
            chunk_start = offset
            chunk_end = offset + length
            if start >= chunk_end:  # we didn't find the beginning yet
                offset += length
                continue
            if end <= chunk_start:  # we are past the end
                # assert False
                break
            if start > chunk_start:
                # this means we have to cut off a piece of the beginning
                if end < chunk_end:
                    # AND the end
                    length = end - chunk_start  # without the start cut off
                    length -= start - chunk_start  # correcting for the start cut off
                    assert length > 0
                    chunks = {name: vaex.array_types.slice(ar, start - chunk_start, length) for name, ar in chunks.items()}
                    for name, ar in chunks.items():
                        assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
                else:
                    length -= start - chunk_start  # correcting for the start cut off
                    assert length > 0
                    chunks = {name: vaex.array_types.slice(ar, start - chunk_start) for name, ar in chunks.items()}
                    for name, ar in chunks.items():
                        assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
            else:
                if end < chunk_end:
                    # we only need to cut off a piece of the end
                    length = end - chunk_start
                    assert length > 0
                    chunks = {name: vaex.array_types.slice(ar, 0, length) for name, ar in chunks.items()}
                    for name, ar in chunks.items():
                        assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
            # align each requested column to the concatenated schema/shape,
            # filling in columns this child dataset does not have
            from .schema import resolver_flexible
            allchunks = {name: resolver_flexible.align(length, chunks.get(name), self._schema[name], self._shapes[name]) for name in columns}
            yield {k: allchunks[k] for k in columns}
            offset += (i2 - i1)
def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):
    """Iterate over (i1, i2, chunks) for rows [start, end) of the concatenation.

    Delegates to ``_chunk_iterator_non_strict`` (which may yield unevenly
    sized chunks at dataset boundaries) and re-chunks the stream so every
    chunk except possibly the last has exactly ``chunk_size`` rows.
    """
    chunk_size = chunk_size or 1024*1024
    # Removed dead locals (i1 = 0; i1 = i2 = 0) that were never read.
    if not columns:
        # no columns requested: just yield empty index ranges
        end = self.row_count if end is None else end
        yield from empty_chunk_iterator(start, end, chunk_size)
    else:
        chunk_iterator = self._chunk_iterator_non_strict(columns, chunk_size, reverse=reverse, start=start, end=self.row_count if end is None else end)
        yield from _rechunk(chunk_iterator, chunk_size)
def close(self):
    """Close every child dataset of this concatenation."""
    for child in self.datasets:
        child.close()
def slice(self, start, end):
    """Return a dataset restricted to rows [start, end).

    Returns ``self`` unchanged when the full row range is requested.
    """
    if (start, end) == (0, self.row_count):
        return self
    # TODO: we can be smarter here, and trim off some datasets
    return DatasetSliced(self, start=start, end=end)
def hashed(self):
    """Return an equivalent dataset in which every column has a hash id.

    If the hashes already cover all columns, ``self`` is returned as-is;
    otherwise a new concatenation of hashed children is built.
    """
    if set(self) == set(self._ids):
        return self
    hashed_children = [child.hashed() for child in self.datasets]
    return type(self)(hashed_children, resolver=self.resolver)
def leafs(self) -> List[Dataset]:
    """A concatenation counts as a single leaf; do not recurse into children."""
    return [self]
# def leafs(self) -> List[Dataset]:
# leafs = list()
# for ds in self.datasets:
# leafs.extend(ds.leafs())
# return leafs
@register
class DatasetTake(DatasetDecorator):
    """Dataset decorator selecting rows of ``original`` by an index array.

    ``indices`` maps output row -> source row; ``masked`` indicates the
    index array may contain masked (missing) entries.
    """
    snake_name = "take"

    def __init__(self, original, indices, masked):
        super().__init__(original)
        self.indices = indices
        self.masked = masked
        # hash of the index array; computed lazily since hashing can be expensive
        self._lazy_hash_index = None
        self._create_columns()
        self._set_row_count()

    @property
    def _fingerprint(self):
        id = vaex.cache.fingerprint(self.original.fingerprint, self._hash_index, self.masked)
        return f'dataset-{self.snake_name}-{id}'

    @property
    def _hash_index(self):
        # compute on first access, then cache
        if self._lazy_hash_index is None:
            self._lazy_hash_index = hash_array(self.indices)
        return self._lazy_hash_index

    def _create_columns(self):
        # if the columns in ds already have a ColumnIndex
        # we could do, direct_indices = df.column['bla'].indices[indices]
        # which should be shared among multiple ColumnIndex'es, so we store
        # them in this dict
        direct_indices_map = {}
        columns = {}
        hashes = {}
        for name, column in self.original.items():
            columns[name] = ColumnIndexed.index(column, self.indices, direct_indices_map, masked=self.masked)
            if name in self.original._ids:
                # derived column hash = parent column hash combined with index hash
                hashes[name] = hash_combine(self._hash_index, self.original._ids[name])
        self._columns = frozendict(columns)
        self._ids = frozendict(hashes)

    def _encode(self, encoding, skip=set()):
        # serialize as {dataset, indices, masked}; see _decode for the inverse
        dataset_spec = encoding.encode('dataset', self.original)
        spec = {'dataset_type': self.snake_name, 'dataset': dataset_spec}
        spec['indices'] = encoding.encode('array', self.indices)
        spec['masked'] = self.masked
        return spec

    @classmethod
    def _decode(cls, encoding, spec):
        # inverse of _encode
        dataset = encoding.decode('dataset', spec['dataset'])
        indices = encoding.decode('array', spec['indices'])
        ds = cls(dataset, indices, spec['masked'])
        return ds

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        # TODO: we may be able to do this slightly more efficient by first
        # materializing the columns
        yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)

    def slice(self, start, end):
        # full range: nothing to do
        if start == 0 and end == self.row_count:
            return self
        return DatasetSlicedArrays(self, start=start, end=end)

    def hashed(self):
        # already fully hashed: ids cover all columns
        if set(self._ids) == set(self):
            return self
        return type(self)(self.original.hashed(), self.indices, self.masked)
@register
class DatasetFiltered(DatasetDecorator):
    """Dataset decorator keeping only rows where a boolean ``filter`` array is True.

    Can optionally carry the dataframe ``state``/``selection`` that produced
    the filter, so it can be serialized without shipping the filter array.
    """
    snake_name = 'filter'

    def __init__(self, original, filter, expected_length=None, state=None, selection=None):
        super().__init__(original)
        self._filter = filter
        # hash of the filter array; computed lazily (see _hash_index)
        self._lazy_hash_filter = None
        self._create_columns()
        # row count = number of True entries in the filter
        self._row_count = np.sum(self._filter).item()
        self.state = state
        self.selection = selection
        if expected_length is not None:
            # sanity check when the caller knows how many rows should survive
            if expected_length != self._row_count:
                raise ValueError(f'Expected filter to have {expected_length} true values, but counted {self._row_count}')

    @property
    def _fingerprint(self):
        # NOTE(review): uses self.original.id where sibling classes use
        # self.original.fingerprint — confirm this asymmetry is intentional
        id = vaex.cache.fingerprint(self.original.id, self._hash_index, self.state, self.selection)
        return f'dataset-{self.snake_name}-{id}'

    @property
    def _hash_index(self):
        # compute on first access, then cache
        if self._lazy_hash_filter is None:
            self._lazy_hash_filter = hash_array(self._filter)
        return self._lazy_hash_filter

    def _create_columns(self):
        # columns are lazy proxies; filtering happens in chunk_iterator
        columns = {name: vaex.dataset.ColumnProxy(self, name, data_type(col)) for name, col in self.original._columns.items()}
        hashes = {}
        for name, column in self.original.items():
            if name in self.original._ids:
                hashes[name] = hash_combine(self._hash_index, self.original._ids[name])
        self._columns = frozendict(columns)
        self._ids = frozendict(hashes)

    def _encode(self, encoding, skip=set()):
        # prefer encoding state+selection (compact); always include the filter array
        dataset_spec = encoding.encode('dataset', self.original)
        spec = {'dataset': dataset_spec}
        if self.state is not None and self.selection is not None:
            spec['state'] = encoding.encode('dataframe-state', self.state)
            spec['selection'] = encoding.encode('selection', self.selection)
        spec['filter_array'] = encoding.encode('array', self._filter)
        return spec

    @classmethod
    def _decode(cls, encoding, spec):
        dataset = encoding.decode('dataset', spec['dataset'])
        if 'filter_array' in spec:
            # direct path: the filter array itself was serialized
            filter = encoding.decode('array', spec['filter_array'])
            ds = cls(dataset, filter)
        else:
            # recompute the filter by replaying the dataframe state + selection
            state = encoding.decode('dataframe-state', spec['state'])
            selection = encoding.decode('selection', spec['selection'])
            df = vaex.from_dataset(dataset)
            df.state_set(state)
            df.set_selection(vaex.dataframe.FILTER_SELECTION_NAME, selection)
            df._push_down_filter()
            filter = df.dataset.filter
            ds = cls(dataset, filter, state=state, selection=selection)
        return ds

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        chunk_size = chunk_size or 1024**2
        if not columns:
            # no columns: yield empty chunk dicts covering the filtered row range
            end = self.row_count
            length = end
            i1 = i2 = 0
            i2 = min(length, i1 + chunk_size)
            while i1 < length:
                yield i1, i2, {}
                i1 = i2
                i2 = min(length, i1 + chunk_size)
            return
        def filtered_chunks():
            # apply the matching slice of the filter to each source chunk;
            # resulting chunks have irregular sizes, so we re-chunk below
            for i1, i2, chunks in self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse):
                chunks_filtered = {name: vaex.array_types.filter(ar, self._filter[i1:i2]) for name, ar in chunks.items()}
                yield chunks_filtered
        yield from _rechunk(filtered_chunks(), chunk_size)

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        return type(self)(self.original.hashed(), self._filter)

    def slice(self, start, end):
        if start == 0 and end == self.row_count:
            return self
        expected_length = end - start
        # map filtered-row indices (start, end) back to source-row indices
        mask = vaex.superutils.Mask(memoryview(self._filter))
        start, end = mask.indices(start, end-1)
        end += 1
        filter = self._filter[start:end]
        assert filter.sum() == expected_length
        return type(self)(self.original.slice(start, end), filter)
@register
class DatasetSliced(DatasetDecorator):
    """Lazy row slice [start, end) of ``original``.

    Columns are exposed as proxies; the slicing is applied on-the-fly in
    ``chunk_iterator`` by forwarding ``start``/``end`` to the original.
    """
    snake_name = "slice"

    def __init__(self, original, start, end):
        super().__init__(original)
        self.start = start
        self.end = end
        self._row_count = end - start
        self._create_columns()
        # column hashes must incorporate the slice bounds
        self._ids = frozendict({name: hash_slice(hash, start, end) for name, hash in original._ids.items()})

    @property
    def _fingerprint(self):
        id = vaex.cache.fingerprint(self.original.fingerprint, self.start, self.end)
        return f'dataset-{self.snake_name}-{id}'

    def leafs(self) -> List[Dataset]:
        # we don't want to propagate slicing
        return [self]

    def _encode(self, encoding, skip=set()):
        dataset_spec = encoding.encode('dataset', self.original)
        return {'dataset': dataset_spec, 'start': self.start, 'end': self.end}

    @classmethod
    def _decode(cls, encoding, spec):
        dataset = encoding.decode('dataset', spec['dataset'])
        return cls(dataset, spec['start'], spec['end'])

    def _create_columns(self):
        # lazy proxies; actual data comes from original via chunk_iterator
        self._columns = {name: vaex.dataset.ColumnProxy(self, name, data_type(col)) for name, col in self.original._columns.items()}

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        yield from self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse, start=self.start, end=self.end)

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        return type(self)(self.original.hashed(), self.start, self.end)

    def slice(self, start, end):
        """Slice of a slice: collapse into a single slice on the original."""
        length = end - start
        start += self.start
        end = start + length
        if end > self.original.row_count:
            # fixed message typo: "if larger" -> "is larger"
            raise IndexError(f'Slice end ({end}) is larger than number of rows: {self.original.row_count}')
        return type(self)(self.original, start, end)
@register
class DatasetSlicedArrays(DatasetDecorator):
    """Eager row slice [start, end): column arrays are sliced up-front.

    Unlike :class:`DatasetSliced`, the columns themselves are trimmed in
    ``_create_columns`` rather than sliced lazily during iteration.
    """
    snake_name = 'slice_arrays'

    def __init__(self, original, start, end):
        super().__init__(original)
        # maybe we want to avoid slicing twice, and collapse it to 1?
        self.start = start
        self.end = end
        # TODO: this is the old dataframe.trim method, we somehow need to test/capture that
        # if isinstance(column, array_types.supported_array_types):  # real array
        #     df.columns[name] = column[self._index_start:self._index_end]
        # else:
        #     df.columns[name] = column.trim(self._index_start, self._index_end)
        self._create_columns()
        # column hashes must incorporate the slice bounds
        self._ids = frozendict({name: hash_slice(hash, start, end) for name, hash in original._ids.items()})
        self._set_row_count()

    @property
    def _fingerprint(self):
        id = vaex.cache.fingerprint(self.original.fingerprint, self.start, self.end)
        return f'dataset-{self.snake_name}-{id}'

    def leafs(self) -> List[Dataset]:
        # we don't want to propagate slicing
        return [self]

    def _create_columns(self):
        columns = {}
        for name, column in self.original.items():
            if isinstance(column, array_types.supported_array_types):  # real array
                column = column[self.start:self.end]
            else:
                # column objects (e.g. lazy/virtual) provide trim()
                column = column.trim(self.start, self.end)
            columns[name] = column
        self._columns = frozendict(columns)

    def _encode(self, encoding, skip=set()):
        dataset_spec = encoding.encode('dataset', self.original)
        return {'dataset': dataset_spec, 'start': self.start, 'end': self.end}

    @classmethod
    def _decode(cls, encoding, spec):
        dataset = encoding.decode('dataset', spec['dataset'])
        return cls(dataset, spec['start'], spec['end'])

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        return type(self)(self.original.hashed(), self.start, self.end)

    def slice(self, start, end):
        """Slice of a slice: collapse into a single slice on the original."""
        if start == 0 and end == self.row_count:
            return self
        length = end - start
        start += self.start
        end = start + length
        if end > self.original.row_count:
            # fixed message typo: "if larger" -> "is larger"
            raise IndexError(f'Slice end ({end}) is larger than number of rows: {self.original.row_count}')
        return type(self)(self.original, start, end)
@register
class DatasetDropped(DatasetDecorator):
    """Dataset decorator hiding a set of columns of ``original``."""
    snake_name = "drop"

    def __init__(self, original, names):
        super().__init__(original)
        self._dropped_names = tuple(names)
        self._create_columns()
        # keep only hashes for columns that survive the drop
        self._ids = frozendict({name: ar for name, ar in original._ids.items() if name not in names})
        self._set_row_count()

    def dropped(self, *names):
        # dropping again stacks onto the existing dropped-name tuple
        return DatasetDropped(self.original, self._dropped_names + names)

    @property
    def _fingerprint(self):
        id = vaex.cache.fingerprint(self.original.fingerprint, self._dropped_names)
        return f'dataset-{self.snake_name}-{id}'

    def _create_columns(self):
        self._columns = frozendict({name: ar for name, ar in self.original.items() if name not in self._dropped_names})

    def _encode(self, encoding):
        dataset_spec = encoding.encode('dataset', self.original)
        return {'dataset': dataset_spec, 'names': list(self._dropped_names)}

    @classmethod
    def _decode(cls, encoding, spec):
        dataset = encoding.decode('dataset', spec['dataset'])
        ds = cls(dataset, spec['names'])
        return ds

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        # refuse requests for dropped columns up-front
        for column in columns:
            if column in self._dropped_names:
                raise KeyError(f'Oops, you tried to get column {column} while it is actually dropped')
        yield from self.original.chunk_iterator(columns, chunk_size=chunk_size, reverse=reverse)

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        return type(self)(self.original.hashed(), self._dropped_names)

    def close(self):
        self.original.close()

    def slice(self, start, end):
        if start == 0 and end == self.row_count:
            return self
        return type(self)(self.original.slice(start, end), self._dropped_names)
@register
class DatasetMerged(Dataset):
    """Column-wise merge of two datasets with equal row counts.

    Column names must be disjoint between ``left`` and ``right``.
    """
    snake_name = "merge"

    def __init__(self, left, right):
        super().__init__()
        self.left = left
        self.right = right
        if self.left.row_count != self.right.row_count:
            raise ValueError(f'Merging datasets with unequal row counts ({self.left.row_count} != {self.right.row_count})')
        self._row_count = self.left.row_count
        # duplicate column names would make lookups ambiguous
        overlap = set(left) & set(right)
        if overlap:
            raise NameError(f'Duplicate names: {overlap}')
        self._create_columns()
        self._ids = frozendict({**left._ids, **right._ids})
        self._set_row_count()

    @property
    def _fingerprint(self):
        id = vaex.cache.fingerprint(self.left.fingerprint, self.right.fingerprint)
        return f'dataset-{self.snake_name}-{id}'

    def leafs(self) -> List[Dataset]:
        return self.left.leafs() + self.right.leafs()

    def _create_columns(self):
        # TODO: for DatasetArrays, we might want to just do this?
        # self._columns = frozendict({**left._columns, **right._columns})
        self._columns = {**{name: ColumnProxy(self.left, name, data_type(col)) for name, col in self.left._columns.items()},
                         **{name: ColumnProxy(self.right, name, data_type(col)) for name, col in self.right._columns.items()}}

    def _encode(self, encoding, skip=set()):
        dataset_spec_left = encoding.encode('dataset', self.left)
        dataset_spec_right = encoding.encode('dataset', self.right)
        spec = {'left': dataset_spec_left, 'right': dataset_spec_right}
        return spec

    @classmethod
    def _decode(cls, encoding, spec):
        left = encoding.decode('dataset', spec['left'])
        right = encoding.decode('dataset', spec['right'])
        ds = cls(left, right)
        return ds

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        # split the request between left and right; if only one side is
        # involved we can delegate directly
        columns_left = [k for k in columns if k in self.left]
        columns_right = [k for k in columns if k in self.right]
        if not columns_left:
            yield from self.right.chunk_iterator(columns, chunk_size, reverse=reverse)
        elif not columns_right:
            yield from self.left.chunk_iterator(columns, chunk_size, reverse=reverse)
        else:
            for (i1, i2, ichunks), (j1, j2, jchunks) in zip(
                    self.left.chunk_iterator(columns_left, chunk_size, reverse=reverse),
                    self.right.chunk_iterator(columns_right, chunk_size, reverse=reverse)):
                # TODO: if one of the datasets does not respect the chunk_size (e.g. parquet)
                # this might fail
                assert i1 == j1
                assert i2 == j2
                yield i1, i2, {**ichunks, **jchunks}

    def is_masked(self, column):
        if column in self.left:
            return self.left.is_masked(column)
        else:
            return self.right.is_masked(column)

    def shape(self, column):
        if column in self.left:
            return self.left.shape(column)
        else:
            return self.right.shape(column)

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        return type(self)(self.left.hashed(), self.right.hashed())

    def close(self):
        self.left.close()
        self.right.close()

    def slice(self, start, end):
        if start == 0 and end == self.row_count:
            return self
        return type(self)(self.left.slice(start, end), self.right.slice(start, end))
@register
class DatasetArrays(Dataset):
    """In-memory dataset backed directly by (numpy/arrow) arrays.

    Columns can be given as a mapping and/or as keyword arguments; all
    values are converted via ``to_supported_array``.
    """
    snake_name = "arrays"

    def __init__(self, mapping=None, hashed=True, **kwargs):
        super().__init__()
        if mapping is None:
            mapping = {}
        # kwargs win over the mapping on name collisions
        columns = {**mapping, **kwargs}
        columns = {key: to_supported_array(ar) for key, ar in columns.items()}
        # TODO: we finally want to get rid of datasets with no columns
        self._columns = frozendict(columns)
        if hashed:
            self._ids = frozendict({key: hash_array(array) for key, array in self._columns.items()})
        else:
            self._ids = frozendict()
        self._set_row_count()

    @property
    def id(self):
        try:
            # requires hashing and is expensive
            return self.fingerprint
        except ValueError:
            # fall back to the per-instance uuid when not all columns are hashed
            return f'dataset-{self.snake_name}-uuid4-{self._id}'

    @property
    def _fingerprint(self):
        keys = set(self)
        keys_hashed = set(self._ids)
        missing = keys ^ keys_hashed
        if missing:
            # if we don't have hashes for all columns, we do it like id
            return f'dataset-{self.snake_name}-uuid4-{self._id}'
        # self.__hash__()  # invoke just to check we don't have missing hashes
        # but Python's hash functions are not deterministic (cross processs)
        fp = vaex.cache.fingerprint(tuple(self._ids.items()))
        return f'dataset-{self.snake_name}-hashed-{fp}'

    def leafs(self) -> List[Dataset]:
        return [self]

    def _encode(self, encoding):
        arrays = encoding.encode_dict('array', self._columns)
        spec = {'dataset_type': self.snake_name, 'arrays': arrays}
        if self._ids:
            # include hashes so the decoded dataset stays "hashed"
            fingerprints = dict(self._ids)
            spec['fingerprints'] = fingerprints
        return spec

    @classmethod
    def _decode(cls, encoding, spec):
        arrays = encoding.decode_dict('array', spec['arrays'])
        ds = cls(arrays)
        if 'fingerprints' in spec:
            ds._ids = frozendict(spec['fingerprints'])
        return ds

    def __getstate__(self):
        state = self.__dict__.copy()
        # here, we actually DO want to keep the columns
        # del state['_columns']
        return state

    def __setstate__(self, state):
        super().__setstate__(state)

    def _create_columns(self):
        # columns were supplied directly in __init__; nothing to derive
        pass

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)

    def is_masked(self, column):
        ar = self._columns[column]
        if not isinstance(ar, np.ndarray):
            ar = ar[0:1]  # take a small piece
        if isinstance(ar, np.ndarray):
            return np.ma.isMaskedArray(ar)
        else:
            return False  # an arrow array always has null value options

    def shape(self, column):
        ar = self._columns[column]
        if not isinstance(ar, np.ndarray):
            ar = ar[0:1]  # take a small piece
        if isinstance(ar, vaex.array_types.supported_arrow_array_types):
            return tuple()
        else:
            # trailing dimensions beyond the row axis
            return ar.shape[1:]

    def merged(self, rhs):
        # TODO: if we don't allow emtpy datasets, we can remove this method
        if len(self) == 0:
            return rhs
        if len(rhs) == 0:
            return self
        # TODO: this is where we want to check if both are array like
        # and have faster version of merged
        return DatasetMerged(self, rhs)

    def slice(self, start, end):
        if start == 0 and end == self.row_count:
            return self
        return DatasetSlicedArrays(self, start=start, end=end)

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        new = type(self)(self._columns)
        new._ids = frozendict({key: hash_array(array) for key, array in new._columns.items()})
        return new

    def close(self):
        pass  # nothing to do, maybe drop a refcount?
# TODO: we might want to really get rid of these, since we want to avoid copying them over the network?
# def dropped(self, names):
class DatasetFile(Dataset):
    """Datasets that map to a file can keep their ids/hashes in the file itself,
    or keep them in a meta file.

    Hashes are persisted in ``<path>.d/hashes.yaml`` so they only have to be
    computed once per file.
    """
    def __init__(self, path, write=False, fs_options=None, fs=None):
        # NOTE: fs_options previously defaulted to a mutable {} (shared across
        # calls); a None sentinel is used instead, preserving behavior.
        super().__init__()
        self.path = path
        self.fs_options = fs_options if fs_options is not None else {}
        self.fs = fs
        self.write = write
        self._columns = {}
        self._ids = {}
        self._frozen = False
        self._hash_calculations = 0  # track it for testing purposes
        self._hash_info = {}
        self._hash_cache_needs_write = False
        self._read_hashes()

    @property
    def name(self):
        # strip directory, extension and fs options from the path
        base, ext, fs_options = vaex.file.split_ext(self.path)
        base = os.path.basename(base)
        return base

    @property
    def _fingerprint(self):
        if set(self._ids) == set(self):
            # all columns hashed: fingerprint from the hashes themselves
            fingerprint = vaex.cache.fingerprint(dict(self._ids))
            return f'dataset-{self.snake_name}-hashed-{fingerprint}'
        else:
            # TODO: if the dataset is hashed, return a fingerprint based on that
            fingerprint = vaex.file.fingerprint(self.path, fs_options=self.fs_options, fs=self.fs)
            return f'dataset-{self.snake_name}-{fingerprint}'

    def leafs(self) -> List[Dataset]:
        return [self]

    def _create_columns(self):
        # subclasses populate columns via add_column
        pass

    @classmethod
    def quick_test(cls, path, fs_options=None, fs=None, *args, **kwargs):
        """Cheap check whether this class can open ``path``; base says no."""
        return False

    @classmethod
    def open(cls, path, *args, **kwargs):
        return cls(path, *args, **kwargs)

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        yield from self._default_chunk_iterator(self._columns, columns, chunk_size, reverse=reverse)

    def is_masked(self, column):
        ar = self._columns[column]
        if not isinstance(ar, np.ndarray):
            ar = ar[0:1]  # take a small piece
        if isinstance(ar, np.ndarray):
            return np.ma.isMaskedArray(ar)
        else:
            return False  # an arrow array always has null value options

    def shape(self, column):
        ar = self._columns[column]
        if not isinstance(ar, np.ndarray):
            ar = ar[0:1]  # take a small piece
        if isinstance(ar, vaex.array_types.supported_arrow_array_types):
            return tuple()
        else:
            return ar.shape[1:]

    def slice(self, start, end):
        if start == 0 and end == self.row_count:
            return self
        return DatasetSlicedArrays(self, start=start, end=end)

    def _read_hashes(self):
        """Load previously persisted column hash info from <path>.d/hashes.yaml."""
        path_hashes = Path(self.path + '.d') / 'hashes.yaml'
        try:
            exists = path_hashes.exists()
        except OSError:  # happens for windows py<38
            exists = False
        if exists:
            with path_hashes.open() as f:
                hashes = vaex.utils.yaml_load(f)
                if hashes is None:
                    raise ValueError(f'{path_hashes} was probably truncated due to another process writing.')
                self._hash_info = hashes.get('columns', {})

    def _freeze(self):
        # make the dataset immutable after all columns have been added
        self._ids = frozendict(self._ids)
        self._columns = frozendict(self._columns)
        self._set_row_count()
        self._frozen = True
        if self._hash_cache_needs_write:
            self._write_hash_info()

    def encode(self, encoding, skip=set()):
        spec = {'dataset_type': self.snake_name,
                'write': self.write,
                'path': self.path,
                'fs_options': self.fs_options,
                'fs': self.fs}
        return spec

    def __getstate__(self):
        # we don't have the columns in the state, since we should be able
        # to get them from disk again
        return {
            'write': self.write,
            'path': self.path,
            'fs_options': self.fs_options,
            'fs': self.fs,
            '_ids': dict(self._ids)  # serialize the hashes as non-frozen dict
        }

    def __setstate__(self, state):
        super().__setstate__(state)
        # 'ctor' like initialization
        self._frozen = False
        self._hash_calculations = 0
        self._columns = {}
        self._hash_info = {}
        self._hash_cache_needs_write = False
        self._read_hashes()

    def add_column(self, name, data):
        self._columns[name] = data
        if self.write:
            return  # the columns don't include the final data
            # the hashes will be done in .freeze()
        hash_info = self._hash_info.get(name)
        if hash_info:
            hash_info_previous = hash_info.copy()
            hash, hash_info = hash_array(data, hash_info, return_info=True)
            if hash_info_previous != hash_info:
                self._hash_cache_needs_write = True
            self._ids[name] = hash
            self._hash_info[name] = hash_info  # always update the information

    @property
    def _local_hash_path(self):
        # TODO: support s3 and gcs
        # TODO: fallback directory when a user cannot write
        if Path(self.path).exists():
            directory = Path(self.path + '.d')
            directory.mkdir(exist_ok=True)
        else:
            o = urlparse(self.path)
            directory = Path(vaex.utils.get_private_dir('dataset', o.scheme, o.netloc, o.path[1:]))
        return directory / 'hashes.yaml'

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        cls = type(self)
        # use pickle protocol to clone
        new = cls.__new__(cls)
        new.__setstate__(self.__getstate__())
        hashes = {}
        disk_cached_hashes = {}
        for name, column in new.items():
            hash_info = self._hash_info.get(name)
            if hash_info is None:
                logging.warning(f'Calculating hash for column {name} of length {len(column)} (1 time operation, will be cached on disk)')
                hash_info = hash_array_data(column)
            hash, hash_info = hash_array(column, hash_info, return_info=True)
            new._hash_calculations += 1
            hashes[name] = hash
            disk_cached_hashes[name] = hash_info
        new._ids = frozendict(hashes)
        new._hash_info = frozendict(disk_cached_hashes)
        path_hashes = new._local_hash_path
        # TODO: without this check, if multiple processes are writing (e.g. tests/execution_test.py::test_task_sum with ray)
        # this leads to a race condition, where we write the file, and while truncated, _read_hases() fails (because the file exists)
        # if new._hash_info != new._ids:
        new._write_hash_info()
        return new

    def _write_hash_info(self):
        """Persist the column hash info to the local hashes.yaml file."""
        if self._hash_info:  # TODO: file lock
            path_hashes = self._local_hash_path
            with path_hashes.open('w') as f:
                vaex.utils.yaml_dump(f, {'columns': dict(self._hash_info)})
class DatasetCached(DatasetDecorator):
    """Dataset decorator caching the chunks of selected columns.

    Columns listed in ``names`` are stored (as lists of chunks) in ``cache``;
    on subsequent iterations cached columns are served from the cache and only
    the remaining columns are read from ``original``.
    """
    snake_name = "cached"
    # default cache, shared between all instances that don't pass their own
    shared_cache = {}

    def __init__(self, original, names, cache=None, to_numpy=False):
        super(DatasetCached, self).__init__(original)
        self.original = original
        self.names = names
        self._shared = cache is None or cache is self.shared_cache
        self.cache = cache if cache is not None else self.shared_cache
        self.to_numpy = to_numpy
        self._create_columns()
        self._row_count = self.original.row_count

    @property
    def _fingerprint(self):
        # caching does not change the data, so reuse the original's fingerprint
        return self.original.fingerprint

    def _create_columns(self):
        columns = {}
        schema = self.original.schema()
        for name, column in self.original.items():
            columns[name] = ColumnProxy(self, name, schema[name])
        self._columns = frozendict(columns)
        self._ids = frozendict(self.original._ids)

    def _encode(self, encoding, skip=set()):
        raise NotImplementedError("cannot serialize cache")

    @classmethod
    def _decode(cls, encoding, spec):
        raise NotImplementedError("cannot serialize cache")

    def chunk_iterator(self, columns, chunk_size=None, reverse=False):
        chunk_size = chunk_size or chunk_size_default
        columns_all = set(columns)
        columns_cachable = columns_all & set(self.names)
        # avoids asking the cache twice, by using .get() and then testing for None
        columns_cached = {name: self.cache.get(self._cache_key(name)) for name in columns_cachable}
        columns_cached = {name: array for name, array in columns_cached.items() if array is not None}
        columns_to_cache = columns_cachable - set(columns_cached)
        column_required = columns_all - set(columns_cached)
        # per-column list of chunks collected during iteration, written at the end
        cache_chunks = {name: [] for name in columns_to_cache}
        def cached_iterator():
            chunks_list = [chunks for name, chunks in columns_cached.items()]
            # chunks_list is of form [[ar1x, ar2x, a3x], [ar1y, ar2y, a3y]]
            # and now we want to yield
            # * i1, i2 {'x': ar1x, 'y': ar1y}
            # * i1, i2 {'x': ar2x, 'y': ar2y}
            # * i1, i2 {'x': ar3x, 'y': ar3y}
            names = [name for name, chunks in columns_cached.items()]
            i1 = 0
            i2 = 0
            for chunks in zip(*chunks_list):
                i2 += len(chunks[0])
                for chunk in chunks:
                    # all columns must agree on the chunk length
                    assert len(chunk) == len(chunks[0])
                yield i1, i2, dict(zip(names, chunks))
                i1 = i2
        # build a (re-chunked) iterator over the cached columns, or an empty one
        if columns_cached:
            cached_iter = chunk_rechunk(cached_iterator(), chunk_size)
        else:
            cached_iter = empty_chunk_iterator(0, self.row_count, chunk_size)
        if column_required:
            original_iter = self.original.chunk_iterator(column_required, chunk_size, reverse=reverse)
        else:
            original_iter = empty_chunk_iterator(0, self.row_count, chunk_size)
        original_iter = list(original_iter)
        cached_iter = list(cached_iter)
        # zip cached and freshly-read chunks; index ranges must line up
        for (o1, o2, ochunks), (c1, c2, cchunks) in zip(original_iter, cached_iter):
            assert o1 == c1
            assert o2 == c2
            yield o1, o2, {**ochunks, **cchunks}
            for name in columns_to_cache:
                if self.to_numpy:
                    ochunks = {k: vaex.array_types.to_numpy(v) for k, v in ochunks.items()}
                cache_chunks[name].append(ochunks[name])
        # we write it too the cache in 1 go
        for name in columns_to_cache:
            self.cache[self._cache_key(name)] = cache_chunks[name]

    def slice(self, start, end):
        if start == 0 and end == self.row_count:
            return self
        return type(self)(self.original.slice(start, end), self.names, cache=self.cache)

    def hashed(self):
        if set(self._ids) == set(self):
            return self
        return type(self)(self.original.hashed(), self.names, cache=self.cache)

    def _cache_key(self, name):
        # per-dataset, per-column cache key
        return f"{self.fingerprint}-{name}"
| [
"logging.getLogger",
"vaex.encoding.set_object",
"frozendict.frozendict",
"vaex.encoding.register",
"numpy.asanyarray",
"numpy.array",
"vaex.file.fingerprint",
"numpy.ma.isMaskedArray",
"vaex.encoding.set_object_spec",
"vaex.encoding.get_object_spec",
"vaex.array_types.to_numpy",
"pathlib.Path... | [((553, 586), 'logging.getLogger', 'logging.getLogger', (['"""vaex.dataset"""'], {}), "('vaex.dataset')\n", (570, 586), False, 'import logging\n'), ((746, 762), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (760, 762), False, 'import threading\n'), ((905, 933), 'vaex.encoding.register', 'encoding.register', (['"""dataset"""'], {}), "('dataset')\n", (922, 933), False, 'from vaex import encoding\n'), ((2943, 2985), 'vaex.utils.create_hasher', 'vaex.utils.create_hasher', ([], {'large_data': '(False)'}), '(large_data=False)\n', (2967, 2985), False, 'import vaex\n'), ((3196, 3234), 'numpy.array', 'np.array', (['[start, end]'], {'dtype': 'np.int64'}), '([start, end], dtype=np.int64)\n', (3204, 3234), True, 'import numpy as np\n'), ((6823, 6865), 'vaex.utils.create_hasher', 'vaex.utils.create_hasher', ([], {'large_data': '(False)'}), '(large_data=False)\n', (6847, 6865), False, 'import vaex\n'), ((3633, 3656), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['ar'], {}), '(ar)\n', (3652, 3656), True, 'import numpy as np\n'), ((5855, 5878), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['ar'], {}), '(ar)\n', (5874, 5878), True, 'import numpy as np\n'), ((7244, 7261), 'numpy.asanyarray', 'np.asanyarray', (['ar'], {}), '(ar)\n', (7257, 7261), True, 'import numpy as np\n'), ((9148, 9179), 'vaex.array_types.concat', 'vaex.array_types.concat', (['arrays'], {}), '(arrays)\n', (9171, 9179), False, 'import vaex\n'), ((11188, 11248), 'vaex.dataset._slice_of_chunks', 'vaex.dataset._slice_of_chunks', (['chunks_ready_list', 'chunk_size'], {}), '(chunks_ready_list, chunk_size)\n', (11217, 11248), False, 'import vaex\n'), ((11298, 11350), 'vaex.dataset._concat_chunk_list', 'vaex.dataset._concat_chunk_list', (['chunks_current_list'], {}), '(chunks_current_list)\n', (11329, 11350), False, 'import vaex\n'), ((12011, 12023), 'frozendict.frozendict', 'frozendict', ([], {}), '()\n', (12021, 12023), False, 'from frozendict import frozendict\n'), ((12228, 12270), 
'yaml.dump', 'yaml.dump', (['data'], {'sort_keys': '(False)', 'indent': '(4)'}), '(data, sort_keys=False, indent=4)\n', (12237, 12270), False, 'import yaml\n'), ((13852, 13875), 'vaex.encoding.get_object', 'encoding.get_object', (['id'], {}), '(id)\n', (13871, 13875), False, 'from vaex import encoding\n'), ((19855, 19909), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.ds.fingerprint', 'self.name'], {}), '(self.ds.fingerprint, self.name)\n', (19877, 19909), False, 'import vaex\n'), ((20068, 20084), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (20076, 20084), True, 'import numpy as np\n'), ((21978, 22042), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.original.fingerprint', 'self.renaming'], {}), '(self.original.fingerprint, self.renaming)\n', (22000, 22042), False, 'import vaex\n'), ((22314, 22355), 'vaex.encoding.encode', 'encoding.encode', (['"""dataset"""', 'self.original'], {}), "('dataset', self.original)\n", (22329, 22355), False, 'from vaex import encoding\n'), ((22504, 22547), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['dataset']"], {}), "('dataset', spec['dataset'])\n", (22519, 22547), False, 'from vaex import encoding\n'), ((26499, 26527), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['*ids'], {}), '(*ids)\n', (26521, 26527), False, 'import vaex\n'), ((26927, 26946), 'frozendict.frozendict', 'frozendict', (['columns'], {}), '(columns)\n', (26937, 26946), False, 'from frozendict import frozendict\n'), ((26967, 26985), 'frozendict.frozendict', 'frozendict', (['hashes'], {}), '(hashes)\n', (26977, 26985), False, 'from frozendict import frozendict\n'), ((27051, 27097), 'vaex.encoding.encode_list', 'encoding.encode_list', (['"""dataset"""', 'self.datasets'], {}), "('dataset', self.datasets)\n", (27071, 27097), False, 'from vaex import encoding\n'), ((27291, 27340), 'vaex.encoding.decode_list', 'encoding.decode_list', (['"""dataset"""', "spec['datasets']"], {}), "('dataset', 
spec['datasets'])\n", (27311, 27340), False, 'from vaex import encoding\n'), ((32570, 32655), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.original.fingerprint', 'self._hash_index', 'self.masked'], {}), '(self.original.fingerprint, self._hash_index, self.masked\n )\n', (32592, 32655), False, 'import vaex\n'), ((33539, 33558), 'frozendict.frozendict', 'frozendict', (['columns'], {}), '(columns)\n', (33549, 33558), False, 'from frozendict import frozendict\n'), ((33579, 33597), 'frozendict.frozendict', 'frozendict', (['hashes'], {}), '(hashes)\n', (33589, 33597), False, 'from frozendict import frozendict\n'), ((33667, 33708), 'vaex.encoding.encode', 'encoding.encode', (['"""dataset"""', 'self.original'], {}), "('dataset', self.original)\n", (33682, 33708), False, 'from vaex import encoding\n'), ((33809, 33847), 'vaex.encoding.encode', 'encoding.encode', (['"""array"""', 'self.indices'], {}), "('array', self.indices)\n", (33824, 33847), False, 'from vaex import encoding\n'), ((33979, 34022), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['dataset']"], {}), "('dataset', spec['dataset'])\n", (33994, 34022), False, 'from vaex import encoding\n'), ((34041, 34082), 'vaex.encoding.decode', 'encoding.decode', (['"""array"""', "spec['indices']"], {}), "('array', spec['indices'])\n", (34056, 34082), False, 'from vaex import encoding\n'), ((35460, 35551), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.original.id', 'self._hash_index', 'self.state', 'self.selection'], {}), '(self.original.id, self._hash_index, self.state, self\n .selection)\n', (35482, 35551), False, 'import vaex\n'), ((36166, 36185), 'frozendict.frozendict', 'frozendict', (['columns'], {}), '(columns)\n', (36176, 36185), False, 'from frozendict import frozendict\n'), ((36206, 36224), 'frozendict.frozendict', 'frozendict', (['hashes'], {}), '(hashes)\n', (36216, 36224), False, 'from frozendict import frozendict\n'), ((36294, 36335), 'vaex.encoding.encode', 
'encoding.encode', (['"""dataset"""', 'self.original'], {}), "('dataset', self.original)\n", (36309, 36335), False, 'from vaex import encoding\n'), ((36626, 36664), 'vaex.encoding.encode', 'encoding.encode', (['"""array"""', 'self._filter'], {}), "('array', self._filter)\n", (36641, 36664), False, 'from vaex import encoding\n'), ((36759, 36802), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['dataset']"], {}), "('dataset', spec['dataset'])\n", (36774, 36802), False, 'from vaex import encoding\n'), ((39248, 39319), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.original.fingerprint', 'self.start', 'self.end'], {}), '(self.original.fingerprint, self.start, self.end)\n', (39270, 39319), False, 'import vaex\n'), ((39544, 39585), 'vaex.encoding.encode', 'encoding.encode', (['"""dataset"""', 'self.original'], {}), "('dataset', self.original)\n", (39559, 39585), False, 'from vaex import encoding\n'), ((39739, 39782), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['dataset']"], {}), "('dataset', spec['dataset'])\n", (39754, 39782), False, 'from vaex import encoding\n'), ((41549, 41620), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.original.fingerprint', 'self.start', 'self.end'], {}), '(self.original.fingerprint, self.start, self.end)\n', (41571, 41620), False, 'import vaex\n'), ((42153, 42172), 'frozendict.frozendict', 'frozendict', (['columns'], {}), '(columns)\n', (42163, 42172), False, 'from frozendict import frozendict\n'), ((42242, 42283), 'vaex.encoding.encode', 'encoding.encode', (['"""dataset"""', 'self.original'], {}), "('dataset', self.original)\n", (42257, 42283), False, 'from vaex import encoding\n'), ((42437, 42480), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['dataset']"], {}), "('dataset', spec['dataset'])\n", (42452, 42480), False, 'from vaex import encoding\n'), ((43785, 43855), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.original.fingerprint', 
'self._dropped_names'], {}), '(self.original.fingerprint, self._dropped_names)\n', (43807, 43855), False, 'import vaex\n'), ((44114, 44155), 'vaex.encoding.encode', 'encoding.encode', (['"""dataset"""', 'self.original'], {}), "('dataset', self.original)\n", (44129, 44155), False, 'from vaex import encoding\n'), ((44307, 44350), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['dataset']"], {}), "('dataset', spec['dataset'])\n", (44322, 44350), False, 'from vaex import encoding\n'), ((45736, 45775), 'frozendict.frozendict', 'frozendict', (['{**left._ids, **right._ids}'], {}), '({**left._ids, **right._ids})\n', (45746, 45775), False, 'from frozendict import frozendict\n'), ((45862, 45931), 'vaex.cache.fingerprint', 'vaex.cache.fingerprint', (['self.left.fingerprint', 'self.right.fingerprint'], {}), '(self.left.fingerprint, self.right.fingerprint)\n', (45884, 45931), False, 'import vaex\n'), ((46571, 46608), 'vaex.encoding.encode', 'encoding.encode', (['"""dataset"""', 'self.left'], {}), "('dataset', self.left)\n", (46586, 46608), False, 'from vaex import encoding\n'), ((46638, 46676), 'vaex.encoding.encode', 'encoding.encode', (['"""dataset"""', 'self.right'], {}), "('dataset', self.right)\n", (46653, 46676), False, 'from vaex import encoding\n'), ((46840, 46880), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['left']"], {}), "('dataset', spec['left'])\n", (46855, 46880), False, 'from vaex import encoding\n'), ((46897, 46938), 'vaex.encoding.decode', 'encoding.decode', (['"""dataset"""', "spec['right']"], {}), "('dataset', spec['right'])\n", (46912, 46938), False, 'from vaex import encoding\n'), ((49094, 49113), 'frozendict.frozendict', 'frozendict', (['columns'], {}), '(columns)\n', (49104, 49113), False, 'from frozendict import frozendict\n'), ((50222, 50266), 'vaex.encoding.encode_dict', 'encoding.encode_dict', (['"""array"""', 'self._columns'], {}), "('array', self._columns)\n", (50242, 50266), False, 'from vaex import 
encoding\n'), ((50540, 50585), 'vaex.encoding.decode_dict', 'encoding.decode_dict', (['"""array"""', "spec['arrays']"], {}), "('array', spec['arrays'])\n", (50560, 50585), False, 'from vaex import encoding\n'), ((53471, 53501), 'vaex.file.split_ext', 'vaex.file.split_ext', (['self.path'], {}), '(self.path)\n', (53490, 53501), False, 'import vaex\n'), ((53517, 53539), 'os.path.basename', 'os.path.basename', (['base'], {}), '(base)\n', (53533, 53539), False, 'import os\n'), ((55909, 55930), 'frozendict.frozendict', 'frozendict', (['self._ids'], {}), '(self._ids)\n', (55919, 55930), False, 'from frozendict import frozendict\n'), ((55955, 55980), 'frozendict.frozendict', 'frozendict', (['self._columns'], {}), '(self._columns)\n', (55965, 55980), False, 'from frozendict import frozendict\n'), ((58955, 58973), 'frozendict.frozendict', 'frozendict', (['hashes'], {}), '(hashes)\n', (58965, 58973), False, 'from frozendict import frozendict\n'), ((58999, 59029), 'frozendict.frozendict', 'frozendict', (['disk_cached_hashes'], {}), '(disk_cached_hashes)\n', (59009, 59029), False, 'from frozendict import frozendict\n'), ((60515, 60534), 'frozendict.frozendict', 'frozendict', (['columns'], {}), '(columns)\n', (60525, 60534), False, 'from frozendict import frozendict\n'), ((60555, 60585), 'frozendict.frozendict', 'frozendict', (['self.original._ids'], {}), '(self.original._ids)\n', (60565, 60585), False, 'from frozendict import frozendict\n'), ((1475, 1535), 'pkg_resources.iter_entry_points', 'pkg_resources.iter_entry_points', ([], {'group': '"""vaex.dataset.opener"""'}), "(group='vaex.dataset.opener')\n", (1506, 1535), False, 'import pkg_resources\n'), ((3725, 3780), 'vaex.utils.create_hasher', 'vaex.utils.create_hasher', (['data_byte_ar'], {'large_data': '(True)'}), '(data_byte_ar, large_data=True)\n', (3749, 3780), False, 'import vaex\n'), ((4372, 4422), 'vaex.utils.create_hasher', 'vaex.utils.create_hasher', (['byte_ar'], {'large_data': '(True)'}), '(byte_ar, 
large_data=True)\n', (4396, 4422), False, 'import vaex\n'), ((4578, 4619), 'vaex.utils.create_hasher', 'vaex.utils.create_hasher', ([], {'large_data': '(True)'}), '(large_data=True)\n', (4602, 4619), False, 'import vaex\n'), ((7372, 7383), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (7381, 7383), True, 'import pyarrow as pa\n'), ((7479, 7502), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['ar'], {}), '(ar)\n', (7498, 7502), True, 'import numpy as np\n'), ((10885, 10945), 'vaex.dataset._slice_of_chunks', 'vaex.dataset._slice_of_chunks', (['chunks_ready_list', 'chunk_size'], {}), '(chunks_ready_list, chunk_size)\n', (10914, 10945), False, 'import vaex\n'), ((11003, 11055), 'vaex.dataset._concat_chunk_list', 'vaex.dataset._concat_chunk_list', (['chunks_current_list'], {}), '(chunks_current_list)\n', (11034, 11055), False, 'import vaex\n'), ((12078, 12090), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12088, 12090), False, 'import uuid\n'), ((13378, 13411), 'vaex.encoding.has_object_spec', 'encoding.has_object_spec', (['self.id'], {}), '(self.id)\n', (13402, 13411), False, 'from vaex import encoding\n'), ((13467, 13506), 'vaex.encoding.set_object_spec', 'encoding.set_object_spec', (['self.id', 'spec'], {}), '(self.id, spec)\n', (13491, 13506), False, 'from vaex import encoding\n'), ((13679, 13702), 'vaex.encoding.has_object', 'encoding.has_object', (['id'], {}), '(id)\n', (13698, 13702), False, 'from vaex import encoding\n'), ((13723, 13751), 'vaex.encoding.get_object_spec', 'encoding.get_object_spec', (['id'], {}), '(id)\n', (13747, 13751), False, 'from vaex import encoding\n'), ((13809, 13836), 'vaex.encoding.set_object', 'encoding.set_object', (['id', 'ds'], {}), '(id, ds)\n', (13828, 13836), False, 'from vaex import encoding\n'), ((14444, 14475), 'vaex.array_types.data_type', 'vaex.array_types.data_type', (['col'], {}), '(col)\n', (14470, 14475), False, 'import vaex\n'), ((20717, 20754), 'vaex.array_types.concat', 'vaex.array_types.concat', 
(['array_chunks'], {}), '(array_chunks)\n', (20740, 20754), False, 'import vaex\n'), ((36471, 36517), 'vaex.encoding.encode', 'encoding.encode', (['"""dataframe-state"""', 'self.state'], {}), "('dataframe-state', self.state)\n", (36486, 36517), False, 'from vaex import encoding\n'), ((36550, 36594), 'vaex.encoding.encode', 'encoding.encode', (['"""selection"""', 'self.selection'], {}), "('selection', self.selection)\n", (36565, 36594), False, 'from vaex import encoding\n'), ((36859, 36905), 'vaex.encoding.decode', 'encoding.decode', (['"""array"""', "spec['filter_array']"], {}), "('array', spec['filter_array'])\n", (36874, 36905), False, 'from vaex import encoding\n'), ((36978, 37027), 'vaex.encoding.decode', 'encoding.decode', (['"""dataframe-state"""', "spec['state']"], {}), "('dataframe-state', spec['state'])\n", (36993, 37027), False, 'from vaex import encoding\n'), ((37052, 37099), 'vaex.encoding.decode', 'encoding.decode', (['"""selection"""', "spec['selection']"], {}), "('selection', spec['selection'])\n", (37067, 37099), False, 'from vaex import encoding\n'), ((37117, 37143), 'vaex.from_dataset', 'vaex.from_dataset', (['dataset'], {}), '(dataset)\n', (37134, 37143), False, 'import vaex\n'), ((49272, 49284), 'frozendict.frozendict', 'frozendict', ([], {}), '()\n', (49282, 49284), False, 'from frozendict import frozendict\n'), ((50668, 50700), 'frozendict.frozendict', 'frozendict', (["spec['fingerprints']"], {}), "(spec['fingerprints'])\n", (50678, 50700), False, 'from frozendict import frozendict\n'), ((51401, 51424), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['ar'], {}), '(ar)\n', (51420, 51424), True, 'import numpy as np\n'), ((53899, 53971), 'vaex.file.fingerprint', 'vaex.file.fingerprint', (['self.path'], {'fs_options': 'self.fs_options', 'fs': 'self.fs'}), '(self.path, fs_options=self.fs_options, fs=self.fs)\n', (53920, 53971), False, 'import vaex\n'), ((54743, 54766), 'numpy.ma.isMaskedArray', 'np.ma.isMaskedArray', (['ar'], {}), '(ar)\n', 
(54762, 54766), True, 'import numpy as np\n'), ((55376, 55398), 'pathlib.Path', 'Path', (["(self.path + '.d')"], {}), "(self.path + '.d')\n", (55380, 55398), False, 'from pathlib import Path\n'), ((57885, 57907), 'pathlib.Path', 'Path', (["(self.path + '.d')"], {}), "(self.path + '.d')\n", (57889, 57907), False, 'from pathlib import Path\n'), ((57981, 58000), 'urllib.parse.urlparse', 'urlparse', (['self.path'], {}), '(self.path)\n', (57989, 58000), False, 'from urllib.parse import urlparse\n'), ((2545, 2581), 'vaex.utils.format_exception_trace', 'vaex.utils.format_exception_trace', (['e'], {}), '(e)\n', (2578, 2581), False, 'import vaex\n'), ((4058, 4113), 'vaex.utils.create_hasher', 'vaex.utils.create_hasher', (['mask_byte_ar'], {'large_data': '(True)'}), '(mask_byte_ar, large_data=True)\n', (4082, 4113), False, 'import vaex\n'), ((35101, 35121), 'numpy.sum', 'np.sum', (['self._filter'], {}), '(self._filter)\n', (35107, 35121), True, 'import numpy as np\n'), ((35875, 35889), 'vaex.array_types.data_type', 'data_type', (['col'], {}), '(col)\n', (35884, 35889), False, 'from vaex.array_types import data_type\n'), ((39939, 39953), 'vaex.array_types.data_type', 'data_type', (['col'], {}), '(col)\n', (39948, 39953), False, 'from vaex.array_types import data_type\n'), ((55636, 55659), 'vaex.utils.yaml_load', 'vaex.utils.yaml_load', (['f'], {}), '(f)\n', (55656, 55659), False, 'import vaex\n'), ((57835, 57850), 'pathlib.Path', 'Path', (['self.path'], {}), '(self.path)\n', (57839, 57850), False, 'from pathlib import Path\n'), ((58030, 58099), 'vaex.utils.get_private_dir', 'vaex.utils.get_private_dir', (['"""dataset"""', 'o.scheme', 'o.netloc', 'o.path[1:]'], {}), "('dataset', o.scheme, o.netloc, o.path[1:])\n", (58056, 58099), False, 'import vaex\n'), ((3593, 3605), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3603, 3605), False, 'import uuid\n'), ((8629, 8640), 'pyarrow.string', 'pa.string', ([], {}), '()\n', (8638, 8640), True, 'import pyarrow as pa\n'), ((9891, 9938), 
'vaex.array_types.slice', 'vaex.array_types.slice', (['chunk', '(0)', 'needed_length'], {}), '(chunk, 0, needed_length)\n', (9913, 9938), False, 'import vaex\n'), ((10076, 10120), 'vaex.array_types.slice', 'vaex.array_types.slice', (['chunk', 'needed_length'], {}), '(chunk, needed_length)\n', (10098, 10120), False, 'import vaex\n'), ((38027, 38075), 'vaex.array_types.filter', 'vaex.array_types.filter', (['ar', 'self._filter[i1:i2]'], {}), '(ar, self._filter[i1:i2])\n', (38050, 38075), False, 'import vaex\n'), ((46308, 46322), 'vaex.array_types.data_type', 'data_type', (['col'], {}), '(col)\n', (46317, 46322), False, 'from vaex.array_types import data_type\n'), ((46434, 46448), 'vaex.array_types.data_type', 'data_type', (['col'], {}), '(col)\n', (46443, 46448), False, 'from vaex.array_types import data_type\n'), ((20658, 20680), 'vaex.dtype', 'vaex.dtype', (['self.dtype'], {}), '(self.dtype)\n', (20668, 20680), False, 'import vaex\n'), ((63029, 63057), 'vaex.array_types.to_numpy', 'vaex.array_types.to_numpy', (['v'], {}), '(v)\n', (63054, 63057), False, 'import vaex\n'), ((29392, 29447), 'vaex.array_types.slice', 'vaex.array_types.slice', (['ar', '(start - chunk_start)', 'length'], {}), '(ar, start - chunk_start, length)\n', (29414, 29447), False, 'import vaex\n'), ((29859, 29906), 'vaex.array_types.slice', 'vaex.array_types.slice', (['ar', '(start - chunk_start)'], {}), '(ar, start - chunk_start)\n', (29881, 29906), False, 'import vaex\n'), ((30384, 30421), 'vaex.array_types.slice', 'vaex.array_types.slice', (['ar', '(0)', 'length'], {}), '(ar, 0, length)\n', (30406, 30421), False, 'import vaex\n')] |
import numpy as np
import matplotlib.pyplot as plt
def kalman_xy(x, P, measurement, R,
motion = np.matrix('0. 0. 0. 0.').T,
Q = np.matrix(np.eye(4))):
"""
Parameters:
x: initial state 4-tuple of location and velocity: (x0, x1, x0_dot, x1_dot)
P: initial uncertainty convariance matrix
measurement: observed position
R: measurement noise
motion: external motion added to state vector x
Q: motion noise (same shape as P)
"""
return kalman(x, P, measurement, R, motion, Q,
F = np.matrix('''
1. 0. 1. 0.;
0. 1. 0. 1.;
0. 0. 1. 0.;
0. 0. 0. 1.
'''),
H = np.matrix('''
1. 0. 0. 0.;
0. 1. 0. 0.'''))
def kalman(x, P, measurement, R, motion, Q, F, H):
'''
Parameters:
x: initial state
P: initial uncertainty convariance matrix
measurement: observed position (same shape as H*x)
R: measurement noise (same shape as H)
motion: external motion added to state vector x
Q: motion noise (same shape as P)
F: next state function: x_prime = F*x
H: measurement function: position = H*x
Return: the updated and predicted new values for (x, P)
See also http://en.wikipedia.org/wiki/Kalman_filter
This version of kalman can be applied to many different situations by
appropriately defining F and H
'''
# UPDATE x, P based on measurement m
# distance between measured and current position-belief
y = np.matrix(measurement).T - H * x
S = H * P * H.T + R # residual convariance
K = P * H.T * S.I # Kalman gain
x = x + K*y
I = np.matrix(np.eye(F.shape[0])) # identity matrix
P = (I - K*H)*P
# PREDICT x, P based on motion
x = F*x + motion
P = F*P*F.T + Q
return x, P
def demo_kalman_xy():
x = np.matrix('0. 0. 0. 0.').T
P = np.matrix(np.eye(4))*1000 # initial uncertainty
N = 20
true_x = np.linspace(0.0, 10.0, N)
true_y = true_x**2
observed_x = true_x + 0.05*np.random.random(N)*true_x
observed_y = true_y + 0.05*np.random.random(N)*true_y
plt.plot(observed_x, observed_y, 'ro')
result = []
R = 0.01**2
for meas in zip(observed_x, observed_y):
x, P = kalman_xy(x, P, meas, R)
result.append((x[:2]).tolist())
kalman_x, kalman_y = zip(*result)
plt.plot(kalman_x, kalman_y, 'g-')
plt.show()
demo_kalman_xy() | [
"numpy.eye",
"numpy.random.random",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.matrix",
"matplotlib.pyplot.show"
] | [((2070, 2095), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', 'N'], {}), '(0.0, 10.0, N)\n', (2081, 2095), True, 'import numpy as np\n'), ((2239, 2277), 'matplotlib.pyplot.plot', 'plt.plot', (['observed_x', 'observed_y', '"""ro"""'], {}), "(observed_x, observed_y, 'ro')\n", (2247, 2277), True, 'import matplotlib.pyplot as plt\n'), ((2477, 2511), 'matplotlib.pyplot.plot', 'plt.plot', (['kalman_x', 'kalman_y', '"""g-"""'], {}), "(kalman_x, kalman_y, 'g-')\n", (2485, 2511), True, 'import matplotlib.pyplot as plt\n'), ((2516, 2526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2524, 2526), True, 'import matplotlib.pyplot as plt\n'), ((111, 135), 'numpy.matrix', 'np.matrix', (['"""0. 0. 0. 0."""'], {}), "('0. 0. 0. 0.')\n", (120, 135), True, 'import numpy as np\n'), ((167, 176), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (173, 176), True, 'import numpy as np\n'), ((1778, 1796), 'numpy.eye', 'np.eye', (['F.shape[0]'], {}), '(F.shape[0])\n', (1784, 1796), True, 'import numpy as np\n'), ((1961, 1985), 'numpy.matrix', 'np.matrix', (['"""0. 0. 0. 0."""'], {}), "('0. 0. 0. 0.')\n", (1970, 1985), True, 'import numpy as np\n'), ((566, 755), 'numpy.matrix', 'np.matrix', (['"""\n 1. 0. 1. 0.;\n 0. 1. 0. 1.;\n 0. 0. 1. 0.;\n 0. 0. 0. 1.\n """'], {}), '(\n """\n 1. 0. 1. 0.;\n 0. 1. 0. 1.;\n 0. 0. 1. 0.;\n 0. 0. 0. 1.\n """\n )\n', (575, 755), True, 'import numpy as np\n'), ((769, 865), 'numpy.matrix', 'np.matrix', (['"""\n 1. 0. 0. 0.;\n 0. 1. 0. 0."""'], {}), '(\n """\n 1. 0. 0. 0.;\n 0. 1. 0. 
0."""\n )\n', (778, 865), True, 'import numpy as np\n'), ((1624, 1646), 'numpy.matrix', 'np.matrix', (['measurement'], {}), '(measurement)\n', (1633, 1646), True, 'import numpy as np\n'), ((2007, 2016), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2013, 2016), True, 'import numpy as np\n'), ((2150, 2169), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (2166, 2169), True, 'import numpy as np\n'), ((2208, 2227), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (2224, 2227), True, 'import numpy as np\n')] |
import os, sys, inspect
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from core.bounds import WSR_mu_plus
from core.concentration import get_tlambda, get_lhat_from_table, get_lhat_from_table_binarysearch
import numpy as np
from scipy.optimize import brentq
from tqdm import tqdm
import pdb
def get_coco_example_loss_and_size_tables(lambdas_example_table):
lam_len = len(lambdas_example_table)
lam_low = min(lambdas_example_table)
lam_high = max(lambdas_example_table)
fname_loss = f'../coco/.cache/{lam_low}_{lam_high}_{lam_len}_example_loss_table.npy'
fname_sizes = f'../coco/.cache/{lam_low}_{lam_high}_{lam_len}_example_size_table.npy'
loss_table = np.load(fname_loss)
sizes_table = np.load(fname_sizes)
return loss_table, sizes_table
if __name__ == "__main__":
n_cal = int(4000)
dataset_replicates = 5
n_lambda = 10000
n_reps = int(1e2)
epsilon = 1e-10
maxiters = int(1e5)
num_grid_bennett = 1000
mus = [0.05, 0.1, 0.2]
deltas = [0.001, 0.01, 0.05, 0.1]
lambdas_table = np.linspace(0,1,n_lambda)
delta = .1
gamma = .1
# get losses
example_loss_table, _ = get_coco_example_loss_and_size_tables(lambdas_table)
example_loss_table = np.concatenate( (example_loss_table,)*dataset_replicates, axis=0 )
example_loss_table = example_loss_table + np.random.uniform(size=example_loss_table.shape)/100
risks = np.zeros((n_reps,))
# get the bound
bound_str = 'WSR'
bound_fn = WSR_mu_plus
tlambda = get_tlambda(1500,deltas,n_cal,None,None,None,epsilon,maxiters,bound_str,bound_fn)
for j in tqdm(range(n_reps)):
np.random.shuffle(example_loss_table)
calib_loss_table, val_loss_table = (example_loss_table[:n_cal], example_loss_table[n_cal:])
# get lhat (should be close to gamma)
lhat = get_lhat_from_table_binarysearch(calib_loss_table, lambdas_table, gamma, delta, tlambda, bound_str)
val_losses = val_loss_table[:,np.argmax(lambdas_table == lhat)]
risks[j] = val_losses.mean()
print(f"dataset replicates: {dataset_replicates}")
print((risks > gamma).mean())
print(risks)
| [
"core.concentration.get_lhat_from_table_binarysearch",
"core.concentration.get_tlambda",
"os.path.join",
"numpy.argmax",
"numpy.linspace",
"numpy.zeros",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.load",
"numpy.random.shuffle"
] | [((43, 74), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (55, 74), False, 'import os, sys, inspect\n'), ((684, 703), 'numpy.load', 'np.load', (['fname_loss'], {}), '(fname_loss)\n', (691, 703), True, 'import numpy as np\n'), ((722, 742), 'numpy.load', 'np.load', (['fname_sizes'], {}), '(fname_sizes)\n', (729, 742), True, 'import numpy as np\n'), ((1056, 1083), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_lambda'], {}), '(0, 1, n_lambda)\n', (1067, 1083), True, 'import numpy as np\n'), ((1241, 1307), 'numpy.concatenate', 'np.concatenate', (['((example_loss_table,) * dataset_replicates)'], {'axis': '(0)'}), '((example_loss_table,) * dataset_replicates, axis=0)\n', (1255, 1307), True, 'import numpy as np\n'), ((1419, 1438), 'numpy.zeros', 'np.zeros', (['(n_reps,)'], {}), '((n_reps,))\n', (1427, 1438), True, 'import numpy as np\n'), ((1522, 1616), 'core.concentration.get_tlambda', 'get_tlambda', (['(1500)', 'deltas', 'n_cal', 'None', 'None', 'None', 'epsilon', 'maxiters', 'bound_str', 'bound_fn'], {}), '(1500, deltas, n_cal, None, None, None, epsilon, maxiters,\n bound_str, bound_fn)\n', (1533, 1616), False, 'from core.concentration import get_tlambda, get_lhat_from_table, get_lhat_from_table_binarysearch\n'), ((1647, 1684), 'numpy.random.shuffle', 'np.random.shuffle', (['example_loss_table'], {}), '(example_loss_table)\n', (1664, 1684), True, 'import numpy as np\n'), ((1847, 1950), 'core.concentration.get_lhat_from_table_binarysearch', 'get_lhat_from_table_binarysearch', (['calib_loss_table', 'lambdas_table', 'gamma', 'delta', 'tlambda', 'bound_str'], {}), '(calib_loss_table, lambdas_table, gamma,\n delta, tlambda, bound_str)\n', (1879, 1950), False, 'from core.concentration import get_tlambda, get_lhat_from_table, get_lhat_from_table_binarysearch\n'), ((1354, 1402), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'example_loss_table.shape'}), '(size=example_loss_table.shape)\n', (1371, 1402), True, 
'import numpy as np\n'), ((1986, 2018), 'numpy.argmax', 'np.argmax', (['(lambdas_table == lhat)'], {}), '(lambdas_table == lhat)\n', (1995, 2018), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 19:17:17 2020
@author: djamal
"""
import numpy as np
import struct
import os
#import pandas as pd
class Filetranslation():
def fread(self, fid, n, type):
fmt, nbytes = {'uint8': ('B', 1), 'int16':('h', 2), 'int32':('i', 4), 'float32':('f', 4), 'float64':('d', 8)}[type]
return struct.unpack(fmt * n, fid.read(nbytes * n))
def load_binary_output(self, filename):
'''Ported from ReadFASTbinary.m by <NAME>, DTU Wind
Info about ReadFASTbinary.m:
% Author: <NAME>, National Renewable Energy Laboratory
% (c) 2012, National Renewable Energy Laboratory
%
% Edited for FAST v7.02.00b-bjj 22-Oct-2012
'''
FileFmtID_WithTime = 1 # File identifiers used in FAST
LenName = 10 # number of characters per channel name
LenUnit = 10 # number of characters per unit name
with open(filename, 'rb') as fid:
FileID = self.fread(fid, 1, 'int16') # FAST output file format, INT(2)
NumOutChans = self.fread(fid, 1, 'int32')[0] # The number of output channels, INT(4)
NT = self.fread(fid, 1, 'int32')[0] # The number of time steps, INT(4)
if FileID == FileFmtID_WithTime:
TimeScl = self.fread(fid, 1, 'float64') # The time slopes for scaling, REAL(8)
TimeOff = self.fread(fid, 1, 'float64') # The time offsets for scaling, REAL(8)
else:
TimeOut1 = self.fread(fid, 1, 'float64') # The first time in the time series, REAL(8)
TimeIncr = self.fread(fid, 1, 'float64') # The time increment, REAL(8)
ColScl = self.fread(fid, NumOutChans, 'float32') # The channel slopes for scaling, REAL(4)
ColOff = self.fread(fid, NumOutChans, 'float32') # The channel offsets for scaling, REAL(4)
LenDesc = self.fread(fid, 1, 'int32')[0] # The number of characters in the description string, INT(4)
DescStrASCII = self.fread(fid, LenDesc, 'uint8') # DescStr converted to ASCII
DescStr = "".join(map(chr, DescStrASCII)).strip()
ChanName = [] # initialize the ChanName cell array
for iChan in range(NumOutChans + 1):
ChanNameASCII = self.fread(fid, LenName, 'uint8') # ChanName converted to numeric ASCII
ChanName.append("".join(map(chr, ChanNameASCII)).strip())
ChanUnit = [] # initialize the ChanUnit cell array
for iChan in range(NumOutChans + 1):
ChanUnitASCII = self.fread(fid, LenUnit, 'uint8') # ChanUnit converted to numeric ASCII
ChanUnit.append("".join(map(chr, ChanUnitASCII)).strip()[1:-1])
# Get the channel time series
nPts = NT * NumOutChans # number of data points in the file
if FileID == FileFmtID_WithTime:
PackedTime = self.fread(fid, NT, 'int32') # read the time data
cnt = len(PackedTime)
if cnt < NT:
raise Exception('Could not read entire %s file: read %d of %d time values' % (filename, cnt, NT))
PackedData = self.fread(fid, nPts, 'int16') # read the channel data
cnt = len(PackedData)
if cnt < nPts:
raise Exception('Could not read entire %s file: read %d of %d values' % (filename, cnt, nPts))
# Scale the packed binary to real data
data = np.array(PackedData).reshape(NT, NumOutChans)
data = (data - ColOff) / ColScl
if FileID == FileFmtID_WithTime:
time = (np.array(PackedTime) - TimeOff) / TimeScl;
else:
time = TimeOut1 + TimeIncr * np.arange(NT)
data = np.concatenate([time.reshape(NT, 1), data], 1)
info = {'name': os.path.splitext(os.path.basename(filename))[0],
'description': DescStr,
'attribute_names': ChanName,
'attribute_units': ChanUnit}
return data, ChanName, info
| [
"numpy.array",
"os.path.basename",
"numpy.arange"
] | [((3960, 3980), 'numpy.array', 'np.array', (['PackedData'], {}), '(PackedData)\n', (3968, 3980), True, 'import numpy as np\n'), ((4112, 4132), 'numpy.array', 'np.array', (['PackedTime'], {}), '(PackedTime)\n', (4120, 4132), True, 'import numpy as np\n'), ((4210, 4223), 'numpy.arange', 'np.arange', (['NT'], {}), '(NT)\n', (4219, 4223), True, 'import numpy as np\n'), ((4337, 4363), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4353, 4363), False, 'import os\n')] |
#!/usr/bin/env python
import numpy as np
import rospy
import tf
import tf_conversions
from math import *
import geometry_msgs.msg
def phoxi_transformation_examples():
# original matrix
T_phoxi = np.array([
[-0.034784670752, 0.873298269909, -0.485942546454, 0.594404677631629],
[0.979858695089, -0.065869488303, -0.188515644360, 0.128504467275411],
[-0.196639172950, -0.482712484078, -0.853417654714, 1.427249342095321],
[0.000000000000, 0.000000000000, 0.000000000000, 1.000000000000]
])
# https://github.com/ros/geometry/blob/hydro-devel/tf/src/tf/transformations.py
# Example code for getting rpy angles and translation from the 4x4
# homogenous matrix via TF
scale, shear, angles, trans, persp = tf.transformations.decompose_matrix(
T_phoxi)
print("angles: {}".format(angles))
print("trans: {}".format(trans))
# rpy = tf.transformations.euler_from_matrix(T_phoxi)
# xyz = [T_phoxi[0,3], T_phoxi[1,3], T_phoxi[2,3]]
# print(rpy)
# print(xyz)
# T_phoxi is the transformation to the camera frame
# The transformation to be entered into the URDF is to the camera base
# frame
# One possibility: Take the transformation between camera frame and camera base_frame from the URDF,
# and add it to the transformation here manually.
rpy_camera_frame_to_base = [pi / 2, pi / 2, pi / 2]
xyz_camera_frame_to_base = [1, 2, 3]
T_camera_frame_to_base = tf.transformations.compose_matrix(
scale=None,
shear=None,
angles=rpy_camera_frame_to_base,
translate=xyz_camera_frame_to_base,
perspective=None)
T_phoxi_base = tf.transformations.concatenate_matrices(
T_phoxi, T_camera_frame_to_base)
# Now rpy and xyz can be extracted for the T_phoxi_base transformation and used in the scene URDF,
# as the transformation from world to camera base.
# NOTE: The signs and multiplications in the code above may have to be
# inverted.
return
def quaternion_eular_test():
q = geometry_msgs.msg.Quaternion(-0.5, 0.5, 0.5, 0.5)
rpy = tf.transformations.euler_from_quaternion([-0.5, 0.5, 0.5, 0.5])
print(rpy)
return
if __name__ == '__main__':
phoxi_transformation_examples()
# quaternion_eular_test()
| [
"tf.transformations.euler_from_quaternion",
"tf.transformations.compose_matrix",
"numpy.array",
"tf.transformations.decompose_matrix",
"tf.transformations.concatenate_matrices"
] | [((207, 466), 'numpy.array', 'np.array', (['[[-0.034784670752, 0.873298269909, -0.485942546454, 0.594404677631629], [\n 0.979858695089, -0.065869488303, -0.18851564436, 0.128504467275411], [-\n 0.19663917295, -0.482712484078, -0.853417654714, 1.427249342095321], [\n 0.0, 0.0, 0.0, 1.0]]'], {}), '([[-0.034784670752, 0.873298269909, -0.485942546454, \n 0.594404677631629], [0.979858695089, -0.065869488303, -0.18851564436, \n 0.128504467275411], [-0.19663917295, -0.482712484078, -0.853417654714, \n 1.427249342095321], [0.0, 0.0, 0.0, 1.0]])\n', (215, 466), True, 'import numpy as np\n'), ((764, 808), 'tf.transformations.decompose_matrix', 'tf.transformations.decompose_matrix', (['T_phoxi'], {}), '(T_phoxi)\n', (799, 808), False, 'import tf\n'), ((1473, 1626), 'tf.transformations.compose_matrix', 'tf.transformations.compose_matrix', ([], {'scale': 'None', 'shear': 'None', 'angles': 'rpy_camera_frame_to_base', 'translate': 'xyz_camera_frame_to_base', 'perspective': 'None'}), '(scale=None, shear=None, angles=\n rpy_camera_frame_to_base, translate=xyz_camera_frame_to_base,\n perspective=None)\n', (1506, 1626), False, 'import tf\n'), ((1679, 1751), 'tf.transformations.concatenate_matrices', 'tf.transformations.concatenate_matrices', (['T_phoxi', 'T_camera_frame_to_base'], {}), '(T_phoxi, T_camera_frame_to_base)\n', (1718, 1751), False, 'import tf\n'), ((2121, 2184), 'tf.transformations.euler_from_quaternion', 'tf.transformations.euler_from_quaternion', (['[-0.5, 0.5, 0.5, 0.5]'], {}), '([-0.5, 0.5, 0.5, 0.5])\n', (2161, 2184), False, 'import tf\n')] |
"""
Solver D3Q6 for the advection equation on the 3D-torus
d_t(u) + cx d_x(u) + cy d_y(u) + c_z d_z(u) = 0, t > 0, 0 < x,y,z < 1,
u(t=0,x,y,z) = u0(x,y,z),
u(t,x=0,y,z) = u(t,x=1,y,z) 0 < y,z < 1,
u(t,x,y=0,z) = u(t,x,y=1,z) 0 < x,z < 1,
u(t,x,y,z=0) = u(t,x,y,z=1) 0 < x,y < 1,
the solution is
u(t,x,y,z) = u0(x-cx*t,y-cy*t,z-cz*t)
test: True
"""
from six.moves import range
import numpy as np
import sympy as sp
import pylbm
u, X, Y, Z, LA = sp.symbols('u, X, Y, Z, lambda')
def save(sol, im):
x, y, z = sol.domain.x, sol.domain.y, sol.domain.z
h5 = pylbm.H5File(sol.domain.mpi_topo, 'advection', './advection', im)
h5.set_grid(x, y, z)
h5.add_scalar('u', sol.m[u])
h5.save()
def run(dx, Tf, generator="cython", sorder=None, withPlot=True):
"""
Parameters
----------
dx: double
spatial step
Tf: double
final time
generator: pylbm generator
sorder: list
storage order
withPlot: boolean
if True plot the solution otherwise just compute the solution
"""
# advective velocity
ux, uy, uz = .5, .2, .1
# domain of the computation
xmin, xmax, ymin, ymax, zmin, zmax = 0., 1., 0., 1., 0., 1.
def u0(x, y, z):
xm, ym, zm = .5*(xmin+xmax), .5*(ymin+ymax), .5*(zmin+zmax)
return .5*np.ones((x.size, y.size, z.size)) \
+ .5*(((x-xm)**2+(y-ym)**2+(z-zm)**2)<.25**2)
s = 1.
la = 1.
d = {
'box':{'x':[xmin, xmax], 'y':[ymin, ymax], 'z':[zmin, zmax], 'label':-1},
'space_step':dx,
'scheme_velocity':la,
'schemes':[{
'velocities': list(range(1,7)),
'conserved_moments':[u],
'polynomials': [1, LA*X, LA*Y, LA*Z, X**2-Y**2, X**2-Z**2],
'equilibrium': [u, ux*u, uy*u, uz*u, 0., 0.],
'relaxation_parameters': [0., s, s, s, s, s],
'init':{u: u0},
},],
'parameters': {LA: la},
'generator': generator,
}
sol = pylbm.Simulation(d, sorder=sorder)
im = 0
while sol.t < Tf:
sol.one_time_step()
if withPlot:
im += 1
save(sol, im)
return sol
if __name__ == '__main__':
dx = 1./128
Tf = 1.
run(dx, Tf)
| [
"six.moves.range",
"numpy.ones",
"pylbm.Simulation",
"pylbm.H5File",
"sympy.symbols"
] | [((459, 491), 'sympy.symbols', 'sp.symbols', (['"""u, X, Y, Z, lambda"""'], {}), "('u, X, Y, Z, lambda')\n", (469, 491), True, 'import sympy as sp\n'), ((576, 641), 'pylbm.H5File', 'pylbm.H5File', (['sol.domain.mpi_topo', '"""advection"""', '"""./advection"""', 'im'], {}), "(sol.domain.mpi_topo, 'advection', './advection', im)\n", (588, 641), False, 'import pylbm\n'), ((2001, 2035), 'pylbm.Simulation', 'pylbm.Simulation', (['d'], {'sorder': 'sorder'}), '(d, sorder=sorder)\n', (2017, 2035), False, 'import pylbm\n'), ((1321, 1354), 'numpy.ones', 'np.ones', (['(x.size, y.size, z.size)'], {}), '((x.size, y.size, z.size))\n', (1328, 1354), True, 'import numpy as np\n'), ((1641, 1652), 'six.moves.range', 'range', (['(1)', '(7)'], {}), '(1, 7)\n', (1646, 1652), False, 'from six.moves import range\n')] |
import numpy as np
import re
import pandas as pd
import networkx as nx
from cloudvolume import CloudVolume, Skeleton
from io import StringIO
import os
from brainlit.utils.util import (
check_type,
check_size,
)
from sklearn.metrics import pairwise_distances_argmin_min
import warnings
class NeuronTrace:
    """Neuron Trace class to handle neuron traces as swcs and s3 skeletons

    Arguments
    ---------
    path : str
        Path to either s3 bucket (url) or swc file (filepath).
    seg_id : int
        If s3 bucket path is provided, the segment number to pull, default None.
    mip : int
        If s3 bucket path is provided, the resolution to use for scaling, default None.
    rounding : bool
        If s3 is provided, specifies if it should be rounded, default True
    read_offset : bool
        If swc is provided, whether offset should be read from file, default False.
    fill_missing: bool
        Always passes directly into 'CloudVolume()' function to fill missing skeleton values with 0s, default True.
    use_https : bool
        Always passes directly into 'CloudVolume()' function to set use_https to desired value, default True.

    Attributes
    ----------
    path : str
        Path to either s3 bucket (url) or swc file (filepath)
    input_type : bool
        Specifies whether input file is 'swc' or 'skel'
    df : :class:`pandas.DataFrame`
        Indices, coordinates, and parents of each node
    args : tuple
        Stores arguments for df - offset, color, cc, branch
    seg_id : int
        If s3 bucket path is provided, the segment number to pull
    mip : None,int
        If s3 bucket path is provided, the resolution to use for scaling

    Example
    ----------
    >>> swc_path = "./data/data_octree/consensus-swcs/2018-08-01_G-002_consensus.swc"
    >>> s3_path = "s3://open-neurodata/brainlit/brain1_segments"
    >>> seg_id = 11
    >>> mip = 2
    >>> swc_trace = NeuronTrace(swc_path)
    >>> s3_trace = NeuronTrace(s3_path,seg_id,mip)
    """

    def __init__(
        self,
        path,
        seg_id=None,
        mip=None,
        rounding=True,
        read_offset=False,
        fill_missing=True,
        use_https=False,
    ):
        self.path = path
        self.input_type = None
        self.df = None
        self.args = []
        self.seg_id = seg_id
        self.mip = mip
        self.rounding = rounding
        self.fill_missing = fill_missing
        self.use_https = use_https

        check_type(path, str)
        check_type(seg_id, (type(None), int))
        check_type(mip, (type(None), int))
        check_type(read_offset, bool)
        check_type(rounding, bool)

        # seg_id and mip must be given together ('skel') or not at all ('swc')
        if (seg_id is None) != (mip is None):
            raise ValueError(
                "For 'swc' do not input mip or seg_id, and for 'skel', provide both mip and seg_id"
            )

        # first check if it is a skel
        if seg_id is not None and mip is not None:
            cv = CloudVolume(
                path, mip=mip, fill_missing=fill_missing, use_https=use_https
            )
            skeleton = cv.skeleton.get(seg_id)
            if type(skeleton) is Skeleton:
                self.input_type = "skel"
        # else, check if it is a swc by checking if file exists/extension is .swc
        elif os.path.isfile(self.path) and os.path.splitext(path)[-1].lower() == ".swc":
            self.input_type = "swc"

        # if it is not a swc or skeleton, raise error
        if self.input_type not in ("swc", "skel"):
            raise ValueError("Did not input 'swc' filepath or 'skel' url")

        # next, convert to a dataframe
        if self.input_type == "swc" and not read_offset:
            df, offset, color, cc, branch = self._read_swc(self.path)
            self.df = df
            self.args = [offset, color, cc, branch]
        elif self.input_type == "swc" and read_offset:
            df, color, cc, branch = self._read_swc_offset(path)
            self.df = df
            self.args = [None, color, cc, branch]
        elif self.input_type == "skel":
            # NOTE: removed dead no-op statement `(self.path, seg_id, mip)`
            # that was present in the original code.
            self.df = self._read_s3(path, seg_id, mip, rounding)

    # public methods
    def get_df_arguments(self):
        """Gets arguments for df - offset, color, cc, branch

        Returns
        -------
        self.args : list
            list of arguments for df, if found - offset, color, cc, branch

        Example
        -------
        >>> swc_trace.get_df_arguments()
        >>> [[73954.8686, 17489.532566, 34340.365689], [1.0, 1.0, 1.0], nan, nan]
        """
        return self.args

    def get_df(self):
        """Gets the dataframe providing indices, coordinates, and parents of each node

        Returns
        -------
        self.df : :class:`pandas.DataFrame`
            dataframe providing indices, coordinates, and parents of each node

        Example
        -------
        >>> swc_trace.get_df()
        >>>     sample  structure          x          y         z    r  parent
            0        1          0 -52.589700  -1.448032 -1.228827  1.0      -1
            1        2          0 -52.290940  -1.448032 -1.228827  1.0       1
            ..     ...        ...        ...        ...       ...  ...     ...
            152    153          6  48.092162  15.294842 -7.159252  1.0     152
        """
        return self.df

    def get_skel(self, benchmarking=False, origin=None):
        """Gets a skeleton version of dataframe, if swc input is provided

        Arguments
        ----------
        origin : None, numpy array with shape (3,1) (default = None)
            origin of coordinate frame in microns, (default: None assumes (0,0,0) origin)
        benchmarking : bool
            For swc files, specifies whether swc file is from benchmarking dataset, to obtain skeleton ID

        Returns
        --------
        skel : cloudvolume.Skeleton
            Skeleton object of given SWC file

        Example
        -------
        >>> swc_trace.get_skel(benchmarking=True)
        >>> Skeleton(segid=, vertices=(shape=153, float32), edges=(shape=152, uint32), ...)
        """
        check_type(origin, (type(None), np.ndarray))
        check_type(benchmarking, bool)
        if isinstance(origin, np.ndarray):
            check_size(origin)

        if self.input_type == "swc":
            return self._swc2skeleton(self.path, benchmarking, origin)
        # __init__ guarantees input_type is either "swc" or "skel"
        cv = CloudVolume(
            self.path,
            mip=self.mip,
            fill_missing=self.fill_missing,
            use_https=self.use_https,
        )
        return cv.skeleton.get(self.seg_id)

    def get_df_voxel(self, spacing, origin=None):
        """Converts coordinates in pd.DataFrame from spatial units to voxel units

        Arguments
        ----------
        spacing : :class:`numpy.array`
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z])
        origin : :class:`numpy.array`
            Origin of the spatial coordinate. Default is (0,0,0). Assumed to be
            np.array([x,y,z])

        Returns
        -------
        df_voxel : :class:`pandas.DataFrame`
            Indicies, coordinates, and parents of each node in the swc. Coordinates
            are in voxel units.

        Example
        -------
        >>> swc_trace.get_df_voxel(spacing=np.asarray([2,2,2]))
        """
        # None-sentinel instead of a mutable np.array default argument;
        # the effective default is unchanged.
        if origin is None:
            origin = np.array([0, 0, 0])
        check_type(spacing, np.ndarray)
        check_size(spacing)
        check_type(origin, np.ndarray)
        check_size(origin)

        return self._df_in_voxel(self.df, spacing, origin)

    def _build_graph(self, spacing, origin):
        """Validate optional (spacing, origin) and return the neuron as a DiGraph.

        Shared by the public graph/path accessors: when ``spacing`` is given
        the dataframe is first converted to voxel coordinates (``origin``
        defaults to (0,0,0)); otherwise the stored dataframe is used as-is.
        """
        check_type(spacing, (type(None), np.ndarray))
        if isinstance(spacing, np.ndarray):
            check_size(spacing)
        check_type(origin, (type(None), np.ndarray))
        if isinstance(origin, np.ndarray):
            check_size(origin)

        if isinstance(spacing, np.ndarray):
            if origin is None:
                origin = np.array([0, 0, 0])
            return self._df_to_graph(self._df_in_voxel(self.df, spacing, origin))
        return self._df_to_graph(self.df)

    def get_graph(self, spacing=None, origin=None):
        """Converts dataframe in either spatial or voxel coordinates into a directed graph.
        Will convert to voxel coordinates if spacing is specified.

        Arguments
        ----------
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first. Default is None.
        origin : None, :class:`numpy.array` (default = None)
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        G : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.

        Example
        -------
        >>> swc_trace.get_graph()
        >>> <networkx.classes.digraph.DiGraph at 0x7f81a83937f0>
        """
        return self._build_graph(spacing, origin)

    def get_paths(self, spacing=None, origin=None):
        """Converts dataframe in either spatial or voxel coordinates into a list of paths.
        Will convert to voxel coordinates if spacing is specified.

        Arguments
        ----------
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first. Default is None.
        origin : None, :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        paths : list
            List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
            units. Each array is one path.

        Example
        -------
        >>> swc_trace.get_paths()[0][1:4]
        >>> array([[-52, -1, -1],
                [-51, -1, 0],
                [-51, -1, 0]], dtype=object)
        """
        G = self._build_graph(spacing, origin)
        return self._graph_to_paths(G)

    def generate_df_subset(
        self, vox_in_img_list, subneuron_start=None, subneuron_end=None
    ):
        """Read a new subset dataframe in coordinates in img spacing.
        Specify specific range of vertices from dataframe if desired

        Arguments
        ----------
        vox_in_img_list : list
            List of voxels
        subneuron_start : None, int (default = None)
            Provides start index, if specified, to apply function to a portion of the dataframe
            Default is None.
        subneuron_end : None, int (default = None)
            Provides end index, if specified, to apply function to a portion of the dataframe
            Default is None.

        Returns
        -------
        df : :class:`pandas.DataFrame`
            Indicies, coordinates (in img spacing) and parents of each node.
            Coordinates are in spatial units.

        Example
        -------
        >>> swc_trace.generate_df_subset([[10,10,10],[10,10,10],[10,10,10]], 5, 8)
        >>>     sample  structure   x   y   z    r  parent
            5        6          0  10  10  10  1.0       5
            6        7          0  10  10  10  1.0       6
            7        8          0  10  10  10  1.0       7
        """
        check_type(vox_in_img_list, list)
        check_type(subneuron_start, (type(None), int))
        check_type(subneuron_end, (type(None), int))
        # start and end indices must be given together or not at all
        if (subneuron_start is None) != (subneuron_end is None):
            raise ValueError(
                "Provide both starting and ending vertices to use for the subneuron"
            )

        df = self.df
        if subneuron_start is not None:
            # subneuron range specified
            df = self.df[subneuron_start:subneuron_end]
        return self._generate_df_subset(df, vox_in_img_list)

    def get_bfs_subgraph(self, node_id, depth, df=None, spacing=None, origin=None):
        """
        Creates a spanning subgraph from a seed node and parent graph using BFS.

        Arguments
        ----------
        node_id : int
            The id of the node to use as a seed.
            If df is not None this become the node index.
        depth : int
            The max depth for BFS to traven in each direction.
        df : None, DataFrame (default = None)
            Dataframe storing indices.
            In some cases indexing by row number is preferred.
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first. Default is None.
        origin : :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        G_sub : :class:`networkx.classes.digraph.DiGraph`
            Subgraph
        tree : DiGraph
            The tree returned by BFS.
        paths : list
            List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
            units. Each array is one path.

        Example
        -------
        >>> swc_trace.get_bfs_subgraph(node_id=11, depth=2)
        """
        check_type(node_id, (list, int))
        check_type(depth, int)
        check_type(df, (type(None), pd.core.frame.DataFrame))

        G = self._build_graph(spacing, origin)
        G_sub, tree = self._get_bfs_subgraph(G, node_id, depth, df)
        paths = self._graph_to_paths(G_sub)
        return G_sub, tree, paths

    def get_sub_neuron(self, bounding_box, spacing=None, origin=None):
        """Returns sub-neuron with node coordinates bounded by start and end

        Arguments
        ----------
        bounding_box : tuple or list or None
            Defines a bounding box around a sub-region around the neuron. Length 2
            tuple/list. First element is the coordinate of one corner (inclusive)
            and second element is the coordinate of the opposite corner (exclusive).
            Both coordinates are numpy.array([x,y,z])in voxel units.
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first. Default is None.
        origin : :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        G_sub : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.

        Example
        -------
        >>> swc_trace.get_sub_neuron([[1,2,4],[1,2,3]])
        >>> <networkx.classes.digraph.DiGraph at 0x7f81a95d1e50>
        """
        check_type(bounding_box, (tuple, list))
        if len(bounding_box) != 2:
            raise ValueError("Bounding box must be length 2")

        G = self._build_graph(spacing, origin)
        return self._get_sub_neuron(G, bounding_box)

    def get_sub_neuron_paths(self, bounding_box, spacing=None, origin=None):
        """Returns paths of the sub-neuron with node coordinates bounded by start and end

        Arguments
        ----------
        bounding_box : tuple or list or None
            Defines a bounding box around a sub-region around the neuron. Length 2
            tuple/list. First element is the coordinate of one corner (inclusive)
            and second element is the coordinate of the opposite corner (exclusive).
            Both coordinates are numpy.array([x,y,z])in voxel units.
        spacing : None, :class:`numpy.array` (default = None)
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z]).
            Provided if graph should convert to voxel coordinates first. Default is None.
        origin : :class:`numpy.array`
            Origin of the spatial coordinate, if converting to voxels. Default is None.
            Assumed to be np.array([x,y,z])

        Returns
        -------
        paths : list
            List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
            units. Each array is one path.

        Example
        -------
        >>> swc_trace.get_sub_neuron_paths([[1,2,4],[1,2,3]])
        >>> array([], dtype=object)
        """
        check_type(bounding_box, (tuple, list))
        if len(bounding_box) != 2:
            raise ValueError("Bounding box must be length 2")

        G = self._build_graph(spacing, origin)
        G_sub = self._get_sub_neuron(G, bounding_box)
        return self._graph_to_paths(G_sub)

    @staticmethod
    def ssd(pts1, pts2):
        """Compute significant spatial distance metric between two traces as defined in APP1.

        Args:
            pts1 (np.array): array containing coordinates of points of trace 1. shape: npoints x ndims
            pts2 (np.array): array containing coordinates of points of trace 1. shape: npoints x ndims

        Returns:
            [float]: significant spatial distance as defined by APP1

        Example
        -------
        >>> NeuronTrace.ssd(pts1, pts2)
        >>> 6.247937554557103
        """
        check_type(pts1, np.ndarray)
        check_type(pts2, np.ndarray)

        # keep only "significant" distances (>= 2) in each direction
        _, dists1 = pairwise_distances_argmin_min(pts1, pts2)
        dists1 = dists1[dists1 >= 2]
        _, dists2 = pairwise_distances_argmin_min(pts2, pts1)
        dists2 = dists2[dists2 >= 2]

        # If there is no significant distance between the 2 sets
        if len(dists1) == 0 and len(dists2) == 0:
            return 0
        # Else, calculate the mean
        return np.mean(np.concatenate([dists1, dists2]))

    # private methods
    def _read_swc(self, path):
        """
        Read a single swc file

        Arguments:
            path {string} -- path to file

        Returns:
            df {pandas dataframe} -- indices, coordinates, and parents of each node
            offset {list of floats} -- offset value of fragment
            color {list of ints} -- color
            cc {int} -- cc value, from file name
            branch {int} -- branch number, from file name
        """
        offset = np.nan
        color = np.nan
        cc = np.nan
        branch = np.nan
        offset_found = False
        header_length = -1

        # scan the '#' header for OFFSET/COLOR/NAME metadata; header_length
        # ends up as the number of lines to skip when parsing the table below.
        # `with` guarantees the handle is closed (the original leaked it).
        with open(path, "r") as swc_file:
            in_header = True
            while in_header:
                line = swc_file.readline().split()
                if "OFFSET" in line:
                    offset_found = True
                    idx = line.index("OFFSET") + 1
                    offset = [float(line[i]) for i in np.arange(idx, idx + 3)]
                elif "COLOR" in line:
                    idx = line.index("COLOR") + 1
                    rgb = line[idx].split(",")
                    color = [float(c) for c in rgb]
                elif "NAME" in line:
                    idx = line.index("NAME") + 1
                    name = re.split(r"_|-|\.", line[idx])
                    try:
                        cc = int(name[name.index("cc") + 1])
                        branch = int(name[name.index("branch") + 1])
                    except ValueError:
                        # file name does not encode cc/branch; leave as nan
                        pass
                elif line[0] != "#":
                    in_header = False
                header_length += 1

        if not offset_found:
            warnings.warn("No offset information found in: " + path)
            offset = [float(0) for i in range(3)]

        # read coordinates
        df = pd.read_table(
            path,
            names=["sample", "structure", "x", "y", "z", "r", "parent"],
            skiprows=header_length,
            delimiter=r"\s+",
        )
        return df, offset, color, cc, branch

    def _read_swc_offset(self, path):
        """Read an swc and fold the header OFFSET into the x/y/z columns."""
        df, offset, color, cc, branch = self._read_swc(path)
        df["x"] = df["x"] + offset[0]
        df["y"] = df["y"] + offset[1]
        df["z"] = df["z"] + offset[2]
        return df, color, cc, branch

    def _read_s3(self, s3_path, seg_id, mip, rounding=True):
        """Read a s3 bucket path to a skeleton object
        into a pandas dataframe.

        Parameters
        ----------
        s3_path : str
            String representing the path to the s3 bucket
        seg_id : int
            The segement number to pull
        mip : int
            The resolution to use for scaling
        rounding: bool, Optional
            True is default, false if swc shouldn't be rounded

        Returns
        -------
        df : :class:`pandas.DataFrame`
            Indicies, coordinates, and parents of each node in the swc.
            Coordinates are in spatial units.
        """
        cv = CloudVolume(
            s3_path, mip=mip, fill_missing=self.fill_missing, use_https=self.use_https
        )
        skeleton = cv.skeleton.get(seg_id)
        swc_string = skeleton.to_swc()
        string_io = StringIO(swc_string)

        # count header lines ('#'-prefixed) so they can be skipped below
        splitted_string = swc_string.split("\n")
        in_h = True
        h_len = -1
        while in_h:
            h_len += 1
            line = splitted_string[h_len]
            if len(line) == 0 or line[0] != "#":
                in_h = False

        df = pd.read_table(
            string_io,
            names=["sample", "structure", "x", "y", "z", "r", "parent"],
            skiprows=h_len,
            sep=" ",
        )

        # round swc files when reading
        if rounding:
            res = cv.scales[mip]["resolution"]
            df["x"] = np.round(df["x"] / res[0])
            df["y"] = np.round(df["y"] / res[1])
            df["z"] = np.round(df["z"] / res[2])
        return df

    def _generate_df_subset(self, swc_df, vox_in_img_list):
        """Read a new subset of swc dataframe in coordinates in img spacing.

        Parameters
        ----------
        swc_df : pd.DataFrame
            DataFrame containing information from swc file
        vox_in_img_list: list
            List of voxels, as [x_coords, y_coords, z_coords]

        Returns
        -------
        df : :class:`pandas.DataFrame`
            Indicies, coordinates (in img spacing) and parents of each node in the swc.
            Coordinates are in spatial units.
        """
        df_new = swc_df.copy()
        df_new["x"], df_new["y"], df_new["z"] = (
            vox_in_img_list[0],
            vox_in_img_list[1],
            vox_in_img_list[2],
        )
        return df_new

    def _space_to_voxel(self, spatial_coord, spacing, origin=None):
        """Converts coordinate from spatial units to voxel units.

        Parameters
        ----------
        spatial_coord : :class:`numpy.array`
            3D coordinate in spatial units. Assumed to be np.array[(x,y,z)]
        spacing : :class:`numpy.array`
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z])
        origin : :class:`numpy.array`
            Origin of the spatial coordinate. Default is (0,0,0). Assumed to be
            np.array([x,y,z])

        Returns
        -------
        voxel_coord : :class:`numpy.array`
            Coordinate in voxel units. Assumed to be np.array([x,y,z])
        """
        if origin is None:
            origin = np.array([0, 0, 0])
        voxel_coord = np.round(np.divide(spatial_coord - origin, spacing))
        return voxel_coord.astype(np.int64)

    def _df_in_voxel(self, df, spacing, origin=None):
        """Converts coordinates in pd.DataFrame representing swc from spatial units
        to voxel units

        Parameters
        ----------
        df : :class:`pandas.DataFrame`
            Indicies, coordinates, and parents of each node in the swc. Coordinates
            are in spatial units.
        spacing : :class:`numpy.array`
            Conversion factor (spatial units/voxel). Assumed to be np.array([x,y,z])
        origin : :class:`numpy.array`
            Origin of the spatial coordinate. Default is (0,0,0). Assumed to be
            np.array([x,y,z])

        Returns
        -------
        df_voxel : :class:`pandas.DataFrame`
            Indicies, coordinates, and parents of each node in the swc. Coordinates
            are in voxel units.
        """
        if origin is None:
            origin = np.array([0, 0, 0])
        df_voxel = df.copy()
        coords = [
            self._space_to_voxel(row[["x", "y", "z"]].to_numpy(), spacing, origin)
            for _, row in df_voxel.iterrows()
        ]
        df_voxel["x"] = [c[0] for c in coords]
        df_voxel["y"] = [c[1] for c in coords]
        df_voxel["z"] = [c[2] for c in coords]
        return df_voxel

    def _df_to_graph(self, df_voxel):
        """Converts dataframe of swc in voxel coordinates into a directed graph

        Parameters
        ----------
        df_voxel : :class:`pandas.DataFrame`
            Indicies, coordinates, and parents of each node in the swc. Coordinates
            are in voxel units.

        Returns
        -------
        G : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.
        """
        G = nx.DiGraph()
        # add nodes
        for _, row in df_voxel.iterrows():
            id = int(row["sample"])
            G.add_node(id)
            G.nodes[id]["x"] = int(row["x"])
            G.nodes[id]["y"] = int(row["y"])
            G.nodes[id]["z"] = int(row["z"])
        # add edges; the column minimum (the root marker, typically -1) is
        # loop-invariant, so hoist it out (original recomputed it per row: O(n^2))
        min_parent = min(df_voxel["parent"])
        for _, row in df_voxel.iterrows():
            child = int(row["sample"])
            parent = int(row["parent"])
            if parent > min_parent:
                G.add_edge(parent, child)
        return G

    def _get_sub_neuron(self, G, bounding_box):
        """Returns sub-neuron with node coordinates bounded by start and end

        Parameters
        ----------
        G : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.
        bounding_box : tuple or list or None
            Defines a bounding box around a sub-region around the neuron. Length 2
            tuple/list. First element is the coordinate of one corner (inclusive)
            and second element is the coordinate of the opposite corner (exclusive).
            Both coordinates are numpy.array([x,y,z])in voxel units.

        Returns
        -------
        G_sub : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.
        """
        G_sub = G.copy()  # make copy of input G
        start = bounding_box[0]
        end = bounding_box[1]

        # remove nodes where neither the node nor any neighbor lies in the box
        for node in list(G_sub.nodes):
            neighbors = list(G_sub.successors(node)) + list(G_sub.predecessors(node))
            remove = True
            for id in neighbors + [node]:
                x = G_sub.nodes[id]["x"]
                y = G_sub.nodes[id]["y"]
                z = G_sub.nodes[id]["z"]
                if (
                    start[0] <= x < end[0]
                    and start[1] <= y < end[1]
                    and start[2] <= z < end[2]
                ):
                    remove = False
            if remove:
                G_sub.remove_node(node)

        # set origin to start of bounding box
        for id in list(G_sub.nodes):
            G_sub.nodes[id]["x"] = G_sub.nodes[id]["x"] - start[0]
            G_sub.nodes[id]["y"] = G_sub.nodes[id]["y"] - start[1]
            G_sub.nodes[id]["z"] = G_sub.nodes[id]["z"] - start[2]
        return G_sub

    def _graph_to_paths(self, G):
        """Converts neuron represented as a directed graph with no cycles into a
        list of paths.

        Parameters
        ----------
        G : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph. Coordinates x,y,z are
            node attributes accessed by keys 'x','y','z' respectively.

        Returns
        -------
        paths : list
            List of Nx3 numpy.array. Rows of the array are 3D coordinates in voxel
            units. Each array is one path.
        """
        G_cp = G.copy()  # make copy of input G
        branches = []
        while len(G_cp.edges) != 0:  # iterate over branches
            # peel off the longest remaining branch (list of nodes on the path)
            longest = nx.algorithms.dag.dag_longest_path(G_cp)
            branches.append(longest)
            # remove longest branch
            for idx in range(len(longest) - 1):
                G_cp.remove_edge(longest[idx], longest[idx + 1])

        # convert branches into list of paths
        paths = []
        for branch in branches:
            # get vertices in branch as n by 3 numpy.array; n = length of branch
            path = np.zeros((len(branch), 3), dtype=np.int64)
            for idx, node in enumerate(branch):
                path[idx, 0] = np.int64(G_cp.nodes[node]["x"])
                path[idx, 1] = np.int64(G_cp.nodes[node]["y"])
                path[idx, 2] = np.int64(G_cp.nodes[node]["z"])
            paths.append(path)
        return np.array(paths, dtype="object")

    def _get_bfs_subgraph(self, G, node_id, depth, df=None):
        """
        Creates a spanning subgraph from a seed node and parent graph using BFS.

        Parameters
        ----------
        G : :class:`networkx.classes.digraph.DiGraph`
            Neuron from swc represented as directed graph.
        node_id : int
            The id of the node to use as a seed.
            If df is not None this become the node index.
        depth : int
            The max depth for BFS to traven in each direction.
        df : None, DataFrame (default = None)
            Dataframe storing indices.
            In some cases indexing by row number is preferred.

        Returns
        -------
        G_sub : :class:`networkx.classes.digraph.DiGraph`
            Subgraph
        tree : DiGraph
            The tree returned by BFS.
        """
        if df is not None:
            # translate row index into the node's 'sample' id
            node_id = int(df.iloc[node_id]["sample"])
        G_undir = G.to_undirected()
        tree = nx.bfs_tree(G_undir, node_id, depth_limit=depth)  # forward BFS
        G_sub = nx.subgraph(G, list(tree.nodes))
        return G_sub, tree

    def _swc2skeleton(self, swc_file, benchmarking=False, origin=None):
        """Converts swc file into Skeleton object

        Arguments:
            swc_file {str} -- path to SWC file

        Keyword Arguments:
            origin {numpy array with shape (3,1)} -- origin of coordinate frame in microns, (default: None assumes (0,0,0) origin)

        Returns:
            skel {cloudvolume.Skeleton} -- Skeleton object of given SWC file
        """
        with open(swc_file, "r") as f:
            contents = f.read()
        # get every line that starts with a hashtag
        comments = [i.split(" ") for i in contents.split("\n") if i.startswith("#")]
        offset = np.array([float(j) for i in comments for j in i[2:] if "OFFSET" in i])
        color = [float(j) for i in comments for j in i[2].split(",") if "COLOR" in i]
        # set alpha to 0.0 so skeleton is opaque
        color.append(0.0)
        color = np.array(color, dtype="float32")
        skel = Skeleton.from_swc(contents)
        # physical units
        # space can be 'physical' or 'voxel'
        skel.space = "physical"
        # hard coding parsing the id from the filename
        idx = swc_file.find("G")

        if benchmarking:
            idx1 = swc_file.find(
                "_", swc_file.find("_") + 1
            )  # finding second occurence of "_"
            idx2 = swc_file.find(".")
            skel.id = swc_file[idx1 + 1 : idx2]
        else:
            skel.id = int(swc_file[idx + 2 : idx + 5])

        # hard coding changing data type of vertex_types
        skel.extra_attributes[-1]["data_type"] = "float32"
        skel.extra_attributes.append(
            {"id": "vertex_color", "data_type": "float32", "num_components": 4}
        )
        # add offset to vertices and shift by origin
        skel.vertices += offset
        if origin is not None:
            skel.vertices -= origin
        # convert from microns to nanometers
        skel.vertices *= 1000
        skel.vertex_color = np.zeros((skel.vertices.shape[0], 4), dtype="float32")
        skel.vertex_color[:, :] = color
        return skel
| [
"brainlit.utils.util.check_type",
"numpy.array",
"numpy.divide",
"numpy.arange",
"numpy.mean",
"re.split",
"numpy.int64",
"sklearn.metrics.pairwise_distances_argmin_min",
"networkx.DiGraph",
"numpy.concatenate",
"warnings.warn",
"io.StringIO",
"numpy.round",
"cloudvolume.CloudVolume",
"o... | [((2585, 2606), 'brainlit.utils.util.check_type', 'check_type', (['path', 'str'], {}), '(path, str)\n', (2595, 2606), False, 'from brainlit.utils.util import check_type, check_size\n'), ((2704, 2733), 'brainlit.utils.util.check_type', 'check_type', (['read_offset', 'bool'], {}), '(read_offset, bool)\n', (2714, 2733), False, 'from brainlit.utils.util import check_type, check_size\n'), ((2742, 2768), 'brainlit.utils.util.check_type', 'check_type', (['rounding', 'bool'], {}), '(rounding, bool)\n', (2752, 2768), False, 'from brainlit.utils.util import check_type, check_size\n'), ((7215, 7245), 'brainlit.utils.util.check_type', 'check_type', (['benchmarking', 'bool'], {}), '(benchmarking, bool)\n', (7225, 7245), False, 'from brainlit.utils.util import check_type, check_size\n'), ((7796, 7815), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (7804, 7815), True, 'import numpy as np\n'), ((9249, 9280), 'brainlit.utils.util.check_type', 'check_type', (['spacing', 'np.ndarray'], {}), '(spacing, np.ndarray)\n', (9259, 9280), False, 'from brainlit.utils.util import check_type, check_size\n'), ((9289, 9308), 'brainlit.utils.util.check_size', 'check_size', (['spacing'], {}), '(spacing)\n', (9299, 9308), False, 'from brainlit.utils.util import check_type, check_size\n'), ((9317, 9347), 'brainlit.utils.util.check_type', 'check_type', (['origin', 'np.ndarray'], {}), '(origin, np.ndarray)\n', (9327, 9347), False, 'from brainlit.utils.util import check_type, check_size\n'), ((9356, 9374), 'brainlit.utils.util.check_size', 'check_size', (['origin'], {}), '(origin)\n', (9366, 9374), False, 'from brainlit.utils.util import check_type, check_size\n'), ((14906, 14939), 'brainlit.utils.util.check_type', 'check_type', (['vox_in_img_list', 'list'], {}), '(vox_in_img_list, list)\n', (14916, 14939), False, 'from brainlit.utils.util import check_type, check_size\n'), ((17546, 17578), 'brainlit.utils.util.check_type', 'check_type', (['node_id', '(list, int)'], {}), 
'(node_id, (list, int))\n', (17556, 17578), False, 'from brainlit.utils.util import check_type, check_size\n'), ((17587, 17609), 'brainlit.utils.util.check_type', 'check_type', (['depth', 'int'], {}), '(depth, int)\n', (17597, 17609), False, 'from brainlit.utils.util import check_type, check_size\n'), ((20001, 20040), 'brainlit.utils.util.check_type', 'check_type', (['bounding_box', '(tuple, list)'], {}), '(bounding_box, (tuple, list))\n', (20011, 20040), False, 'from brainlit.utils.util import check_type, check_size\n'), ((22371, 22410), 'brainlit.utils.util.check_type', 'check_type', (['bounding_box', '(tuple, list)'], {}), '(bounding_box, (tuple, list))\n', (22381, 22410), False, 'from brainlit.utils.util import check_type, check_size\n'), ((24071, 24099), 'brainlit.utils.util.check_type', 'check_type', (['pts1', 'np.ndarray'], {}), '(pts1, np.ndarray)\n', (24081, 24099), False, 'from brainlit.utils.util import check_type, check_size\n'), ((24108, 24136), 'brainlit.utils.util.check_type', 'check_type', (['pts2', 'np.ndarray'], {}), '(pts2, np.ndarray)\n', (24118, 24136), False, 'from brainlit.utils.util import check_type, check_size\n'), ((24158, 24199), 'sklearn.metrics.pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (24187, 24199), False, 'from sklearn.metrics import pairwise_distances_argmin_min\n'), ((24257, 24298), 'sklearn.metrics.pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', (['pts2', 'pts1'], {}), '(pts2, pts1)\n', (24286, 24298), False, 'from sklearn.metrics import pairwise_distances_argmin_min\n'), ((26602, 26728), 'pandas.read_table', 'pd.read_table', (['path'], {'names': "['sample', 'structure', 'x', 'y', 'z', 'r', 'parent']", 'skiprows': 'header_length', 'delimiter': '"""\\\\s+"""'}), "(path, names=['sample', 'structure', 'x', 'y', 'z', 'r',\n 'parent'], skiprows=header_length, delimiter='\\\\s+')\n", (26615, 26728), True, 'import pandas as pd\n'), ((27842, 27934), 
'cloudvolume.CloudVolume', 'CloudVolume', (['s3_path'], {'mip': 'mip', 'fill_missing': 'self.fill_missing', 'use_https': 'self.use_https'}), '(s3_path, mip=mip, fill_missing=self.fill_missing, use_https=\n self.use_https)\n', (27853, 27934), False, 'from cloudvolume import CloudVolume, Skeleton\n'), ((28054, 28074), 'io.StringIO', 'StringIO', (['swc_string'], {}), '(swc_string)\n', (28062, 28074), False, 'from io import StringIO\n'), ((28339, 28453), 'pandas.read_table', 'pd.read_table', (['string_io'], {'names': "['sample', 'structure', 'x', 'y', 'z', 'r', 'parent']", 'skiprows': 'h_len', 'sep': '""" """'}), "(string_io, names=['sample', 'structure', 'x', 'y', 'z', 'r',\n 'parent'], skiprows=h_len, sep=' ')\n", (28352, 28453), True, 'import pandas as pd\n'), ((29672, 29691), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (29680, 29691), True, 'import numpy as np\n'), ((30552, 30571), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (30560, 30571), True, 'import numpy as np\n'), ((32336, 32348), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (32346, 32348), True, 'import networkx as nx\n'), ((36515, 36546), 'numpy.array', 'np.array', (['paths'], {'dtype': '"""object"""'}), "(paths, dtype='object')\n", (36523, 36546), True, 'import numpy as np\n'), ((37535, 37583), 'networkx.bfs_tree', 'nx.bfs_tree', (['G_undir', 'node_id'], {'depth_limit': 'depth'}), '(G_undir, node_id, depth_limit=depth)\n', (37546, 37583), True, 'import networkx as nx\n'), ((38602, 38634), 'numpy.array', 'np.array', (['color'], {'dtype': '"""float32"""'}), "(color, dtype='float32')\n", (38610, 38634), True, 'import numpy as np\n'), ((38650, 38677), 'cloudvolume.Skeleton.from_swc', 'Skeleton.from_swc', (['contents'], {}), '(contents)\n', (38667, 38677), False, 'from cloudvolume import CloudVolume, Skeleton\n'), ((39695, 39749), 'numpy.zeros', 'np.zeros', (['(skel.vertices.shape[0], 4)'], {'dtype': '"""float32"""'}), "((skel.vertices.shape[0], 4), 
dtype='float32')\n", (39703, 39749), True, 'import numpy as np\n'), ((3125, 3199), 'cloudvolume.CloudVolume', 'CloudVolume', (['path'], {'mip': 'mip', 'fill_missing': 'fill_missing', 'use_https': 'use_https'}), '(path, mip=mip, fill_missing=fill_missing, use_https=use_https)\n', (3136, 3199), False, 'from cloudvolume import CloudVolume, Skeleton\n'), ((7297, 7315), 'brainlit.utils.util.check_size', 'check_size', (['origin'], {}), '(origin)\n', (7307, 7315), False, 'from brainlit.utils.util import check_type, check_size\n'), ((10633, 10652), 'brainlit.utils.util.check_size', 'check_size', (['spacing'], {}), '(spacing)\n', (10643, 10652), False, 'from brainlit.utils.util import check_type, check_size\n'), ((10757, 10775), 'brainlit.utils.util.check_size', 'check_size', (['origin'], {}), '(origin)\n', (10767, 10775), False, 'from brainlit.utils.util import check_type, check_size\n'), ((10943, 10962), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (10951, 10962), True, 'import numpy as np\n'), ((12614, 12633), 'brainlit.utils.util.check_size', 'check_size', (['spacing'], {}), '(spacing)\n', (12624, 12633), False, 'from brainlit.utils.util import check_type, check_size\n'), ((12738, 12756), 'brainlit.utils.util.check_size', 'check_size', (['origin'], {}), '(origin)\n', (12748, 12756), False, 'from brainlit.utils.util import check_type, check_size\n'), ((12924, 12943), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (12932, 12943), True, 'import numpy as np\n'), ((17779, 17798), 'brainlit.utils.util.check_size', 'check_size', (['spacing'], {}), '(spacing)\n', (17789, 17798), False, 'from brainlit.utils.util import check_type, check_size\n'), ((17903, 17921), 'brainlit.utils.util.check_size', 'check_size', (['origin'], {}), '(origin)\n', (17913, 17921), False, 'from brainlit.utils.util import check_type, check_size\n'), ((18089, 18108), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (18097, 18108), True, 'import numpy 
as np\n'), ((20300, 20319), 'brainlit.utils.util.check_size', 'check_size', (['spacing'], {}), '(spacing)\n', (20310, 20319), False, 'from brainlit.utils.util import check_type, check_size\n'), ((20424, 20442), 'brainlit.utils.util.check_size', 'check_size', (['origin'], {}), '(origin)\n', (20434, 20442), False, 'from brainlit.utils.util import check_type, check_size\n'), ((20610, 20629), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (20618, 20629), True, 'import numpy as np\n'), ((22670, 22689), 'brainlit.utils.util.check_size', 'check_size', (['spacing'], {}), '(spacing)\n', (22680, 22689), False, 'from brainlit.utils.util import check_type, check_size\n'), ((22794, 22812), 'brainlit.utils.util.check_size', 'check_size', (['origin'], {}), '(origin)\n', (22804, 22812), False, 'from brainlit.utils.util import check_type, check_size\n'), ((22980, 22999), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (22988, 22999), True, 'import numpy as np\n'), ((24544, 24576), 'numpy.concatenate', 'np.concatenate', (['[dists1, dists2]'], {}), '([dists1, dists2])\n', (24558, 24576), True, 'import numpy as np\n'), ((24595, 24609), 'numpy.mean', 'np.mean', (['dists'], {}), '(dists)\n', (24602, 24609), True, 'import numpy as np\n'), ((26455, 26511), 'warnings.warn', 'warnings.warn', (["('No offset information found in: ' + path)"], {}), "('No offset information found in: ' + path)\n", (26468, 26511), False, 'import warnings\n'), ((28683, 28709), 'numpy.round', 'np.round', (["(df['x'] / res[0])"], {}), "(df['x'] / res[0])\n", (28691, 28709), True, 'import numpy as np\n'), ((28732, 28758), 'numpy.round', 'np.round', (["(df['y'] / res[1])"], {}), "(df['y'] / res[1])\n", (28740, 28758), True, 'import numpy as np\n'), ((28781, 28807), 'numpy.round', 'np.round', (["(df['z'] / res[2])"], {}), "(df['z'] / res[2])\n", (28789, 28807), True, 'import numpy as np\n'), ((30382, 30424), 'numpy.divide', 'np.divide', (['(spatial_coord - origin)', 'spacing'], {}), 
'(spatial_coord - origin, spacing)\n', (30391, 30424), True, 'import numpy as np\n'), ((35655, 35695), 'networkx.algorithms.dag.dag_longest_path', 'nx.algorithms.dag.dag_longest_path', (['G_cp'], {}), '(G_cp)\n', (35689, 35695), True, 'import networkx as nx\n'), ((3457, 3482), 'os.path.isfile', 'os.path.isfile', (['self.path'], {}), '(self.path)\n', (3471, 3482), False, 'import os\n'), ((7506, 7604), 'cloudvolume.CloudVolume', 'CloudVolume', (['self.path'], {'mip': 'self.mip', 'fill_missing': 'self.fill_missing', 'use_https': 'self.use_https'}), '(self.path, mip=self.mip, fill_missing=self.fill_missing,\n use_https=self.use_https)\n', (7517, 7604), False, 'from cloudvolume import CloudVolume, Skeleton\n'), ((36309, 36340), 'numpy.int64', 'np.int64', (["G_cp.nodes[node]['x']"], {}), "(G_cp.nodes[node]['x'])\n", (36317, 36340), True, 'import numpy as np\n'), ((36372, 36403), 'numpy.int64', 'np.int64', (["G_cp.nodes[node]['y']"], {}), "(G_cp.nodes[node]['y'])\n", (36380, 36403), True, 'import numpy as np\n'), ((36435, 36466), 'numpy.int64', 'np.int64', (["G_cp.nodes[node]['z']"], {}), "(G_cp.nodes[node]['z'])\n", (36443, 36466), True, 'import numpy as np\n'), ((25644, 25667), 'numpy.arange', 'np.arange', (['idx', '(idx + 3)'], {}), '(idx, idx + 3)\n', (25653, 25667), True, 'import numpy as np\n'), ((26026, 26051), 're.split', 're.split', (['"""_|-|\\\\."""', 'name'], {}), "('_|-|\\\\.', name)\n", (26034, 26051), False, 'import re\n'), ((3487, 3509), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (3503, 3509), False, 'import os\n')] |
"""
Find chessboards in both frames of a stereo feed
"""
import logging
from pathlib import Path
import click
import cv2 as cv
import numpy as np
from rakali import VideoPlayer
from rakali.annotate import add_frame_labels
from rakali.camera.chessboard import ChessboardFinder
from rakali.stereo.reader import StereoCamera
from rakali.video import go
from rakali.video.writer import get_stereo_writer
# Verbose logging so camera/stream problems are visible during capture runs.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Host of the Axis camera server used to build the default stream URLs below.
NVR = "192.168.1.20"
@click.command(context_settings=dict(max_content_width=120))
@click.version_option()
@click.option(
    "-l",
    "--left-eye",
    help="Left eye, can be local USB cam (0|1|2..) or IP cam rtsp URL or file",
    default=f"http://{NVR}/axis-cgi/mjpg/video.cgi?&camera=1",
    show_default=True,
)
@click.option(
    "-r",
    "--right-eye",
    help="Right eye, can be local USB cam (0|1|2..) or IP cam rtsp URL or file",
    default=f"http://{NVR}/axis-cgi/mjpg/video.cgi?&camera=2",
    show_default=True,
)
@click.option(
    "-o",
    "--output-folder",
    help="Chessboard images store folder",
    default="~/rakali/stereo/chessboards/",
    show_default=True,
)
@click.option(
    "--chessboard-rows",
    help="Chessboard rows",
    default=9,
    show_default=True,
)
@click.option(
    "--chessboard-columns",
    help="Chessboard columns",
    default=6,
    show_default=True,
)
def cli(left_eye, right_eye, output_folder, chessboard_rows, chessboard_columns):
    """
    Find chessboard calibration images in both frames of the stereo pair.

    Reads synchronized frames from the two camera sources, looks for a
    chessboard in each frame, and only when BOTH frames contain one saves
    the pair as numbered JPEGs into the output folder.  The raw and the
    annotated streams are also written to AVI files, and the annotated
    pair is shown live side by side.
    """
    # Destination folder for the saved calibration pairs.
    out_path = Path(output_folder).expanduser()
    out_path.mkdir(parents=True, exist_ok=True)
    # (columns, rows) ordering -- presumably the inner-corner counts that
    # ChessboardFinder expects; confirm against rakali's implementation.
    chessboard_size = (chessboard_columns, chessboard_rows)
    finder = ChessboardFinder(chessboard_size)
    stream = StereoCamera(
        left_src=left_eye,
        right_src=right_eye,
    )
    player = VideoPlayer()
    with player, stream:
        # Persist both the untouched and the annotated stereo streams.
        original_writer = get_stereo_writer(stream, file_name="original_stereo.avi")
        annotated_writer = get_stereo_writer(stream, file_name="annotated_stereo.avi")
        frame_count = 0  # all stereo frames read
        good_count = 0  # pairs where both eyes contained a chessboard
        # NOTE(review): go() presumably returns False on a quit key -- confirm
        # in rakali.video.
        while go():
            ok, frames = stream.read()
            frame_count += 1
            if ok:
                good = []  # per-eye chessboard detection results
                annotated = []  # per-eye frames with corners/labels drawn
                # We have a good stereo frame: inspect each eye and, if a
                # chessboard is found in each, save the pair to disk.
                for frame in frames.frames():
                    labels = [f"Stereo Calibrate {frame_count}"]
                    # Draw on a copy so the saved calibration image stays clean.
                    display_frame = frame.copy()
                    height, width, channels = display_frame.shape  # NOTE(review): unused
                    has_corners, corners = finder.corners(frame)
                    if has_corners:
                        good.append(True)
                        finder.draw(display_frame, corners)
                        labels.append("CHESSBOARD")
                    else:
                        good.append(False)
                        labels.append("NO CHESSBOARD FOR YOU")
                    # Presumably annotates display_frame in place -- the
                    # return value is intentionally ignored here.
                    add_frame_labels(display_frame, labels=labels)
                    annotated.append(display_frame)
                if all(good):
                    # Both frames have verified chessboards: save each eye's
                    # clean frame for later calibration analysis.
                    for side, frame in frames.calibration_named_frames():
                        cv.imwrite(f"{out_path}/{side}_{good_count:05}.jpg", frame)
                    good_count += 1
                player.show(np.hstack(annotated))
                annotated_writer.stereo_write(annotated)
                original_writer.stereo_write(frames.frames())
| [
"logging.basicConfig",
"logging.getLogger",
"rakali.video.writer.get_stereo_writer",
"cv2.imwrite",
"rakali.VideoPlayer",
"pathlib.Path",
"click.option",
"rakali.video.go",
"numpy.hstack",
"rakali.annotate.add_frame_labels",
"click.version_option",
"rakali.stereo.reader.StereoCamera",
"rakal... | [((403, 443), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (422, 443), False, 'import logging\n'), ((454, 481), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (471, 481), False, 'import logging\n'), ((568, 590), 'click.version_option', 'click.version_option', ([], {}), '()\n', (588, 590), False, 'import click\n'), ((592, 792), 'click.option', 'click.option', (['"""-l"""', '"""--left-eye"""'], {'help': '"""Left eye, can be local USB cam (0|1|2..) or IP cam rtsp URL or file"""', 'default': 'f"""http://{NVR}/axis-cgi/mjpg/video.cgi?&camera=1"""', 'show_default': '(True)'}), "('-l', '--left-eye', help=\n 'Left eye, can be local USB cam (0|1|2..) or IP cam rtsp URL or file',\n default=f'http://{NVR}/axis-cgi/mjpg/video.cgi?&camera=1', show_default\n =True)\n", (604, 792), False, 'import click\n'), ((803, 1005), 'click.option', 'click.option', (['"""-r"""', '"""--right-eye"""'], {'help': '"""Right eye, can be local USB cam (0|1|2..) or IP cam rtsp URL or file"""', 'default': 'f"""http://{NVR}/axis-cgi/mjpg/video.cgi?&camera=2"""', 'show_default': '(True)'}), "('-r', '--right-eye', help=\n 'Right eye, can be local USB cam (0|1|2..) 
or IP cam rtsp URL or file',\n default=f'http://{NVR}/axis-cgi/mjpg/video.cgi?&camera=2', show_default\n =True)\n", (815, 1005), False, 'import click\n'), ((1016, 1155), 'click.option', 'click.option', (['"""-o"""', '"""--output-folder"""'], {'help': '"""Chessboard images store folder"""', 'default': '"""~/rakali/stereo/chessboards/"""', 'show_default': '(True)'}), "('-o', '--output-folder', help='Chessboard images store folder',\n default='~/rakali/stereo/chessboards/', show_default=True)\n", (1028, 1155), False, 'import click\n'), ((1176, 1267), 'click.option', 'click.option', (['"""--chessboard-rows"""'], {'help': '"""Chessboard rows"""', 'default': '(9)', 'show_default': '(True)'}), "('--chessboard-rows', help='Chessboard rows', default=9,\n show_default=True)\n", (1188, 1267), False, 'import click\n'), ((1284, 1381), 'click.option', 'click.option', (['"""--chessboard-columns"""'], {'help': '"""Chessboard columns"""', 'default': '(6)', 'show_default': '(True)'}), "('--chessboard-columns', help='Chessboard columns', default=6,\n show_default=True)\n", (1296, 1381), False, 'import click\n'), ((1739, 1772), 'rakali.camera.chessboard.ChessboardFinder', 'ChessboardFinder', (['chessboard_size'], {}), '(chessboard_size)\n', (1755, 1772), False, 'from rakali.camera.chessboard import ChessboardFinder\n'), ((1787, 1839), 'rakali.stereo.reader.StereoCamera', 'StereoCamera', ([], {'left_src': 'left_eye', 'right_src': 'right_eye'}), '(left_src=left_eye, right_src=right_eye)\n', (1799, 1839), False, 'from rakali.stereo.reader import StereoCamera\n'), ((1877, 1890), 'rakali.VideoPlayer', 'VideoPlayer', ([], {}), '()\n', (1888, 1890), False, 'from rakali import VideoPlayer\n'), ((1943, 2001), 'rakali.video.writer.get_stereo_writer', 'get_stereo_writer', (['stream'], {'file_name': '"""original_stereo.avi"""'}), "(stream, file_name='original_stereo.avi')\n", (1960, 2001), False, 'from rakali.video.writer import get_stereo_writer\n'), ((2029, 2088), 
'rakali.video.writer.get_stereo_writer', 'get_stereo_writer', (['stream'], {'file_name': '"""annotated_stereo.avi"""'}), "(stream, file_name='annotated_stereo.avi')\n", (2046, 2088), False, 'from rakali.video.writer import get_stereo_writer\n'), ((2150, 2154), 'rakali.video.go', 'go', ([], {}), '()\n', (2152, 2154), False, 'from rakali.video import go\n'), ((1584, 1603), 'pathlib.Path', 'Path', (['output_folder'], {}), '(output_folder)\n', (1588, 1603), False, 'from pathlib import Path\n'), ((3085, 3131), 'rakali.annotate.add_frame_labels', 'add_frame_labels', (['display_frame'], {'labels': 'labels'}), '(display_frame, labels=labels)\n', (3101, 3131), False, 'from rakali.annotate import add_frame_labels\n'), ((3522, 3542), 'numpy.hstack', 'np.hstack', (['annotated'], {}), '(annotated)\n', (3531, 3542), True, 'import numpy as np\n'), ((3397, 3456), 'cv2.imwrite', 'cv.imwrite', (['f"""{out_path}/{side}_{good_count:05}.jpg"""', 'frame'], {}), "(f'{out_path}/{side}_{good_count:05}.jpg', frame)\n", (3407, 3456), True, 'import cv2 as cv\n')] |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import gym
import torch
import time
import numpy as np
import torch.nn as nn
from datetime import datetime, timedelta
from torch.nn import functional as F
from collections import namedtuple
# Cross-entropy-method hyperparameters
hidden_size =128  # hidden-layer width of the policy network
batch_size = 100  # episodes collected per training iteration
percentile = 70  # elite-selection cutoff used by filter_batch()
lr = 0.01  # Adam learning rate
class Net(nn.Module):
    """Single-hidden-layer MLP mapping an observation to raw action scores.

    The output is left as logits on purpose: callers apply F.softmax for
    sampling, and nn.CrossEntropyLoss applies its own log-softmax.
    """

    def __init__(self, obs_size, hidden, num_actions):
        super().__init__()
        # Keep all layers inside one Sequential so forward() is a single call.
        self.net = nn.Sequential(
            nn.Linear(obs_size, hidden),
            nn.ReLU(),
            nn.Linear(hidden, num_actions),
        )

    def forward(self, x):
        return self.net(x)
# One finished episode: total reward and the list of Steps taken.
Episode = namedtuple('Episode',('reward','steps'))
# One environment transition: the observation seen and the action taken.
Steps = namedtuple('Steps',('observation','action'))
@torch.no_grad()
def play(env):
    """Render one episode acting greedily with the module-level ``net``.

    Picks argmax actions (no exploration), renders each frame with a short
    delay, prints the accumulated reward when the episode ends, and closes
    the environment window.
    """
    total_reward = 0
    state = env.reset()
    while True:
        env.render()
        time.sleep(0.01)
        # Greedy action: highest-scoring output of the policy network.
        action = net(torch.FloatTensor(state)).argmax(dim=-1).item()
        state, reward, done, _ = env.step(action)
        total_reward += reward
        if done:
            print(total_reward)
            break
    env.close()
def iter_batch(env, net, batch_size):
    """Generate batches of complete episodes played with the current policy.

    The policy is stochastic: the network output is turned into action
    probabilities with softmax and an action is sampled from them.  Finished
    episodes accumulate until ``batch_size`` of them are ready, at which
    point the whole list is yielded.  Runs forever; the caller decides when
    to stop consuming.
    """
    batch = []
    episode_steps = []
    episode_reward = 0
    obs = env.reset()
    while True:
        # Sample an action from the softmax policy for the current observation.
        obs_t = torch.FloatTensor([obs])
        action_probs = F.softmax(net(obs_t), dim=-1).detach().numpy()[0]
        action = np.random.choice(len(action_probs), p=action_probs)
        episode_steps.append(Steps(obs, action))

        obs, reward, done, _ = env.step(action)
        episode_reward += reward
        if done:
            # Episode finished: record it and start a fresh one.
            batch.append(Episode(episode_reward, episode_steps))
            episode_steps = []
            episode_reward = 0
            obs = env.reset()
            if len(batch) == batch_size:
                yield batch
                batch = []
def filter_batch(batch, percentile):
    """Select the 'elite' episodes whose reward reaches the given percentile.

    Returns a tuple (obs_v, act_v, reward_boundry, rewards_mean) where
    obs_v / act_v are the concatenated observations (FloatTensor) and
    actions (LongTensor) of all elite episodes, reward_boundry is the
    percentile cutoff, and rewards_mean is the mean reward of the whole
    batch (for monitoring).
    """
    rewards = [episode.reward for episode in batch]
    reward_boundry = np.percentile(rewards, percentile)
    rewards_mean = float(np.mean(rewards))

    train_obs = []
    train_act = []
    for total_reward, steps in batch:
        # Episodes at or above the boundary are kept for training.
        if total_reward >= reward_boundry:
            train_obs.extend(s.observation for s in steps)
            train_act.extend(s.action for s in steps)

    return (torch.FloatTensor(train_obs), torch.LongTensor(train_act),
            reward_boundry, rewards_mean)
if __name__=="__main__":
start_time = datetime.now()
env = gym.make('CartPole-v1')
obs_size = env.observation_space.shape[0]
num_actions = env.action_space.n
net = Net(obs_size, hidden_size, num_actions)
loss_fun = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr= lr)
for i, batch in enumerate(iter_batch(env, net, batch_size)):
obs_v, act_v, reward_boundry, rewards_mean = \
filter_batch(batch, percentile)
optimizer.zero_grad()
output = net(obs_v)
loss = loss_fun(output, act_v)
loss.backward()
optimizer.step()
print(f'epoch:{i} loss:{loss.item():.3f} mean:{rewards_mean:.0f}')
if rewards_mean > 475:
duration = timedelta(seconds = (datetime.now()-start_time).seconds)
print(f'Solved! in {duration}')
break
| [
"numpy.mean",
"torch.nn.ReLU",
"collections.namedtuple",
"torch.nn.CrossEntropyLoss",
"torch.LongTensor",
"time.sleep",
"datetime.datetime.now",
"gym.make",
"numpy.percentile",
"torch.nn.Linear",
"torch.no_grad",
"torch.FloatTensor"
] | [((715, 757), 'collections.namedtuple', 'namedtuple', (['"""Episode"""', "('reward', 'steps')"], {}), "('Episode', ('reward', 'steps'))\n", (725, 757), False, 'from collections import namedtuple\n'), ((764, 810), 'collections.namedtuple', 'namedtuple', (['"""Steps"""', "('observation', 'action')"], {}), "('Steps', ('observation', 'action'))\n", (774, 810), False, 'from collections import namedtuple\n'), ((812, 827), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (825, 827), False, 'import torch\n'), ((2048, 2082), 'numpy.percentile', 'np.percentile', (['rewards', 'percentile'], {}), '(rewards, percentile)\n', (2061, 2082), True, 'import numpy as np\n'), ((2376, 2400), 'torch.FloatTensor', 'torch.FloatTensor', (['obs_v'], {}), '(obs_v)\n', (2393, 2400), False, 'import torch\n'), ((2413, 2436), 'torch.LongTensor', 'torch.LongTensor', (['act_v'], {}), '(act_v)\n', (2429, 2436), False, 'import torch\n'), ((2537, 2551), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2549, 2551), False, 'from datetime import datetime, timedelta\n'), ((2562, 2585), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (2570, 2585), False, 'import gym\n'), ((2735, 2756), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2754, 2756), True, 'import torch.nn as nn\n'), ((922, 938), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (932, 938), False, 'import time\n'), ((1327, 1351), 'torch.FloatTensor', 'torch.FloatTensor', (['[obs]'], {}), '([obs])\n', (1344, 1351), False, 'import torch\n'), ((2108, 2124), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (2115, 2124), True, 'import numpy as np\n'), ((472, 499), 'torch.nn.Linear', 'nn.Linear', (['obs_size', 'hidden'], {}), '(obs_size, hidden)\n', (481, 499), True, 'import torch.nn as nn\n'), ((536, 545), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (543, 545), True, 'import torch.nn as nn\n'), ((582, 612), 'torch.nn.Linear', 'nn.Linear', (['hidden', 
'num_actions'], {}), '(hidden, num_actions)\n', (591, 612), True, 'import torch.nn as nn\n'), ((960, 984), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (977, 984), False, 'import torch\n'), ((3280, 3294), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3292, 3294), False, 'from datetime import datetime, timedelta\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from supra.Utils.Classes import Constants
# Shared physical constants (GAMMA, R, M_0, ...) used by anglescan() below.
consts = Constants()
def getPressure(z):
    """Approximate atmospheric pressure [Pa] at altitude ``z`` [m].

    Isothermal barometric model: sea-level pressure of 1013.25 hPa
    (written as 10*101.325) decaying exponentially with a scale factor
    of 1.2e-4 per metre; the trailing *100 converts hPa to Pa.
    """
    return 10*101.325*np.exp(-0.00012*z)*100
def anglescan(S, phi, theta, z_profile, vfreq, P_amb, wind=True, debug=True, trace=False, plot=False):
    # Originally by <NAME> (Supracenter)
    """ Ray-trace a single acoustic ray launched from S through a layered atmosphere.

    The ray is refracted through each layer of ``z_profile`` (integrated
    top-down), accumulating horizontal displacement, travel time, and a
    frequency-dependent absorption exponent ``g``.

    Arguments:
        S: [list] [x, y, z] of the launch point (Supracenter or wave-release point)
        phi: [float] initial azimuth [deg]; 0 deg is North, 90 deg is East
        theta: [float] initial takeoff angle [deg]; 90 deg is horizontal,
            180 deg is vertically down
        z_profile: [ndarray] weather profile (n_layers x 4):
            [height (increasing order) [m], speed of sound [m/s],
             wind speed [m/s], wind direction [rad] (same convention as phi)]
        vfreq: [float] ray frequency used in the absorption term G
        P_amb: ambient pressure; NOTE(review): accepted but never used in this body

    Keyword Arguments:
        wind: [bool] if False, all wind speeds are zeroed
        debug: [bool] if True, prints status messages
        trace: [bool] if True, the third returned element is the full
            (x, y, z, t) trace list instead of the top-layer sound speed
        plot: [bool] if True (together with trace), shows a 3D plot of the ray

    Returns:
        [ndarray] [f, exp(g), T, P] where f is a pressure/temperature scaling
        factor averaged over the profile, exp(g) the transmission factor,
        T either the top-layer sound speed or (if trace) the trace list,
        and P the pressure at the top of the profile.
        NOTE(review): the final position/time D is computed but NOT returned
        (a turned ray returns an all-NaN array instead) -- confirm intent.
    """
    # Empirical absorption constants used in the G term below -- TODO confirm
    # the source of these values.
    b_const = 1.119e-4
    k_const = 2.0e-4
    # Sound speed [m/s] and pressure [Pa] at the TOP of the profile.
    # NOTE(review): T is rebound to the trace list below when trace=True.
    T = z_profile[-1, 1]
    P = getPressure(z_profile[-1, 0])
    # Azimuths and wind directions are measured from North, increasing
    # clockwise to the East; rotate so the math below works in a standard frame.
    phi = (phi - 90)%360
    # Flip coordinate system horizontally
    phi = (360 - phi)%360
    phi = np.radians(phi)
    theta = np.radians(theta)
    # Switch to turn off winds entirely
    if not wind:
        z_profile[:, 2] = 0
        # z_profile[:, 1] = 330
    # The number of layers in the integration region
    n_layers = len(z_profile)
    # Slowness, as defined in SUPRACENTER on pg 35, s = 1/c
    s = 1.0/z_profile[0:n_layers, 1]
    # Elevation of each layer
    z = z_profile[0:n_layers, 0]
    # Wind components along phi and along phi + pi/2 respectively
    u = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi)
    v = z_profile[:, 2]*np.sin(z_profile[:, 3])*np.cos(phi+np.pi/2) + z_profile[:, 2]*np.cos(z_profile[:, 3])*np.sin(phi+np.pi/2)
    # Slowness at the top (launch) layer
    s_val = s[n_layers-1]
    # Ray parameter (Snell invariant including the along-path wind)
    p = s_val*np.sin(theta)/(1 + s_val*u[n_layers - 1]*np.sin(theta))
    # Accumulated horizontal displacement in the rotated frame
    X = 0
    Y = 0
    # Travel time
    t_arrival = 0
    if trace:
        # T now holds the list of (x, y, z, t) trace points.
        T = []
        T.append([S[0], S[1], S[2], t_arrival])
    # Turned rays take the sqrt of a negative number; silence the warnings
    # and detect them via NaN instead.
    np.seterr(divide='ignore', invalid='ignore')
    ### Scan Loop ###
    # Rotation from the working frame back to geographic x/y.
    a, b = np.cos(phi), np.sin(phi)
    last_z = 0
    g = 0
    # Integrate downwards from the top layer.
    for i in range(n_layers - 1, 0, -1):
        s2 = s[i]**2
        delz = z[i] - z[i-1]
        # NOTE(review): pres_1/pres are computed but never used below.
        pres_1 = getPressure(z[i])
        pres = getPressure(z[i-1])
        # Wind components in this layer
        U = u[i]
        V = v[i]
        # Effective ray parameter in the moving medium
        p2 = p/(1 - p*U)
        # Path-length factor; becomes NaN where the ray turns (s2 < p2**2)
        A = delz/np.sqrt(s2 - p2**2)
        if np.isnan(A).all():
            if debug:
                print("ANGLESCAN ERROR: All NaNs - rays reflect upwards")
            if trace:
                return np.array([[np.nan, np.nan, np.nan, np.nan]])
            else:
                return np.array([np.nan, np.nan, np.nan, np.nan])
        # Equation (10): displacement along the launch azimuth
        dx = (p2 + s2*U)*A
        X += dx
        # Equation (11): displacement perpendicular to the launch azimuth
        dy = s2*V*A
        Y += dy
        # Geometry of the layer crossing, used by the absorption term.
        horizontal_change = np.sqrt((a*dx - b*dy)**2 + (b*dx + a*dy)**2)
        angle_of_depression = np.arctan(delz/horizontal_change)
        # Frequency-dependent absorption increment over this layer.
        # (sin(arctan(x)) == x / sqrt(x^2 + 1), so dividing by
        # sin(angle_of_depression) converts vertical to slant path.)
        G = -vfreq**2*k_const/b_const*(np.exp(b_const*z[i]) - np.exp(b_const*z[i-1]))/np.sin(angle_of_depression)/101325
        g += G
        # Index of the layer we have just descended into
        last_z = i - 1
        # Travel time through this layer
        dt = s2/np.sqrt(s2 - p**2/(1 - p*u[i-1])**2)*delz
        # If the exact ray timing is NaN, fall back to the straight-line
        # distance times the slowness of this layer.
        if not np.isnan(dt):
            t_arrival += dt
        else:
            t_arrival += np.sqrt((a*dx - b*dy)**2 + (b*dx + a*dy)**2 + delz**2)*s[i]
        if trace:
            T.append([S[0] + (a*X - b*Y), S[1] + (b*X + a*Y), z[last_z], t_arrival])
    if trace and plot:
        # 3D visualization: trace in black/blue, source as a red star,
        # final point as a green triangle.
        tr = np.array(T)
        fig = plt.figure()
        ax = Axes3D(fig)
        ax.scatter(tr[:, 0], tr[:, 1], tr[:, 2], c='b')
        ax.plot(tr[:, 0], tr[:, 1], tr[:, 2], c='k')
        ax.scatter(S[0], S[1], S[2], c='r', marker="*")
        ax.scatter(S[0] + (a*X - b*Y), S[1] + (b*X + a*Y), z[last_z], c='g', marker="^")
        plt.show()
    # if v_tol is not None and h_tol is not None:
    #     dh = z[last_z] - target[2]
    #     dx = np.sqrt((S[0] + (a*X - b*Y) - target[0])**2 + (S[1] + (b*X + a*Y) - target[1])**2)
    #     if dh <= v_tol and dx <= h_tol:
    #         t_arrival += np.sqrt(dh**2 + dx**2)/310
    # Final position and travel time of the ray (imaginary/turned rays have
    # already returned above).  NOTE(review): computed but never returned.
    D = [S[0] + (a*X - b*Y), S[1] + (b*X + a*Y), z[last_z], t_arrival]
    # Temperatures [K] and pressures [Pa] at the bottom/top of the profile,
    # derived from the sound speeds via c^2 = GAMMA*R*T/M_0.
    T_0 = z_profile[0, 1]**2/consts.GAMMA/consts.R*consts.M_0
    P_0 = getPressure(z_profile[0, 0])
    z_2 = z_profile[-1, 0]
    z_1 = z_profile[0, 0]
    P_2 = getPressure(z_2)
    P_1 = getPressure(z_1)
    T_2 = z_profile[-1, 1]**2*consts.M_0/consts.GAMMA/consts.R
    T_1 = z_profile[1, 1]**2*consts.M_0/consts.GAMMA/consts.R
    # f averages a (P/T)^0.33 scaling factor over the profile; needs to be
    # the average f_d (original author's note).
    f = ((T_0/P_0)**(0.33)*(((P_2/T_2)**(0.33)*z_2 - (P_1/T_1)**(0.33)*z_1)/(z_2 - z_1)) + 1)/2
    ##########################
    return np.array([f, np.exp(g), T, P])
if __name__ == '__main__':
    # Minimal demo: launch a ray from 1 km altitude, 45 deg below the
    # horizontal, through a uniform, windless atmosphere.
    S = np.array([0, 0, 1000])
    # takeoff angle [deg]: values above 90 point below the horizontal
    theta = 135
    # azimuth [deg] from North
    phi = 0
    # [height m, sound speed m/s, wind speed m/s, wind direction rad]
    z_profile = np.array([[0, 330, 0, 0],
                          [500, 330, 0, 0],
                          [1000, 330, 0, 0]])
    # BUG FIX: the original call omitted the required positional arguments
    # vfreq and P_amb and raised a TypeError.  Supply a nominal 1 Hz ray
    # frequency and sea-level ambient pressure [Pa].
    D = anglescan(S, phi, theta, z_profile, 1.0, 101325.0, trace=True, plot=True)
print(D) | [
"numpy.radians",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.isnan",
"numpy.cos",
"numpy.sin",
"supra.Utils.Classes.Constants",
"numpy.seterr",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.arctan"
] | [((145, 156), 'supra.Utils.Classes.Constants', 'Constants', ([], {}), '()\n', (154, 156), False, 'from supra.Utils.Classes import Constants\n'), ((1833, 1848), 'numpy.radians', 'np.radians', (['phi'], {}), '(phi)\n', (1843, 1848), True, 'import numpy as np\n'), ((1861, 1878), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (1871, 1878), True, 'import numpy as np\n'), ((2854, 2898), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (2863, 2898), True, 'import numpy as np\n'), ((6334, 6356), 'numpy.array', 'np.array', (['[0, 0, 1000]'], {}), '([0, 0, 1000])\n', (6342, 6356), True, 'import numpy as np\n'), ((6430, 6493), 'numpy.array', 'np.array', (['[[0, 330, 0, 0], [500, 330, 0, 0], [1000, 330, 0, 0]]'], {}), '([[0, 330, 0, 0], [500, 330, 0, 0], [1000, 330, 0, 0]])\n', (6438, 6493), True, 'import numpy as np\n'), ((2933, 2944), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2939, 2944), True, 'import numpy as np\n'), ((2946, 2957), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2952, 2957), True, 'import numpy as np\n'), ((3856, 3912), 'numpy.sqrt', 'np.sqrt', (['((a * dx - b * dy) ** 2 + (b * dx + a * dy) ** 2)'], {}), '((a * dx - b * dy) ** 2 + (b * dx + a * dy) ** 2)\n', (3863, 3912), True, 'import numpy as np\n'), ((3931, 3966), 'numpy.arctan', 'np.arctan', (['(delz / horizontal_change)'], {}), '(delz / horizontal_change)\n', (3940, 3966), True, 'import numpy as np\n'), ((4835, 4846), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (4843, 4846), True, 'import numpy as np\n'), ((4862, 4874), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4872, 4874), True, 'import matplotlib.pyplot as plt\n'), ((4888, 4899), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (4894, 4899), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((5162, 5172), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5170, 5172), True, 'import 
matplotlib.pyplot as plt\n'), ((197, 217), 'numpy.exp', 'np.exp', (['(-0.00012 * z)'], {}), '(-0.00012 * z)\n', (203, 217), True, 'import numpy as np\n'), ((2369, 2380), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2375, 2380), True, 'import numpy as np\n'), ((2423, 2434), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2429, 2434), True, 'import numpy as np\n'), ((2483, 2506), 'numpy.cos', 'np.cos', (['(phi + np.pi / 2)'], {}), '(phi + np.pi / 2)\n', (2489, 2506), True, 'import numpy as np\n'), ((2545, 2568), 'numpy.sin', 'np.sin', (['(phi + np.pi / 2)'], {}), '(phi + np.pi / 2)\n', (2551, 2568), True, 'import numpy as np\n'), ((2631, 2644), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2637, 2644), True, 'import numpy as np\n'), ((3375, 3396), 'numpy.sqrt', 'np.sqrt', (['(s2 - p2 ** 2)'], {}), '(s2 - p2 ** 2)\n', (3382, 3396), True, 'import numpy as np\n'), ((4548, 4560), 'numpy.isnan', 'np.isnan', (['dt'], {}), '(dt)\n', (4556, 4560), True, 'import numpy as np\n'), ((6280, 6289), 'numpy.exp', 'np.exp', (['g'], {}), '(g)\n', (6286, 6289), True, 'import numpy as np\n'), ((2345, 2368), 'numpy.sin', 'np.sin', (['z_profile[:, 3]'], {}), '(z_profile[:, 3])\n', (2351, 2368), True, 'import numpy as np\n'), ((2399, 2422), 'numpy.cos', 'np.cos', (['z_profile[:, 3]'], {}), '(z_profile[:, 3])\n', (2405, 2422), True, 'import numpy as np\n'), ((2459, 2482), 'numpy.sin', 'np.sin', (['z_profile[:, 3]'], {}), '(z_profile[:, 3])\n', (2465, 2482), True, 'import numpy as np\n'), ((2521, 2544), 'numpy.cos', 'np.cos', (['z_profile[:, 3]'], {}), '(z_profile[:, 3])\n', (2527, 2544), True, 'import numpy as np\n'), ((2672, 2685), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2678, 2685), True, 'import numpy as np\n'), ((3407, 3418), 'numpy.isnan', 'np.isnan', (['A'], {}), '(A)\n', (3415, 3418), True, 'import numpy as np\n'), ((3569, 3613), 'numpy.array', 'np.array', (['[[np.nan, np.nan, np.nan, np.nan]]'], {}), '([[np.nan, np.nan, np.nan, np.nan]])\n', (3577, 
3613), True, 'import numpy as np\n'), ((3655, 3697), 'numpy.array', 'np.array', (['[np.nan, np.nan, np.nan, np.nan]'], {}), '([np.nan, np.nan, np.nan, np.nan])\n', (3663, 3697), True, 'import numpy as np\n'), ((4180, 4207), 'numpy.sin', 'np.sin', (['angle_of_depression'], {}), '(angle_of_depression)\n', (4186, 4207), True, 'import numpy as np\n'), ((4384, 4430), 'numpy.sqrt', 'np.sqrt', (['(s2 - p ** 2 / (1 - p * u[i - 1]) ** 2)'], {}), '(s2 - p ** 2 / (1 - p * u[i - 1]) ** 2)\n', (4391, 4430), True, 'import numpy as np\n'), ((4629, 4697), 'numpy.sqrt', 'np.sqrt', (['((a * dx - b * dy) ** 2 + (b * dx + a * dy) ** 2 + delz ** 2)'], {}), '((a * dx - b * dy) ** 2 + (b * dx + a * dy) ** 2 + delz ** 2)\n', (4636, 4697), True, 'import numpy as np\n'), ((4133, 4155), 'numpy.exp', 'np.exp', (['(b_const * z[i])'], {}), '(b_const * z[i])\n', (4139, 4155), True, 'import numpy as np\n'), ((4156, 4182), 'numpy.exp', 'np.exp', (['(b_const * z[i - 1])'], {}), '(b_const * z[i - 1])\n', (4162, 4182), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import numpy as np
class Softmax:
    """Fully-connected layer with a softmax activation.

    Converts the arbitrary real-valued activations of the previous layer
    into a probability distribution over ``output_nodes`` classes.
    """
    def __init__(self, input_nodes, output_nodes):
        # Dividing by input_nodes keeps the initial weight variance small.
        self.weights = np.random.randn(input_nodes, output_nodes) / input_nodes
        self.biases = np.zeros(output_nodes)
    def backprop(self, d_L_d_out, learn_rate=0.005):
        ''' does the backprop stage in Softmax layer. Returns loss grad for
        inputs. d_L_d_out is the loss grad.
        Fix: the original computed d_out_d_t and then discarded it; this
        completes the chain rule, applies an SGD update to the weights and
        biases (step size learn_rate, a new optional parameter so existing
        callers keep working), and returns the gradient w.r.t. the inputs
        reshaped to the shape cached by forward(). '''
        # We only know 1 element will be nonzero
        for i, grad in enumerate(d_L_d_out):
            if grad == 0:
                continue
            # e^totals
            t_exp = np.exp(self.last_totals)
            # Sum of all e^totals
            S = np.sum(t_exp)
            # Grad of out[i] against totals
            d_out_d_t = -t_exp[i] * t_exp / (S ** 2)
            d_out_d_t[i] = t_exp[i] * (S - t_exp[i]) / (S ** 2)
            # Gradients of totals against weights / biases / input
            d_t_d_w = self.last_input
            d_t_d_inputs = self.weights
            # Gradient of loss against totals
            d_L_d_t = grad * d_out_d_t
            # Gradients of loss against weights / biases / input
            d_L_d_w = d_t_d_w[np.newaxis].T @ d_L_d_t[np.newaxis]
            d_L_d_b = d_L_d_t  # d_t_d_b is 1
            d_L_d_inputs = d_t_d_inputs @ d_L_d_t
            # SGD parameter update
            self.weights -= learn_rate * d_L_d_w
            self.biases -= learn_rate * d_L_d_b
            return d_L_d_inputs.reshape(self.last_input_shape)
    def forward(self, input):
        '''Returns a 1d numpy array of class probabilities for the given
        input (any shape); caches the intermediates backprop() needs.'''
        self.last_input_shape = input.shape
        input = input.flatten()
        self.last_input = input
        input_nodes, output_nodes = self.weights.shape
        totals = np.dot(input, self.weights) + self.biases
        self.last_totals = totals
        exp = np.exp(totals)
        return exp / np.sum(exp, axis=0)
| [
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.random.randn"
] | [((268, 290), 'numpy.zeros', 'np.zeros', (['output_nodes'], {}), '(output_nodes)\n', (276, 290), True, 'import numpy as np\n'), ((631, 655), 'numpy.exp', 'np.exp', (['self.last_totals'], {}), '(self.last_totals)\n', (637, 655), True, 'import numpy as np\n'), ((698, 711), 'numpy.sum', 'np.sum', (['t_exp'], {}), '(t_exp)\n', (704, 711), True, 'import numpy as np\n'), ((1165, 1179), 'numpy.exp', 'np.exp', (['totals'], {}), '(totals)\n', (1171, 1179), True, 'import numpy as np\n'), ((189, 231), 'numpy.random.randn', 'np.random.randn', (['input_nodes', 'output_nodes'], {}), '(input_nodes, output_nodes)\n', (204, 231), True, 'import numpy as np\n'), ((1074, 1101), 'numpy.dot', 'np.dot', (['input', 'self.weights'], {}), '(input, self.weights)\n', (1080, 1101), True, 'import numpy as np\n'), ((1201, 1220), 'numpy.sum', 'np.sum', (['exp'], {'axis': '(0)'}), '(exp, axis=0)\n', (1207, 1220), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
//////////////////////////////////////////////////////////////////////////////////////////
// Original author: <NAME>
// Github: https://github.com/aritzLizoain
// My personal website: https://aritzlizoain.github.io/
// Description: CNN Image Segmentation
// Copyright 2020, <NAME>.
// License: MIT License
//////////////////////////////////////////////////////////////////////////////////////////
- load_images (unused)
- get_weights: calculates the weights for the loss function
- process_fits: loads FITS files and creates small sections
- images_small2big: reconstructs small sections
- check_one_object: looks for the chosen category section by section
"""
import os
import sys
import numpy as np
import cv2
from skimage.transform import resize
##############################################################
# NOT USED IN VERSION 2.0.
# THE IMAGES ARE NOW SAVED AND LOADED AS ARRAYS, NOT AS PNG FILES
# def load_images(TRAIN_PATH='', TEST_PATH='',\
# TEST_PREDICTIONS_PATH='',IMG_WIDTH = \
# 256, IMG_HEIGHT = 256):
# train_ids = next(os.walk(TRAIN_PATH))[2]
# test_ids = next(os.walk(TEST_PATH))[2]
# # Get and resize train images and masks
# images = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH,3)\
# , dtype=np.uint8)
# test_images = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH\
# , 3), dtype=np.uint8)
# sys.stdout.flush()
# # # train images
# for n,id_ in enumerate(train_ids):
# img = cv2.imread(TRAIN_PATH + id_)
# img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant'\
# , preserve_range=True)
# images[n] = img
# # # test images
# for n,id_ in enumerate(test_ids):
# mask_ = cv2.imread(TEST_PATH + id_)
# mask_ = resize(mask_, (IMG_HEIGHT, IMG_WIDTH),\
# preserve_range=True, mode='constant')
# test_images[n] = mask_
# print('Dataset correctly loaded')
# return images, test_images
#-------------------------------------------------------------
def get_weights(images,test_images):
    """Return per-class loss weights, inversely proportional to how often
    each class appears in the training masks, normalised so the weights
    sum to the number of classes. ``test_images`` is accepted for API
    compatibility but intentionally not used."""
    from mask import get_percentages
    # Only the training images contribute to the class statistics.
    all_images = images
    unique_elements, percentage = get_percentages(all_images)
    # Rare classes get large weights, frequent classes small ones.
    inverse_percentages = 1 / percentage
    return inverse_percentages / sum(inverse_percentages) * len(unique_elements)
#-------------------------------------------------------------
def process_fits(name='name.fits', size=256, normalized='yes'\
                 , normalization_value=255):
    """Load a FITS image, optionally normalize it, cut out its right half
    and split that half into size x size sections.

    Parameters: name (FITS file path/name resolvable by astropy),
    size (side length of each square section), normalized ('yes'/'no'),
    normalization_value (peak value after normalization when 'yes').
    Returns (image_data_use, images_small, details) where details is
    np.array([size, amount_images_high, amount_images_wide]).
    NOTE(review): if `normalized` is neither 'yes' nor 'no', an error is
    printed but execution continues and `image_data_use` stays all zeros.
    """
    import matplotlib.pyplot as plt
    from astropy.visualization import astropy_mpl_style
    plt.style.use(astropy_mpl_style)
    from astropy.utils.data import get_pkg_data_filename
    from astropy.io import fits
    import numpy as np
    #LOADING THE IMAGE AND GETTING INFORMATION
    image_file = get_pkg_data_filename(name)
    image_data = fits.getdata(image_file, ext=0)
    # image_data=image_data/100
    # normalize so the maximum pixel becomes normalization_value
    if normalized=='yes':
        maximum_value=np.amax(image_data)
        image_data_normalized=image_data/maximum_value*\
            normalization_value
    elif normalized=='no':
        # image_data=image_data
        None
    else:
        print('   ERROR: The given input for the normalization\
              variable is not an option. Please choose yes/no')
    #information about the original full image
    image_length=image_data.shape[1]
    image_height=image_data.shape[0]
    amount_images_wide=int((image_length/2)/size) #we will only
    #take half of the image
    amount_images_high=int(image_height/size)
    # (A commented-out RESIZE alternative that padded to a whole number of
    # sections instead of cutting was removed here; CUT below is used.)
    #CUT: keep the rightmost amount_images_wide*size columns
    number_of_images = amount_images_wide*amount_images_high
    image_data_use=np.zeros((amount_images_high*size,amount_images_wide*size))
    starting_value=image_data.shape[1]-image_data_use.shape[1]
    if normalized=='yes':
        for i in range(0,image_data_use.shape[0]):
            for j in range (0,image_data_use.shape[1]):
                image_data_use[i,j] = image_data_normalized[i,j + starting_value]
        print('   Cut and normalized real test image shape: {0}'.format(image_data_use.shape))
        plt.figure()
        plt.grid(False)
        plt.imshow(image_data_use)
        plt.colorbar()
        plt.title('Normalized and cut real test image', fontsize=15)
        plt.show()
    elif normalized=='no':
        for i in range(0,image_data_use.shape[0]):
            for j in range (0,image_data_use.shape[1]):
                image_data_use[i,j] = image_data[i,j + starting_value]
        plt.figure()
        plt.grid(False)
        plt.imshow(image_data_use)
        plt.colorbar()
        plt.title('Cut real test image', fontsize=20)
        plt.show()
        print('   Cut real test image shape: {0}'.format(image_data_use.shape))
    # Create the smaller sections (row-major over the section grid)
    print('   Creating {1} sections of size {0}X{0}...'.format(size, number_of_images))
    images_small=np.zeros((number_of_images,size,size))
    # print('   Images small shape: {0}'.format(images_small.shape))
    for i in range(0, amount_images_wide):
        for j in range(0, amount_images_high):
            for x in range(0, size):
                for y in range (0, size):
                    images_small[i+j*(amount_images_wide),y,x]=image_data_use[y+j*size,x+i*size]
    print('   Real test images correctly created')
    details=np.array([size, amount_images_high, amount_images_wide], dtype=int)
    return image_data_use, images_small, details
#----------------------------------------------------------------------------
# from mask input of (n_sections, size, size, 4) gives mask output of (size, size, 4)
def images_small2big(images, details):
    """Reassemble small sections into the full predicted image.

    Parameters: images of shape (n_sections, size, size, dimensions) and
    details = [size, amount_images_high, amount_images_wide]. Sections are
    laid out row-major: section i + j*amount_images_wide goes to block
    row j, block column i. Returns an array of shape
    (size*amount_images_high, size*amount_images_wide, dimensions).
    """
    size = details[0]
    amount_images_high = details[1]
    amount_images_wide = details[2]
    dimensions = images.shape[3]
    full_image_empty = np.zeros((size*amount_images_high, size*amount_images_wide, dimensions))
    print('   Creating the real predicted test image from the {0} sections...'.format(len(images)))
    for i in range(0, amount_images_wide):
        for j in range(0, amount_images_high):
            # Copy each section with one block-slice assignment instead of
            # the original per-pixel double loop (same result, far faster).
            full_image_empty[j*size:(j+1)*size, i*size:(i+1)*size] = \
                images[i+j*(amount_images_wide)]
    print('   Real test image prediction correctly created')
    return full_image_empty
#----------------------------------------------------------------------------
# CHECK THE ONES WITH A SPECIFIC OBJECT IN SMALL SECTIONS
def check_one_object(test_outputs_real, test_images_real, object_to_find='Cluster', real_percentages=[0,0,0,0], details=[0,0,0]):
    """Scan each predicted section for one category and plot every hit.

    For every section whose predicted mask contains `object_to_find`
    (Background / Glowing / Hot pixel / Cluster), shows three panels:
    the original section, the full predicted label, and a label with only
    the searched category highlighted; axis tick labels are shifted to
    full-image pixel coordinates. `details` is
    [section_size, n_sections_high, n_sections_wide]; `real_percentages`
    is currently unused. Returns None.
    NOTE(review): the mutable list defaults are never mutated here, so
    they are harmless, but None defaults would be safer.
    """
    from mask import get_max_in_mask, mask_to_label
    import matplotlib.pyplot as plt
    import matplotlib.patches as mpatches
    # Map the human-readable category name to its class index.
    if object_to_find=='Background':
        object_number = 0
    elif object_to_find=='Glowing':
        object_number = 1
    elif object_to_find=='Hot pixel':
        object_number = 2
    elif object_to_find=='Cluster':
        object_number = 3
    else:
        print('   ERROR: The given input for the object to find variable is not an option.\
              Please choose background/glowing/hot pixel/cluster')
    #Legend 1: one colored patch per category
    red_patch = mpatches.Patch(color=[1, 0.2, 0.2], label='Cluster')
    blue_patch = mpatches.Patch(color=[0,0.5,1.], label='Hot pixel')
    green_patch = mpatches.Patch(color=[0.35,1.,0.25], label='Glowing')
    black_patch = mpatches.Patch(color=[0./255, 0./255, 0./255], label='Background')
    counter = 0
    for i in range (len(test_outputs_real)):
        check=test_outputs_real[i]
        check=check[np.newaxis, ...]
        # Collapse the one-hot mask to a per-pixel class-index map.
        check=get_max_in_mask(check)
        is_there=object_number in check
        #in order to know the position of each section in the full image
        ychange = int(i/details[2])*details[0] #y axis position
        xchange = (i-int(i/details[2])*details[2])*details[0] #x axis position
        if is_there == True:
            from mask import output_to_label_one_object
            label_with_one_object = output_to_label_one_object(check, object_number)
            label_all_objects = mask_to_label(check, to_print='no')
            fig, ax = plt.subplots(1, 3, figsize=(20, 10))
            # plt.setp(ax, xticklabels=pixels, yticklabels=pixels)
            # Left panel: the raw input section.
            ax[0].grid(False)
            ax0 = ax[0].imshow(np.squeeze(test_images_real[i]))
            ax[0].set_title('Section {0}'.format(i+1), fontsize=25);
            ax[0].set_xlabel('pixels', fontsize=16)
            ax[0].set_ylabel('pixels', fontsize=16)
            # Tick labels are offset so they show full-image coordinates.
            ax[0].set_xticks([0,50,100,150,200,250])
            ax[0].set_xticklabels([0+xchange,50+xchange,100+xchange,150+xchange,200+xchange,250+xchange])
            ax[0].set_yticks([0,50,100,150,200,250])
            ax[0].set_yticklabels([0+ychange,50+ychange,100+ychange,150+ychange,200+ychange,250+ychange])
            cax = fig.add_axes([0.12, 0.16, 0.25, 0.03])
            plt.colorbar(ax0, orientation="horizontal", cax=cax)
            # Middle panel: the full predicted label.
            ax[1].grid(False)
            ax[1].imshow(label_all_objects[0])
            ax[1].set_title('Predicted label', fontsize=25);
            ax[1].set_xlabel('pixels', fontsize=16)
            ax[1].set_ylabel('pixels', fontsize=16)
            ax[1].set_xticks([0,50,100,150,200,250])
            ax[1].set_xticklabels([0+xchange,50+xchange,100+xchange,150+xchange,200+xchange,250+xchange])
            ax[1].set_yticks([0,50,100,150,200,250])
            ax[1].set_yticklabels([0+ychange,50+ychange,100+ychange,150+ychange,200+ychange,250+ychange])
            # Right panel: only the searched category highlighted.
            ax[2].grid(False)
            ax[2].imshow(label_with_one_object[0])
            ax[2].set_title('Finding {0}'.format(object_to_find), fontsize=25);
            ax[2].set_xlabel('pixels', fontsize=16)
            ax[2].set_ylabel('pixels', fontsize=16)
            ax[2].set_xticks([0,50,100,150,200,250])
            ax[2].set_xticklabels([0+xchange,50+xchange,100+xchange,150+xchange,200+xchange,250+xchange])
            ax[2].set_yticks([0,50,100,150,200,250])
            ax[2].set_yticklabels([0+ychange,50+ychange,100+ychange,150+ychange,200+ychange,250+ychange])
            plt.legend(loc='upper center', bbox_to_anchor=(2.1, 1.5), fontsize=16,\
                       handles=[red_patch, blue_patch, green_patch, black_patch], ncol=4)
            plt.show() #the image is not being saved
            counter=counter + 1
            # print('   {1} found in section {0}'.format(i, object_to_find))
        else:
            counter=counter
    print('   {1} found in {0} sections'.format(counter, object_to_find))
    return None
| [
"matplotlib.pyplot.grid",
"numpy.array",
"astropy.utils.data.get_pkg_data_filename",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.style.use",
"mask.get_percentages",
"mask.output_to_label_one_object",
"numpy.squeeze",
"matplotlib.patches.Patch",
"matplotlib.pyplot.title",
"matplotlib.pyplot.le... | [((2433, 2460), 'mask.get_percentages', 'get_percentages', (['all_images'], {}), '(all_images)\n', (2448, 2460), False, 'from mask import get_percentages\n'), ((2976, 3008), 'matplotlib.pyplot.style.use', 'plt.style.use', (['astropy_mpl_style'], {}), '(astropy_mpl_style)\n', (2989, 3008), True, 'import matplotlib.pyplot as plt\n'), ((3190, 3217), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['name'], {}), '(name)\n', (3211, 3217), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((3235, 3266), 'astropy.io.fits.getdata', 'fits.getdata', (['image_file'], {'ext': '(0)'}), '(image_file, ext=0)\n', (3247, 3266), False, 'from astropy.io import fits\n'), ((5445, 5509), 'numpy.zeros', 'np.zeros', (['(amount_images_high * size, amount_images_wide * size)'], {}), '((amount_images_high * size, amount_images_wide * size))\n', (5453, 5509), True, 'import numpy as np\n'), ((6698, 6738), 'numpy.zeros', 'np.zeros', (['(number_of_images, size, size)'], {}), '((number_of_images, size, size))\n', (6706, 6738), True, 'import numpy as np\n'), ((7147, 7214), 'numpy.array', 'np.array', (['[size, amount_images_high, amount_images_wide]'], {'dtype': 'int'}), '([size, amount_images_high, amount_images_wide], dtype=int)\n', (7155, 7214), True, 'import numpy as np\n'), ((7671, 7747), 'numpy.zeros', 'np.zeros', (['(size * amount_images_high, size * amount_images_wide, dimensions)'], {}), '((size * amount_images_high, size * amount_images_wide, dimensions))\n', (7679, 7747), True, 'import numpy as np\n'), ((9060, 9112), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[1, 0.2, 0.2]', 'label': '"""Cluster"""'}), "(color=[1, 0.2, 0.2], label='Cluster')\n", (9074, 9112), True, 'import matplotlib.patches as mpatches\n'), ((9130, 9184), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0, 0.5, 1.0]', 'label': '"""Hot pixel"""'}), "(color=[0, 0.5, 1.0], label='Hot pixel')\n", (9144, 9184), True, 'import 
matplotlib.patches as mpatches\n'), ((9200, 9256), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0.35, 1.0, 0.25]', 'label': '"""Glowing"""'}), "(color=[0.35, 1.0, 0.25], label='Glowing')\n", (9214, 9256), True, 'import matplotlib.patches as mpatches\n'), ((9272, 9347), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '[0.0 / 255, 0.0 / 255, 0.0 / 255]', 'label': '"""Background"""'}), "(color=[0.0 / 255, 0.0 / 255, 0.0 / 255], label='Background')\n", (9286, 9347), True, 'import matplotlib.patches as mpatches\n'), ((3372, 3391), 'numpy.amax', 'np.amax', (['image_data'], {}), '(image_data)\n', (3379, 3391), True, 'import numpy as np\n'), ((5885, 5897), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5895, 5897), True, 'import matplotlib.pyplot as plt\n'), ((5906, 5921), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5914, 5921), True, 'import matplotlib.pyplot as plt\n'), ((5930, 5956), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_data_use'], {}), '(image_data_use)\n', (5940, 5956), True, 'import matplotlib.pyplot as plt\n'), ((5965, 5979), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5977, 5979), True, 'import matplotlib.pyplot as plt\n'), ((5988, 6048), 'matplotlib.pyplot.title', 'plt.title', (['"""Normalized and cut real test image"""'], {'fontsize': '(15)'}), "('Normalized and cut real test image', fontsize=15)\n", (5997, 6048), True, 'import matplotlib.pyplot as plt\n'), ((6057, 6067), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6065, 6067), True, 'import matplotlib.pyplot as plt\n'), ((9494, 9516), 'mask.get_max_in_mask', 'get_max_in_mask', (['check'], {}), '(check)\n', (9509, 9516), False, 'from mask import get_max_in_mask, mask_to_label\n'), ((6290, 6302), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6300, 6302), True, 'import matplotlib.pyplot as plt\n'), ((6311, 6326), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', 
(6319, 6326), True, 'import matplotlib.pyplot as plt\n'), ((6335, 6361), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_data_use'], {}), '(image_data_use)\n', (6345, 6361), True, 'import matplotlib.pyplot as plt\n'), ((6370, 6384), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6382, 6384), True, 'import matplotlib.pyplot as plt\n'), ((6393, 6438), 'matplotlib.pyplot.title', 'plt.title', (['"""Cut real test image"""'], {'fontsize': '(20)'}), "('Cut real test image', fontsize=20)\n", (6402, 6438), True, 'import matplotlib.pyplot as plt\n'), ((6447, 6457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6455, 6457), True, 'import matplotlib.pyplot as plt\n'), ((9876, 9924), 'mask.output_to_label_one_object', 'output_to_label_one_object', (['check', 'object_number'], {}), '(check, object_number)\n', (9902, 9924), False, 'from mask import output_to_label_one_object\n'), ((9957, 9992), 'mask.mask_to_label', 'mask_to_label', (['check'], {'to_print': '"""no"""'}), "(check, to_print='no')\n", (9970, 9992), False, 'from mask import get_max_in_mask, mask_to_label\n'), ((10015, 10051), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 10)'}), '(1, 3, figsize=(20, 10))\n', (10027, 10051), True, 'import matplotlib.pyplot as plt\n'), ((10774, 10826), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['ax0'], {'orientation': '"""horizontal"""', 'cax': 'cax'}), "(ax0, orientation='horizontal', cax=cax)\n", (10786, 10826), True, 'import matplotlib.pyplot as plt\n'), ((11982, 12123), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper center"""', 'bbox_to_anchor': '(2.1, 1.5)', 'fontsize': '(16)', 'handles': '[red_patch, blue_patch, green_patch, black_patch]', 'ncol': '(4)'}), "(loc='upper center', bbox_to_anchor=(2.1, 1.5), fontsize=16,\n handles=[red_patch, blue_patch, green_patch, black_patch], ncol=4)\n", (11992, 12123), True, 'import matplotlib.pyplot as plt\n'), ((12157, 12167), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (12165, 12167), True, 'import matplotlib.pyplot as plt\n'), ((10180, 10211), 'numpy.squeeze', 'np.squeeze', (['test_images_real[i]'], {}), '(test_images_real[i])\n', (10190, 10211), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import mnist_data
import vae
""" parameters """
model_no = '299'
IMAGE_SIZE_MNIST = 28
n_hidden = 500
dim_img = IMAGE_SIZE_MNIST**2 # number of pixels for a MNIST image
dim_z = 10
""" build graph """
# input placeholders
x = tf.placeholder(tf.float32, shape=[None, dim_img], name='target_img')
y = tf.placeholder(tf.float32, shape=[None, mnist_data.NUM_LABELS], name='target_labels')
# dropout
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# network architecture
rec_loss = vae.autoencoder_rec_loss(x, y, dim_img, dim_z, n_hidden, keep_prob)
sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver = tf.train.import_meta_graph('models/mnist_gan.ckpt-'+model_no+'.meta')
saver.restore(sess, './models/mnist_gan.ckpt-'+model_no)
def OneHot(X, n=10, negative_class=0.):
    """Encode a flat sequence of class indices as one-hot rows.

    Every cell starts at ``negative_class``; the cell at each sample's
    class index is set to 1. When ``n`` is None the number of classes is
    inferred as max(X) + 1.
    """
    labels = np.asarray(X).flatten()
    if n is None:
        n = np.max(labels) + 1
    encoded = np.full((len(labels), n), negative_class, dtype=float)
    encoded[np.arange(len(labels)), labels] = 1.
    return encoded
def compute_avg_rec_error(x_sample, y_sample, repeats, n=3):
    """Estimate the reconstruction loss of one (image, label) sample.

    The sample is tiled ``repeats`` times into a batch, the graph's
    reconstruction-loss op is evaluated ``n`` times, and the mean of the
    runs is returned. Relies on the module-level ``sess``, ``rec_loss``,
    ``x``, ``y`` and ``keep_prob``.
    """
    y_encoded = OneHot(y_sample)
    x_batch = np.repeat([x_sample], repeats, axis=0)
    y_batch = np.repeat(y_encoded, repeats, axis=0)
    feed = {x: x_batch, y: y_batch, keep_prob: 1}
    total = 0.0
    for _ in range(n):
        total += sess.run(rec_loss, feed_dict=feed)
    return total / n
"vae.autoencoder_rec_loss",
"tensorflow.InteractiveSession",
"numpy.repeat",
"tensorflow.placeholder",
"tensorflow.train.Saver",
"numpy.asarray",
"numpy.max",
"tensorflow.train.import_meta_graph"
] | [((274, 342), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, dim_img]', 'name': '"""target_img"""'}), "(tf.float32, shape=[None, dim_img], name='target_img')\n", (288, 342), True, 'import tensorflow as tf\n'), ((347, 437), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, mnist_data.NUM_LABELS]', 'name': '"""target_labels"""'}), "(tf.float32, shape=[None, mnist_data.NUM_LABELS], name=\n 'target_labels')\n", (361, 437), True, 'import tensorflow as tf\n'), ((456, 500), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (470, 500), True, 'import tensorflow as tf\n'), ((536, 603), 'vae.autoencoder_rec_loss', 'vae.autoencoder_rec_loss', (['x', 'y', 'dim_img', 'dim_z', 'n_hidden', 'keep_prob'], {}), '(x, y, dim_img, dim_z, n_hidden, keep_prob)\n', (560, 603), False, 'import vae\n'), ((612, 635), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (633, 635), True, 'import tensorflow as tf\n'), ((645, 661), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (659, 661), True, 'import tensorflow as tf\n'), ((670, 743), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["('models/mnist_gan.ckpt-' + model_no + '.meta')"], {}), "('models/mnist_gan.ckpt-' + model_no + '.meta')\n", (696, 743), True, 'import tensorflow as tf\n'), ((1124, 1162), 'numpy.repeat', 'np.repeat', (['[x_sample]', 'repeats'], {'axis': '(0)'}), '([x_sample], repeats, axis=0)\n', (1133, 1162), True, 'import numpy as np\n'), ((1177, 1213), 'numpy.repeat', 'np.repeat', (['y_sample', 'repeats'], {'axis': '(0)'}), '(y_sample, repeats, axis=0)\n', (1186, 1213), True, 'import numpy as np\n'), ((846, 859), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (856, 859), True, 'import numpy as np\n'), ((900, 909), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (906, 909), True, 'import numpy as np\n')] |
import numpy
import pandas
from pathlib import Path
def raw_to_dataframe(raw):
    """Decode a raw int16 buffer into a DataFrame indexed by elapsed seconds.

    The buffer holds (t, x, y, z) quadruples. Rows are rotated so the
    minimum of x sits in the middle of the curve, then the raw timestamps
    (microseconds) are converted to elapsed seconds and used as the index.
    """
    samples = numpy.frombuffer(raw, dtype='int16').reshape(-1, 4)
    df = pandas.DataFrame(samples, columns=['t', 'x', 'y', 'z'])
    # Center the curve on the x minimum
    shift = len(df) // 2 - df['x'].idxmin()
    df = df.reindex(numpy.roll(df.index, shift)).reset_index(drop=True)
    # Timestamps -> elapsed seconds relative to the first sample
    df['t'] = df['t'].diff().cumsum() * 1e-6
    df.loc[0, 't'] = 0.
    return df.set_index('t')
def read_data(fname):
    """Load a CSV of logged int16 samples and index it by elapsed seconds.

    The 't' column holds raw microsecond timestamps; they are converted to
    seconds elapsed since the first sample and become the index.
    """
    df = pandas.read_csv(fname, dtype='h')
    df['t'] = df['t'].diff().cumsum() * 1e-6
    df.loc[0, 't'] = 0.
    return df.set_index('t')
def normalize(df):
    """Scale each axis relative to its resting level and crop around the dip.

    Each column is divided by its mean over the first 50 samples and
    shifted so the resting level sits at 0. The dip (x below -0.2) marks
    the event; a window of three dip-widths centred on it is kept and the
    time index is rebased to start at 0.
    """
    df = df.astype(float)
    baseline = df.iloc[:50].mean()
    df = df / baseline - 1
    dip = df[df['x'] < -0.2]
    width = dip.index[-1] - dip.index[0]
    start = dip.index[0] - width
    stop = dip.index[-1] + width
    df = df[start:stop]
    df.index -= df.index[0]
    return df
def load_data(coin, path: Path = None):
    """Load and normalize every measurement curve recorded for `coin`.

    Curves are read from files named '<coin>-*.csv' under `path`
    (default: the local 'data' directory); a string path is accepted.
    """
    if path is None:
        path = Path('data')
    elif isinstance(path, str):
        path = Path(path)
    return [normalize(read_data(fname))
            for fname in path.glob('%s-*.csv' % coin)]
def axis_features(curve):
    """Extract drop features from a single-axis curve.

    Walks the curve up to the first occurrence of its global minimum,
    tracking the running minimum, and records the largest rise above it.
    Returns {'min': global minimum, 'l0': running minimum at the moment of
    the largest rise, 'd0': size of that largest rise}.
    """
    # Hoisted out of the loop: the original called curve.min() on every
    # iteration, making the scan accidentally O(n^2).
    curve_min = curve.min()
    diff = 0
    low = curve[0]
    d0_low = low
    for value in curve:
        low = min(low, value)
        d = value - low
        if d > diff:
            diff = d
            d0_low = low
        if value == curve_min:
            break
    return {'min': curve_min, 'l0': d0_low, 'd0': diff}
def summary(curve, coin):
    """Flatten the per-axis drop features of one curve into a labelled record.

    Key order (coin, min_*, l0_*, d0_*) is preserved so DataFrame columns
    built from these records keep the expected layout.
    """
    feats = {axis: axis_features(curve[axis]) for axis in ('x', 'y', 'z')}
    record = {'coin': coin}
    for stat in ('min', 'l0', 'd0'):
        for axis in ('x', 'y', 'z'):
            record['%s_%s' % (stat, axis)] = feats[axis][stat]
    return record
def data_summary(c1, c2):
    """Build a feature table from ten 1-euro curves (c1) and ten 2-euro curves (c2)."""
    labels = ['1 €'] * 10 + ['2 €'] * 10
    rows = [summary(curve, coin) for curve, coin in zip(c1 + c2, labels)]
    return pandas.DataFrame(rows)
def compare_min(df):
    """Box-plot the per-axis curve minima, grouped by coin denomination."""
    columns = ['min_x', 'min_y', 'min_z']
    df.boxplot(columns, by='coin', layout=(1, 3), figsize=(12, 6))
def compare_d0(df):
    """Box-plot the per-axis largest-drop (d0) features, grouped by coin."""
    columns = ['d0_x', 'd0_y', 'd0_z']
    df.boxplot(columns, by='coin', layout=(1, 3), figsize=(12, 6))
def compare_l0(df):
    """Box-plot the per-axis pre-drop-low (l0) features, grouped by coin."""
    columns = ['l0_x', 'l0_y', 'l0_z']
    df.boxplot(columns, by='coin', layout=(1, 3), figsize=(12, 6))
def classify_coin(curve):
    """Guess the coin denomination from the depth of the x-axis dip.

    Returns 1 or 2 (euro) when the minimum falls in the corresponding
    band, or -1 when the curve matches neither.
    """
    depth = axis_features(curve['x'])['min']
    if -0.55 < depth < -0.40:
        return 1
    if -0.75 < depth < -0.55:
        return 2
    return -1
| [
"numpy.roll",
"pandas.read_csv",
"pathlib.Path",
"pandas.DataFrame",
"numpy.frombuffer"
] | [((92, 128), 'numpy.frombuffer', 'numpy.frombuffer', (['raw'], {'dtype': '"""int16"""'}), "(raw, dtype='int16')\n", (108, 128), False, 'import numpy\n'), ((183, 235), 'pandas.DataFrame', 'pandas.DataFrame', (['data'], {'columns': "['t', 'x', 'y', 'z']"}), "(data, columns=['t', 'x', 'y', 'z'])\n", (199, 235), False, 'import pandas\n'), ((520, 553), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'dtype': '"""h"""'}), "(fname, dtype='h')\n", (535, 553), False, 'import pandas\n'), ((1025, 1037), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (1029, 1037), False, 'from pathlib import Path\n'), ((1083, 1093), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (1087, 1093), False, 'from pathlib import Path\n'), ((324, 351), 'numpy.roll', 'numpy.roll', (['df.index', 'shift'], {}), '(df.index, shift)\n', (334, 351), False, 'import numpy\n')] |
import pandas
from scipy.stats import zscore
from IMLearn.learners import MultivariateGaussian
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from typing import NoReturn, Tuple
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
FEATURE_RESPONSE_PEARSON_COR_PLOT_TITLE_FORMAT = "feature:{}, correlation:{}"
FILL_TONEXTY = "tonexty"
CLR_LIGHT_GREY = "lightgrey"
MODE_MARKERS_AND_LINES = "markers+lines"
MODE_LINES = "lines"
PLOT_HEIGHT = 400
TITLE_ATTR = "title"
RESPONSE = 1
FEATURES = 0
COL_BEDROOMS = "bedrooms"
COL_PRICE = "price"
COL_YR_BUILT = "yr_built"
COL_YR_RENOVATED = "yr_renovated"
COL_DATE = "date"
COL_ID = "id"
HOUSE_PRICES_CSV_PATH = "../datasets/house_prices.csv"
Q4_LOWER_ERROR_TITLE = "mean - 2*std"
Q4_UPPER_ERROR_TITLE = "mean + 2*std"
Q4_LINE_TITLE = "Average Loss"
Q4_PLOT_YAXIS_TITLE = "average loss"
Q4_PLOT_XAXIS_TITLE = "percentage of sampled train data"
Q4_PLOT_TITLE = ("Average Loss as a Function of percentage of sampled train "
"data")
def load_data(filename: str) -> pd.DataFrame:
    """
    Load house prices dataset and preprocess data.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Design matrix and response vector (prices) - either as a single
    DataFrame or a Tuple[DataFrame, Series]
    """
    return clean_data(pd.read_csv(filename))
def clean_data(X: pd.DataFrame) -> pd.DataFrame:
    """
    Clean the raw house-prices frame.

    Drops identifier columns, coerces everything to numeric, fixes the
    dataset's known 33-bedroom typo, and removes rows whose values are
    out of range. Returns a new DataFrame; the cleaned rows keep the
    indices assigned after the initial dropna/reset.
    """
    X = X.drop([COL_ID, COL_DATE, "zipcode"], axis=1)
    X = X.apply(pandas.to_numeric, errors='coerce')
    X = X.dropna().reset_index(drop=True)
    # Known data-entry error in the dataset: 33 bedrooms should be 3.
    # (Safe to do before filtering: both 33 and 3 pass the bedrooms > 0 check.)
    X.loc[X[COL_BEDROOMS] == 33, COL_BEDROOMS] = 3
    # One boolean mask instead of the original 17 sequential drops —
    # identical result, single pass over the frame.
    positive = ((X.price > 0) & (X.bedrooms > 0) & (X.bathrooms > 0)
                & (X.floors > 0) & (X.sqft_living > 0) & (X.sqft_lot > 0)
                & (X.sqft_above > 0) & (X.sqft_living15 > 0)
                & (X.sqft_lot15 > 0)
                & (X.sqft_basement >= 0))  # basement may be 0 if absent
    valid_codes = (X.waterfront.isin([0, 1])
                   & X.view.isin(range(0, 5))
                   & X.condition.isin(range(1, 6))
                   & X.grade.isin(range(1, 14))
                   & X.yr_built.isin(range(0, 2016))
                   & X.yr_renovated.isin(range(0, 2016)))
    return X[positive & valid_codes]
def feature_evaluation(X: pd.DataFrame, y: pd.Series,
                       output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name
    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem

    y : array-like of shape (n_samples, )
        Response vector to evaluate against

    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    std_y = np.std(y)
    # Last row/column of the fitted covariance corresponds to the response.
    covariance_matrix = MultivariateGaussian().fit(
        np.array(pd.concat([X, y], axis=1))).cov_
    # Bug fix: the original iterated zip(range(X.shape[0]), X) — the ROW
    # count — which would silently truncate the feature set whenever there
    # are fewer samples than features. enumerate(X) walks the columns.
    for feature_index, feature_name in enumerate(X):
        std_x = np.std(X[feature_name])
        p_correlation = covariance_matrix[feature_index, -1] / (std_x * std_y)
        go.Figure(
            [go.Scatter(x=X[feature_name], y=y, mode="markers")],
            layout=go.Layout(
                title=FEATURE_RESPONSE_PEARSON_COR_PLOT_TITLE_FORMAT.format(
                    feature_name, p_correlation),
                xaxis={TITLE_ATTR: feature_name},
                yaxis={TITLE_ATTR: COL_PRICE},
                height=PLOT_HEIGHT)
        ).write_image(f"{output_path}/{feature_name}.png")
def fit_model_over_data(train_set: Tuple[pd.DataFrame, pd.Series],
                        test_set: Tuple[pd.DataFrame, pd.Series],
                        start_p: int, end_p: int = 100,
                        repetitions: int = 10) -> None:
    """
    For every percentage p in <start_p>%, <start_p+1>%, ..., <end_p>%, repeat
    the following <repetitions> times:
        1) Sample p% of the overall training data
        2) Fit linear model (including intercept) over sampled set
        3) Test fitted model over test set
        4) Store average and variance of loss over test set
    Then plot average loss as function of training size with error ribbon of
    size (mean-2*std, mean+2*std)
    """
    test_features_arr = np.asarray(test_set[FEATURES])
    test_response_arr = np.asarray(test_set[RESPONSE])
    # Hoisted out of the loop: the concatenated training frame does not
    # depend on the sampling percentage, so building it once is enough.
    train_concat = pd.concat(
        [train_set[FEATURES], train_set[RESPONSE]], axis=1)
    p_values = tuple(range(start_p, end_p + 1))
    loss_values = []
    var_loss = []
    for p in p_values:
        p_loss_values = []
        for _ in range(repetitions):
            train_sample = train_concat.sample(frac=(p / 100))
            model = LinearRegression()
            model.fit(train_sample.drop([COL_PRICE], axis=1),
                      train_sample[COL_PRICE])
            p_loss_values.append(
                model.loss(test_features_arr, test_response_arr))
        loss_values.append(np.mean(p_loss_values, axis=0))
        var_loss.append(np.std(p_loss_values, axis=0))
    go.Figure([
        go.Scatter(x=p_values, y=loss_values, name=Q4_LINE_TITLE,
                   showlegend=True, mode=MODE_MARKERS_AND_LINES),
        go.Scatter(x=p_values,
                   y=np.array(loss_values) + 2 * np.array(var_loss),
                   fill=FILL_TONEXTY, mode=MODE_LINES,
                   line=dict(color=CLR_LIGHT_GREY),
                   showlegend=False, name=Q4_UPPER_ERROR_TITLE),
        go.Scatter(x=p_values,
                   y=np.array(loss_values) - 2 * np.array(var_loss),
                   fill=None, mode=MODE_LINES, line=dict(color=CLR_LIGHT_GREY),
                   showlegend=False, name=Q4_LOWER_ERROR_TITLE)
    ],
        layout=go.Layout(
            title=Q4_PLOT_TITLE,
            xaxis={TITLE_ATTR: Q4_PLOT_XAXIS_TITLE},
            yaxis={TITLE_ATTR: Q4_PLOT_YAXIS_TITLE},
            height=PLOT_HEIGHT)
    ).show()
if __name__ == '__main__':
    # Fixed seed so the sampling in Question 4 is reproducible.
    np.random.seed(0)
    # Question 1 - Load and preprocessing of housing prices dataset
    data = load_data(HOUSE_PRICES_CSV_PATH)
    # Question 2 - Feature evaluation with respect to response
    feature_evaluation(data.drop([COL_PRICE], axis=1), data[COL_PRICE])
    # Question 3 - Split samples into training- and testing sets (75/25).
    (train_features, train_prices,
     test_features, test_prices) = split_train_test(
        data.drop([COL_PRICE], axis=1), data[COL_PRICE], 0.75)
    # Question 4 - Fit model over increasing percentages of the overall
    # training data, starting from 10%.
    fit_model_over_data((train_features, train_prices),
                        (test_features, test_prices), 10)
| [
"numpy.mean",
"plotly.graph_objects.Layout",
"pandas.read_csv",
"numpy.asarray",
"IMLearn.learners.regressors.LinearRegression",
"numpy.array",
"plotly.graph_objects.Scatter",
"numpy.random.seed",
"numpy.std",
"pandas.concat",
"IMLearn.learners.MultivariateGaussian"
] | [((1514, 1535), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (1525, 1535), True, 'import pandas as pd\n'), ((3467, 3476), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (3473, 3476), True, 'import numpy as np\n'), ((4928, 4958), 'numpy.asarray', 'np.asarray', (['test_set[FEATURES]'], {}), '(test_set[FEATURES])\n', (4938, 4958), True, 'import numpy as np\n'), ((4983, 5013), 'numpy.asarray', 'np.asarray', (['test_set[RESPONSE]'], {}), '(test_set[RESPONSE])\n', (4993, 5013), True, 'import numpy as np\n'), ((6644, 6661), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (6658, 6661), True, 'import numpy as np\n'), ((3663, 3686), 'numpy.std', 'np.std', (['X[feature_name]'], {}), '(X[feature_name])\n', (3669, 3686), True, 'import numpy as np\n'), ((5157, 5218), 'pandas.concat', 'pd.concat', (['[train_set[FEATURES], train_set[RESPONSE]]'], {'axis': '(1)'}), '([train_set[FEATURES], train_set[RESPONSE]], axis=1)\n', (5166, 5218), True, 'import pandas as pd\n'), ((5385, 5403), 'IMLearn.learners.regressors.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5401, 5403), False, 'from IMLearn.learners.regressors import LinearRegression\n'), ((5642, 5672), 'numpy.mean', 'np.mean', (['p_loss_values'], {'axis': '(0)'}), '(p_loss_values, axis=0)\n', (5649, 5672), True, 'import numpy as np\n'), ((5698, 5727), 'numpy.std', 'np.std', (['p_loss_values'], {'axis': '(0)'}), '(p_loss_values, axis=0)\n', (5704, 5727), True, 'import numpy as np\n'), ((3502, 3524), 'IMLearn.learners.MultivariateGaussian', 'MultivariateGaussian', ([], {}), '()\n', (3522, 3524), False, 'from IMLearn.learners import MultivariateGaussian\n'), ((3547, 3572), 'pandas.concat', 'pd.concat', (['[X, y]'], {'axis': '(1)'}), '([X, y], axis=1)\n', (3556, 3572), True, 'import pandas as pd\n'), ((5754, 5861), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'p_values', 'y': 'loss_values', 'name': 'Q4_LINE_TITLE', 'showlegend': '(True)', 'mode': 
'MODE_MARKERS_AND_LINES'}), '(x=p_values, y=loss_values, name=Q4_LINE_TITLE, showlegend=True,\n mode=MODE_MARKERS_AND_LINES)\n', (5764, 5861), True, 'import plotly.graph_objects as go\n'), ((6416, 6552), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'title': 'Q4_PLOT_TITLE', 'xaxis': '{TITLE_ATTR: Q4_PLOT_XAXIS_TITLE}', 'yaxis': '{TITLE_ATTR: Q4_PLOT_YAXIS_TITLE}', 'height': 'PLOT_HEIGHT'}), '(title=Q4_PLOT_TITLE, xaxis={TITLE_ATTR: Q4_PLOT_XAXIS_TITLE},\n yaxis={TITLE_ATTR: Q4_PLOT_YAXIS_TITLE}, height=PLOT_HEIGHT)\n', (6425, 6552), True, 'import plotly.graph_objects as go\n'), ((3799, 3849), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'X[feature_name]', 'y': 'y', 'mode': '"""markers"""'}), "(x=X[feature_name], y=y, mode='markers')\n", (3809, 3849), True, 'import plotly.graph_objects as go\n'), ((5930, 5951), 'numpy.array', 'np.array', (['loss_values'], {}), '(loss_values)\n', (5938, 5951), True, 'import numpy as np\n'), ((6202, 6223), 'numpy.array', 'np.array', (['loss_values'], {}), '(loss_values)\n', (6210, 6223), True, 'import numpy as np\n'), ((5958, 5976), 'numpy.array', 'np.array', (['var_loss'], {}), '(var_loss)\n', (5966, 5976), True, 'import numpy as np\n'), ((6230, 6248), 'numpy.array', 'np.array', (['var_loss'], {}), '(var_loss)\n', (6238, 6248), True, 'import numpy as np\n')] |
import random
import os
import numpy as np
from typing import List, Tuple, Dict, Set, Optional
def print_grid(grid: list):
for row in grid:
row_str = " ".join([str(el) for el in row])
print(row_str)
def txt_to_grid(file_name, simple_layout=False, use_curr_workspace=False):
if use_curr_workspace:
workspace_path = "\\".join(os.getcwd().split("\\")[:-1])
file_name = workspace_path + "/Benchmark/maps/" + file_name
grid = None
with open(file_name) as f:
curr_line = f.readline()
width = len(curr_line) - 3 # Note that '\n' is included
# print(width)
grid = []
while curr_line:
curr_line = f.readline()
if curr_line[1] == "#":
break
curr_row = []
for i in range(1, len(curr_line)-2):
if curr_line[i] == " ":
curr_row.append(0)
else:
if simple_layout:
curr_row.append(1)
else:
curr_row.append(int(curr_line[i]))
grid.append(curr_row)
return grid
class UniformRandomGrid:
def __init__(self):
abs_path = os.path.dirname(os.path.abspath(__file__))
file_name = abs_path + "/maps/droppoff_grid.txt"
self._static_grid = np.array(txt_to_grid(file_name))
def get_uniform_random_grid(self, shape: Tuple[int, int], num_pickup_locs):
# workspace_path = "\\".join(os.getcwd().split("\\")[:-1])
static_grid = self._static_grid
assert num_pickup_locs < shape[0] * shape[1], "num_pickup_locs must be less than number of elements in created grid"
rand_grid = np.zeros(shape, dtype=int)
y_len, x_len = shape[0], shape[1]
curr_no_locs = 0
while curr_no_locs < num_pickup_locs:
y = np.random.randint(0, y_len)
x = np.random.randint(0, x_len)
if rand_grid[y][x] == 0:
rand_grid[y][x] = 1
curr_no_locs += 1
grid = np.concatenate([static_grid, rand_grid], axis=1)
# print(np.count_nonzero(grid))
return grid # .tolist()
def get_uniform_random_grid(shape: Tuple[int, int], num_pickup_locs):
# workspace_path = "\\".join(os.getcwd().split("\\")[:-1])
abs_path = os.path.dirname(os.path.abspath(__file__))
file_name = abs_path + "/maps/droppoff_grid.txt"
static_grid = np.array(txt_to_grid(file_name))
assert num_pickup_locs < shape[0] * shape[1], "num_pickup_locs must be less than number of elements in created grid"
rand_grid = np.zeros(shape, dtype=int)
y_len, x_len = shape[0], shape[1]
curr_no_locs = 0
while curr_no_locs < num_pickup_locs:
y = np.random.randint(0, y_len)
x = np.random.randint(0, x_len)
if rand_grid[y][x] == 0:
rand_grid[y][x] = 1
curr_no_locs += 1
grid = np.concatenate([static_grid, rand_grid], axis=1)
# print(np.count_nonzero(grid))
return grid.tolist()
def get_pickup_points(grid):
for y in range(grid):
for x in range(grid[0]):
pass
def get_dropoff_points(grid):
pass
def get_rand_valid_point(grid):
x, y = -1, -1
valid_coord_found = False
while not valid_coord_found:
x = random.randint(0, len(grid[0])-1)
y = random.randint(0, len(grid)-1)
if grid[y][x] == 0:
valid_coord_found = True
return x, y
def main():
grid = txt_to_grid("maps/map_warehouse_1.txt", simple_layout=False)
# 5 width
dropoff_grid = [row[:6] for row in grid]
with open("maps/droppoff_grid.txt", "w") as f:
f.write("#" * (2+len(dropoff_grid[0])) + "\n")
for row in dropoff_grid:
row_str = "".join([str(el) for el in row])
row_str = "#" + row_str + "#\n"
f.write(row_str)
f.write("#" * (2+len(dropoff_grid[0])) + "\n")
pass
if __name__ == "__main__":
# main()
grid = txt_to_grid("maps/droppoff_grid.txt")
print(len(grid), len(grid[0]))
# 560
# 22 * 45
urg = UniformRandomGrid()
rand_grid = urg.get_uniform_random_grid((22, 44), 560)
print(rand_grid)
from GlobalObjs.GraphNX import GridGraph, plot_graph
grid_graph = GridGraph(rand_grid, only_full_G=True)
plot_graph(grid_graph.get_full_G(), "G.png")
# grid = txt_to_grid("maps/map_warehouse.txt", simple_layout=True)
# with open("tmp.txt", "w+") as f:
# f.write("[\n")
# for row in grid:
# f.write(str(row) + ",\n")
# f.write("]")
# print(grid)
# x, y = get_rand_valid_point(grid)
# print(x, y)
| [
"os.getcwd",
"GlobalObjs.GraphNX.GridGraph",
"numpy.zeros",
"numpy.random.randint",
"numpy.concatenate",
"os.path.abspath"
] | [((2628, 2654), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'int'}), '(shape, dtype=int)\n', (2636, 2654), True, 'import numpy as np\n'), ((2942, 2990), 'numpy.concatenate', 'np.concatenate', (['[static_grid, rand_grid]'], {'axis': '(1)'}), '([static_grid, rand_grid], axis=1)\n', (2956, 2990), True, 'import numpy as np\n'), ((4296, 4334), 'GlobalObjs.GraphNX.GridGraph', 'GridGraph', (['rand_grid'], {'only_full_G': '(True)'}), '(rand_grid, only_full_G=True)\n', (4305, 4334), False, 'from GlobalObjs.GraphNX import GridGraph, plot_graph\n'), ((1722, 1748), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'int'}), '(shape, dtype=int)\n', (1730, 1748), True, 'import numpy as np\n'), ((2072, 2120), 'numpy.concatenate', 'np.concatenate', (['[static_grid, rand_grid]'], {'axis': '(1)'}), '([static_grid, rand_grid], axis=1)\n', (2086, 2120), True, 'import numpy as np\n'), ((2360, 2385), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2375, 2385), False, 'import os\n'), ((2768, 2795), 'numpy.random.randint', 'np.random.randint', (['(0)', 'y_len'], {}), '(0, y_len)\n', (2785, 2795), True, 'import numpy as np\n'), ((2808, 2835), 'numpy.random.randint', 'np.random.randint', (['(0)', 'x_len'], {}), '(0, x_len)\n', (2825, 2835), True, 'import numpy as np\n'), ((1244, 1269), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1259, 1269), False, 'import os\n'), ((1878, 1905), 'numpy.random.randint', 'np.random.randint', (['(0)', 'y_len'], {}), '(0, y_len)\n', (1895, 1905), True, 'import numpy as np\n'), ((1922, 1949), 'numpy.random.randint', 'np.random.randint', (['(0)', 'x_len'], {}), '(0, x_len)\n', (1939, 1949), True, 'import numpy as np\n'), ((361, 372), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (370, 372), False, 'import os\n')] |
import cv2
import numpy as np
import math
import os
from time import sleep
def prewitt(nome):
imagem = cv2.imread(nome)
imagemNova = cv2.imread(nome)
colunas = imagem.shape[1]
linhas = imagem.shape[0]
sizeMask = 3
flagStop = sizeMask**2
pixelB = 0
pixelG = 0
pixelR = 0
pixelB2 = 0
pixelG2 = 0
pixelR2 = 0
cont = 0
tam = int(sizeMask - 1)/2
for linha in range(0, linhas):
for coluna in range(0, colunas):
if ((coluna > tam and linha > tam)
and (coluna < colunas - tam and linha < linhas - tam)):
ct = int(coluna - tam)
cT = int(coluna + tam+1)
lt = int(linha - tam)
lT = int(linha + tam+1)
for i in range(lt, lT):
for j in range(ct, cT):
if cont < 3:
pixelB -= imagem[i, j, 0]
pixelG -= imagem[i, j, 1]
pixelR -= imagem[i, j, 2]
elif cont > 5:
pixelB += imagem[i, j, 0]
pixelG += imagem[i, j, 1]
pixelR += imagem[i, j, 2]
cont += 1
cont = 0
for j in range(ct, cT):
for i in range(lt, lT):
if cont < 3:
pixelB2 -= imagem[i, j, 0]
pixelG2 -= imagem[i, j, 1]
pixelR2 -= imagem[i, j, 2]
elif cont > 5:
pixelB2 += imagem[i, j, 0]
pixelG2 += imagem[i, j, 1]
pixelR2 += imagem[i, j, 2]
cont += 1
auxBlue = (np.abs(pixelB) + np.abs(pixelB2))
auxGreen = (np.abs(pixelG) + np.abs(pixelG2))
auxRed = (np.abs(pixelR) + np.abs(pixelR2))
if auxBlue > 255:
auxBlue = 255
elif auxBlue < 0:
auxBlue = 0
if auxGreen > 255:
auxGreen = 255
elif auxGreen < 0:
auxGreen = 0
if auxRed > 255:
auxRed = 255
elif auxRed < 0:
auxRed = 0
imagemNova.itemset((linha, coluna, 0), auxBlue)
imagemNova.itemset((linha, coluna, 1), auxGreen)
imagemNova.itemset((linha, coluna, 2), auxRed)
pixelB = 0
pixelG = 0
pixelR = 0
pixelB2 = 0
pixelG2 = 0
pixelR2 = 0
cont = 0
else:
auxBlue = imagem[linha, coluna, 0]
auxGreen = imagem[linha, coluna, 1]
auxRed = imagem[linha, coluna, 2]
azul = auxBlue
verde = auxGreen
vermelho = auxRed
imagemNova.itemset((linha, coluna, 0), azul)
imagemNova.itemset((linha, coluna, 1), verde)
imagemNova.itemset((linha, coluna, 2), vermelho)
cv2.imwrite("ImagemcomPrewitt.png", imagemNova)
def logInverso(constante, nome):
imagem = cv2.imread(nome)
imagemNova = cv2.imread(nome)
colunas = imagem.shape[1]
linhas = imagem.shape[0]
for coluna in range(0, colunas):
for linha in range(0, linhas):
auxBlue = imagem[linha, coluna, 0]
auxGreen = imagem[linha, coluna, 1]
auxRed = imagem[linha, coluna, 2]
azul = 10 ** (auxBlue/constante)
azul = float("%.0f" % azul)
verde = 10 ** (auxGreen/constante)
verde = float("%.0f" % verde)
vermelho = 10 ** (auxRed/constante)
vermelho = float("%.0f" % vermelho)
imagemNova.itemset((linha, coluna, 0), azul)
imagemNova.itemset((linha, coluna, 1), verde)
imagemNova.itemset((linha, coluna, 2), vermelho)
cv2.imwrite("ImagemcomLogInverso.png", imagemNova)
def logaritmo(constante, nome):
imagem = cv2.imread(nome)
imagemNova = cv2.imread(nome)
colunas = imagem.shape[1]
linhas = imagem.shape[0]
for coluna in range(0, colunas):
for linha in range(0, linhas):
auxBlue = imagem[linha, coluna, 0]
auxGreen = imagem[linha, coluna, 1]
auxRed = imagem[linha, coluna, 2]
azul = constante * (math.log10(1 + auxBlue))
verde = constante * (math.log10(1 + auxGreen))
vermelho = constante * (math.log10(1 + auxRed))
azul = float("%.0f" % azul)
verde = float("%.0f" % verde)
vermelho = float("%.0f" % vermelho)
imagemNova.itemset((linha, coluna, 0), azul)
imagemNova.itemset((linha, coluna, 1), verde)
imagemNova.itemset((linha, coluna, 2), vermelho)
cv2.imwrite("ImagemComLogaritmo.png", imagemNova)
def negativo(nivelDeCinza, nome):
imagem = cv2.imread(nome)
imagemNova = cv2.imread(nome)
colunas = imagem.shape[1]
linhas = imagem.shape[0]
for coluna in range(0, colunas):
for linha in range(0, linhas):
auxBlue = imagem[linha, coluna, 0]
auxGreen = imagem[linha, coluna, 1]
auxRed = imagem[linha, coluna, 2]
azul = nivelDeCinza - auxBlue
verde = nivelDeCinza - auxGreen
vermelho = nivelDeCinza - auxRed
imagemNova.itemset((linha, coluna, 0), azul)
imagemNova.itemset((linha, coluna, 1), verde)
imagemNova.itemset((linha, coluna, 2), vermelho)
cv2.imwrite("ImagemNegativada.png", imagemNova)
def changefiltro(filtronome, nomefoto):
if filtronome == 'negativo':
negativo(256, nomefoto)
elif filtronome == 'logaritmo':
logaritmo(105.88, nomefoto)
elif filtronome == 'logInverso':
logInverso(105.88, nomefoto)
elif filtronome == 'prewitt':
prewitt(nomefoto)
else:
print("nada")
| [
"cv2.imwrite",
"math.log10",
"cv2.imread",
"numpy.abs"
] | [((110, 126), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (120, 126), False, 'import cv2\n'), ((145, 161), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (155, 161), False, 'import cv2\n'), ((3281, 3328), 'cv2.imwrite', 'cv2.imwrite', (['"""ImagemcomPrewitt.png"""', 'imagemNova'], {}), "('ImagemcomPrewitt.png', imagemNova)\n", (3292, 3328), False, 'import cv2\n'), ((3378, 3394), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (3388, 3394), False, 'import cv2\n'), ((3413, 3429), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (3423, 3429), False, 'import cv2\n'), ((4160, 4210), 'cv2.imwrite', 'cv2.imwrite', (['"""ImagemcomLogInverso.png"""', 'imagemNova'], {}), "('ImagemcomLogInverso.png', imagemNova)\n", (4171, 4210), False, 'import cv2\n'), ((4259, 4275), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (4269, 4275), False, 'import cv2\n'), ((4294, 4310), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (4304, 4310), False, 'import cv2\n'), ((5077, 5126), 'cv2.imwrite', 'cv2.imwrite', (['"""ImagemComLogaritmo.png"""', 'imagemNova'], {}), "('ImagemComLogaritmo.png', imagemNova)\n", (5088, 5126), False, 'import cv2\n'), ((5177, 5193), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (5187, 5193), False, 'import cv2\n'), ((5212, 5228), 'cv2.imread', 'cv2.imread', (['nome'], {}), '(nome)\n', (5222, 5228), False, 'import cv2\n'), ((5820, 5867), 'cv2.imwrite', 'cv2.imwrite', (['"""ImagemNegativada.png"""', 'imagemNova'], {}), "('ImagemNegativada.png', imagemNova)\n", (5831, 5867), False, 'import cv2\n'), ((4622, 4645), 'math.log10', 'math.log10', (['(1 + auxBlue)'], {}), '(1 + auxBlue)\n', (4632, 4645), False, 'import math\n'), ((4680, 4704), 'math.log10', 'math.log10', (['(1 + auxGreen)'], {}), '(1 + auxGreen)\n', (4690, 4704), False, 'import math\n'), ((4742, 4764), 'math.log10', 'math.log10', (['(1 + auxRed)'], {}), '(1 + auxRed)\n', (4752, 4764), False, 'import math\n'), ((1871, 1885), 'numpy.abs', 
'np.abs', (['pixelB'], {}), '(pixelB)\n', (1877, 1885), True, 'import numpy as np\n'), ((1888, 1903), 'numpy.abs', 'np.abs', (['pixelB2'], {}), '(pixelB2)\n', (1894, 1903), True, 'import numpy as np\n'), ((1934, 1948), 'numpy.abs', 'np.abs', (['pixelG'], {}), '(pixelG)\n', (1940, 1948), True, 'import numpy as np\n'), ((1951, 1966), 'numpy.abs', 'np.abs', (['pixelG2'], {}), '(pixelG2)\n', (1957, 1966), True, 'import numpy as np\n'), ((1995, 2009), 'numpy.abs', 'np.abs', (['pixelR'], {}), '(pixelR)\n', (2001, 2009), True, 'import numpy as np\n'), ((2012, 2027), 'numpy.abs', 'np.abs', (['pixelR2'], {}), '(pixelR2)\n', (2018, 2027), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from statsmodels.sandbox.stats.multicomp import multipletests
import regreg.api as rr
from ...api import (randomization,
glm_group_lasso,
multiple_queries)
from ...tests.instance import (gaussian_instance,
logistic_instance)
from ...tests.flags import SMALL_SAMPLES, SET_SEED
from ...tests.decorators import (wait_for_return_value,
set_seed_iftrue,
set_sampling_params_iftrue)
from ..query import naive_confidence_intervals, naive_pvalues
from ..M_estimator import restricted_Mest
from ..cv_view import CV_view
from ..glm import (glm_nonparametric_bootstrap,
pairs_bootstrap_glm)
if SMALL_SAMPLES:
nboot = 10
else:
nboot = -1
@set_seed_iftrue(SET_SEED)
@set_sampling_params_iftrue(SMALL_SAMPLES, burnin=10, ndraw=10)
@wait_for_return_value()
def test_cv(n=100, p=50, s=5, signal=7.5, K=5, rho=0.,
randomizer = 'gaussian',
randomizer_scale = 1.,
scale1 = 0.1,
scale2 = 0.2,
lam_frac = 1.,
glmnet = True,
loss = 'gaussian',
bootstrap = False,
condition_on_CVR = True,
marginalize_subgrad = True,
ndraw = 10000,
burnin = 2000,
nboot = nboot):
print(n,p,s, condition_on_CVR, scale1, scale2)
if randomizer == 'laplace':
randomizer = randomization.laplace((p,), scale=randomizer_scale)
elif randomizer == 'gaussian':
randomizer = randomization.isotropic_gaussian((p,),randomizer_scale)
elif randomizer == 'logistic':
randomizer = randomization.logistic((p,), scale=randomizer_scale)
if loss == "gaussian":
X, y, beta, nonzero, sigma = gaussian_instance(n=n, p=p, s=s, rho=rho, signal=signal, sigma=1)
glm_loss = rr.glm.gaussian(X, y)
elif loss == "logistic":
X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)
glm_loss = rr.glm.logistic(X, y)
epsilon = 1./np.sqrt(n)
# view 1
cv = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
if glmnet:
try:
cv.solve(glmnet=glmnet)
except ImportError:
cv.solve(glmnet=False)
else:
cv.solve(glmnet=False)
# for the test make sure we also run the python code
cv_py = CV_view(glm_loss,
loss_label=loss,
lasso_randomization=randomizer,
epsilon=epsilon,
scale1=scale1,
scale2=scale2)
cv_py.solve(glmnet=False)
lam = cv.lam_CVR
print("lam", lam)
if condition_on_CVR:
cv.condition_on_opt_state()
lam = cv.one_SD_rule(direction="up")
print("new lam", lam)
# non-randomized Lasso, just looking how many vars it selects
problem = rr.simple_problem(glm_loss, rr.l1norm(p, lagrange=lam))
beta_hat = problem.solve()
active_hat = beta_hat !=0
print("non-randomized lasso ", active_hat.sum())
# view 2
W = lam_frac * np.ones(p) * lam
penalty = rr.group_lasso(np.arange(p),
weights=dict(zip(np.arange(p), W)), lagrange=1.)
M_est = glm_group_lasso(glm_loss, epsilon, penalty, randomizer)
if nboot > 0:
cv.nboot = M_est.nboot = nboot
mv = multiple_queries([cv, M_est])
mv.solve()
active_union = M_est._overall
nactive = np.sum(active_union)
print("nactive", nactive)
if nactive==0:
return None
nonzero = np.where(beta)[0]
if set(nonzero).issubset(np.nonzero(active_union)[0]):
active_set = np.nonzero(active_union)[0]
true_vec = beta[active_union]
if marginalize_subgrad == True:
M_est.decompose_subgradient(conditioning_groups=np.zeros(p, bool),
marginalizing_groups=np.ones(p, bool))
selected_features = np.zeros(p, np.bool)
selected_features[active_set] = True
unpenalized_mle = restricted_Mest(M_est.loss, selected_features)
form_covariances = glm_nonparametric_bootstrap(n, n)
target_info, target_observed = pairs_bootstrap_glm(M_est.loss, selected_features, inactive=None)
cov_info = M_est.setup_sampler()
target_cov, score_cov = form_covariances(target_info,
cross_terms=[cov_info],
nsample=M_est.nboot)
opt_sample = M_est.sampler.sample(ndraw,
burnin)
pvalues = M_est.sampler.coefficient_pvalues(unpenalized_mle,
target_cov,
score_cov,
parameter=np.zeros(selected_features.sum()),
sample=opt_sample)
intervals = M_est.sampler.confidence_intervals(unpenalized_mle, target_cov, score_cov, sample=opt_sample)
L, U = intervals.T
sel_covered = np.zeros(nactive, np.bool)
sel_length = np.zeros(nactive)
LU_naive = naive_confidence_intervals(np.diag(target_cov), target_observed)
naive_covered = np.zeros(nactive, np.bool)
naive_length = np.zeros(nactive)
naive_pvals = naive_pvalues(np.diag(target_cov), target_observed, true_vec)
active_var = np.zeros(nactive, np.bool)
for j in range(nactive):
if (L[j] <= true_vec[j]) and (U[j] >= true_vec[j]):
sel_covered[j] = 1
if (LU_naive[j, 0] <= true_vec[j]) and (LU_naive[j, 1] >= true_vec[j]):
naive_covered[j] = 1
sel_length[j] = U[j]-L[j]
naive_length[j] = LU_naive[j,1]-LU_naive[j,0]
active_var[j] = active_set[j] in nonzero
q = 0.2
BH_desicions = multipletests(pvalues, alpha=q, method="fdr_bh")[0]
return sel_covered, sel_length, naive_pvals, naive_covered, naive_length, active_var, BH_desicions, active_var
| [
"numpy.sqrt",
"numpy.ones",
"regreg.api.l1norm",
"statsmodels.sandbox.stats.multicomp.multipletests",
"numpy.where",
"numpy.diag",
"regreg.api.glm.logistic",
"numpy.sum",
"regreg.api.glm.gaussian",
"numpy.zeros",
"numpy.nonzero",
"numpy.arange"
] | [((3690, 3710), 'numpy.sum', 'np.sum', (['active_union'], {}), '(active_union)\n', (3696, 3710), True, 'import numpy as np\n'), ((1940, 1961), 'regreg.api.glm.gaussian', 'rr.glm.gaussian', (['X', 'y'], {}), '(X, y)\n', (1955, 1961), True, 'import regreg.api as rr\n'), ((2131, 2141), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2138, 2141), True, 'import numpy as np\n'), ((3147, 3173), 'regreg.api.l1norm', 'rr.l1norm', (['p'], {'lagrange': 'lam'}), '(p, lagrange=lam)\n', (3156, 3173), True, 'import regreg.api as rr\n'), ((3368, 3380), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (3377, 3380), True, 'import numpy as np\n'), ((3795, 3809), 'numpy.where', 'np.where', (['beta'], {}), '(beta)\n', (3803, 3809), True, 'import numpy as np\n'), ((4190, 4210), 'numpy.zeros', 'np.zeros', (['p', 'np.bool'], {}), '(p, np.bool)\n', (4198, 4210), True, 'import numpy as np\n'), ((5379, 5405), 'numpy.zeros', 'np.zeros', (['nactive', 'np.bool'], {}), '(nactive, np.bool)\n', (5387, 5405), True, 'import numpy as np\n'), ((5427, 5444), 'numpy.zeros', 'np.zeros', (['nactive'], {}), '(nactive)\n', (5435, 5444), True, 'import numpy as np\n'), ((5554, 5580), 'numpy.zeros', 'np.zeros', (['nactive', 'np.bool'], {}), '(nactive, np.bool)\n', (5562, 5580), True, 'import numpy as np\n'), ((5604, 5621), 'numpy.zeros', 'np.zeros', (['nactive'], {}), '(nactive)\n', (5612, 5621), True, 'import numpy as np\n'), ((5728, 5754), 'numpy.zeros', 'np.zeros', (['nactive', 'np.bool'], {}), '(nactive, np.bool)\n', (5736, 5754), True, 'import numpy as np\n'), ((2091, 2112), 'regreg.api.glm.logistic', 'rr.glm.logistic', (['X', 'y'], {}), '(X, y)\n', (2106, 2112), True, 'import regreg.api as rr\n'), ((3322, 3332), 'numpy.ones', 'np.ones', (['p'], {}), '(p)\n', (3329, 3332), True, 'import numpy as np\n'), ((3843, 3867), 'numpy.nonzero', 'np.nonzero', (['active_union'], {}), '(active_union)\n', (3853, 3867), True, 'import numpy as np\n'), ((3895, 3919), 'numpy.nonzero', 'np.nonzero', (['active_union'], 
{}), '(active_union)\n', (3905, 3919), True, 'import numpy as np\n'), ((5492, 5511), 'numpy.diag', 'np.diag', (['target_cov'], {}), '(target_cov)\n', (5499, 5511), True, 'import numpy as np\n'), ((5658, 5677), 'numpy.diag', 'np.diag', (['target_cov'], {}), '(target_cov)\n', (5665, 5677), True, 'import numpy as np\n'), ((6198, 6246), 'statsmodels.sandbox.stats.multicomp.multipletests', 'multipletests', (['pvalues'], {'alpha': 'q', 'method': '"""fdr_bh"""'}), "(pvalues, alpha=q, method='fdr_bh')\n", (6211, 6246), False, 'from statsmodels.sandbox.stats.multicomp import multipletests\n'), ((3428, 3440), 'numpy.arange', 'np.arange', (['p'], {}), '(p)\n', (3437, 3440), True, 'import numpy as np\n'), ((4062, 4079), 'numpy.zeros', 'np.zeros', (['p', 'bool'], {}), '(p, bool)\n', (4070, 4079), True, 'import numpy as np\n'), ((4143, 4159), 'numpy.ones', 'np.ones', (['p', 'bool'], {}), '(p, bool)\n', (4150, 4159), True, 'import numpy as np\n')] |
"""Hardware interfaces for triggering"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import sys
import numpy as np
from ._utils import verbose_dec, string_types, logger
class ParallelTrigger(object):
"""Parallel port and dummy triggering support.
.. warning:: When using the parallel port, calling
:meth:`expyfun.ExperimentController.start_stimulus`
will automatically invoke a stamping of the 1 trigger, which
will in turn cause a delay equal to that of
``trigger_duration``.
This can effect e.g. :class:`EyelinkController` timing.
Parameters
----------
mode : str
'parallel' for real use. 'dummy', passes all calls.
address : str | int | None
The address to use. On Linux this should be a string path like
``'/dev/parport0'`` (equivalent to None), on Windows it should be an
integer address like ``888`` or ``0x378`` (equivalent to None).
The config variable ``TRIGGER_ADDRESS`` can be used to set this
permanently.
trigger_duration : float
Amount of time (seconds) to leave the trigger high whenever
sending a trigger.
ec : instance of ExperimentController
The ExperimentController.
verbose : bool, str, int, or None
If not None, override default verbose level.
Notes
-----
Parallel port activation is enabled by using the ``trigger_controller``
argument of :class:`expyfun.ExperimentController`.
"""
@verbose_dec
def __init__(self, mode='dummy', address=None, trigger_duration=0.01,
ec=None, verbose=None):
self.ec = ec
if mode == 'parallel':
if sys.platform.startswith('linux'):
address = '/dev/parport0' if address is None else address
if not isinstance(address, string_types):
raise ValueError('addrss must be a string or None, got %s '
'of type %s' % (address, type(address)))
from parallel import Parallel
logger.info('Expyfun: Using address %s' % (address,))
self._port = Parallel(address)
self._portname = address
self._set_data = self._port.setData
elif sys.platform.startswith('win'):
from ctypes import windll
if not hasattr(windll, 'inpout32'):
raise SystemError(
'Must have inpout32 installed, see:\n\n'
'http://www.highrez.co.uk/downloads/inpout32/')
base = '0x378' if address is None else address
logger.info('Expyfun: Using base address %s' % (base,))
if isinstance(base, string_types):
base = int(base, 16)
if not isinstance(base, int):
raise ValueError('address must be int or None, got %s of '
'type %s' % (base, type(base)))
self._port = windll.inpout32
mask = np.uint8(1 << 5 | 1 << 6 | 1 << 7)
# Use ECP to put the port into byte mode
val = int((self._port.Inp32(base + 0x402) & ~mask) | (1 << 5))
self._port.Out32(base + 0x402, val)
# Now to make sure the port is in output mode we need to make
# sure that bit 5 of the control register is not set
val = int(self._port.Inp32(base + 2) & ~np.uint8(1 << 5))
self._port.Out32(base + 2, val)
self._set_data = lambda data: self._port.Out32(base, data)
self._portname = str(base)
else:
raise NotImplementedError('Parallel port triggering only '
'supported on Linux and Windows')
else: # mode == 'dummy':
self._port = self._portname = None
self._trigger_list = list()
self._set_data = lambda x: (self._trigger_list.append(x)
if x != 0 else None)
self.trigger_duration = trigger_duration
self.mode = mode
def __repr__(self):
return '<ParallelTrigger : %s (%s)>' % (self.mode, self._portname)
def _stamp_trigger(self, trig):
"""Fake stamping."""
self._set_data(int(trig))
self.ec.wait_secs(self.trigger_duration)
self._set_data(0)
def stamp_triggers(self, triggers, delay=None, wait_for_last=True):
"""Stamp a list of triggers with a given inter-trigger delay.
Parameters
----------
triggers : list
No input checking is done, so ensure triggers is a list,
with each entry an integer with fewer than 8 bits (max 255).
delay : float | None
The inter-trigger-onset delay (includes "on" time).
If None, will use twice the trigger duration (50% duty cycle).
wait_for_last : bool
If True, wait for last trigger to be stamped before returning.
"""
if delay is None:
delay = 2 * self.trigger_duration
for ti, trig in enumerate(triggers):
self._stamp_trigger(trig)
if ti < len(triggers) - 1 or wait_for_last:
self.ec.wait_secs(delay - self.trigger_duration)
def close(self):
"""Release hardware interfaces."""
if hasattr(self, '_port'):
del self._port
def __del__(self):
return self.close()
def decimals_to_binary(decimals, n_bits):
"""Convert a sequence of decimal numbers to a sequence of binary numbers.
Parameters
----------
decimals : array-like
Array of integers to convert. Must all be >= 0.
n_bits : array-like
Array of the number of bits to use to represent each decimal number.
Returns
-------
binary : list
Binary representation.
Notes
-----
This function is useful for generating IDs to be stamped using the TDT.
"""
decimals = np.array(decimals, int)
if decimals.ndim != 1 or (decimals < 0).any():
raise ValueError('decimals must be 1D with all nonnegative values')
n_bits = np.array(n_bits, int)
if decimals.shape != n_bits.shape:
raise ValueError('n_bits must have same shape as decimals')
if (n_bits <= 0).any():
raise ValueError('all n_bits must be positive')
binary = list()
for d, b in zip(decimals, n_bits):
if d > 2 ** b - 1:
raise ValueError('cannot convert number {0} using {1} bits'
''.format(d, b))
binary.extend([int(bb) for bb in np.binary_repr(d, b)])
assert len(binary) == n_bits.sum() # make sure we didn't do something dumb
return binary
def binary_to_decimals(binary, n_bits):
"""Convert a sequence of binary numbers to a sequence of decimal numbers.
Parameters
----------
binary : array-like
Array of integers to convert. Must all be 0 or 1.
n_bits : array-like
Array of the number of bits used to represent each decimal number.
Returns
-------
decimals : array-like
Array of integers.
"""
if not np.array_equal(binary, np.array(binary, bool)):
raise ValueError('binary must only contain zeros and ones')
binary = np.array(binary, bool)
if binary.ndim != 1:
raise ValueError('binary must be 1 dimensional')
n_bits = np.atleast_1d(n_bits).astype(int)
if np.any(n_bits <= 0):
raise ValueError('n_bits must all be > 0')
if n_bits.sum() != len(binary):
raise ValueError('the sum of n_bits must be equal to the number of '
'elements in binary')
offset = 0
outs = []
for nb in n_bits:
outs.append(np.sum(binary[offset:offset + nb] *
(2 ** np.arange(nb - 1, -1, -1))))
offset += nb
assert offset == len(binary)
return np.array(outs)
| [
"numpy.uint8",
"sys.platform.startswith",
"numpy.binary_repr",
"numpy.any",
"numpy.array",
"parallel.Parallel",
"numpy.arange",
"numpy.atleast_1d"
] | [((6159, 6182), 'numpy.array', 'np.array', (['decimals', 'int'], {}), '(decimals, int)\n', (6167, 6182), True, 'import numpy as np\n'), ((6323, 6344), 'numpy.array', 'np.array', (['n_bits', 'int'], {}), '(n_bits, int)\n', (6331, 6344), True, 'import numpy as np\n'), ((7460, 7482), 'numpy.array', 'np.array', (['binary', 'bool'], {}), '(binary, bool)\n', (7468, 7482), True, 'import numpy as np\n'), ((7619, 7638), 'numpy.any', 'np.any', (['(n_bits <= 0)'], {}), '(n_bits <= 0)\n', (7625, 7638), True, 'import numpy as np\n'), ((8085, 8099), 'numpy.array', 'np.array', (['outs'], {}), '(outs)\n', (8093, 8099), True, 'import numpy as np\n'), ((1773, 1805), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (1796, 1805), False, 'import sys\n'), ((7354, 7376), 'numpy.array', 'np.array', (['binary', 'bool'], {}), '(binary, bool)\n', (7362, 7376), True, 'import numpy as np\n'), ((7578, 7599), 'numpy.atleast_1d', 'np.atleast_1d', (['n_bits'], {}), '(n_bits)\n', (7591, 7599), True, 'import numpy as np\n'), ((2242, 2259), 'parallel.Parallel', 'Parallel', (['address'], {}), '(address)\n', (2250, 2259), False, 'from parallel import Parallel\n'), ((2370, 2400), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (2393, 2400), False, 'import sys\n'), ((3162, 3196), 'numpy.uint8', 'np.uint8', (['(1 << 5 | 1 << 6 | 1 << 7)'], {}), '(1 << 5 | 1 << 6 | 1 << 7)\n', (3170, 3196), True, 'import numpy as np\n'), ((6781, 6801), 'numpy.binary_repr', 'np.binary_repr', (['d', 'b'], {}), '(d, b)\n', (6795, 6801), True, 'import numpy as np\n'), ((7991, 8016), 'numpy.arange', 'np.arange', (['(nb - 1)', '(-1)', '(-1)'], {}), '(nb - 1, -1, -1)\n', (8000, 8016), True, 'import numpy as np\n'), ((3589, 3605), 'numpy.uint8', 'np.uint8', (['(1 << 5)'], {}), '(1 << 5)\n', (3597, 3605), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import random
import numpy as np
import pandas as pd
import numpy as np
from numpy.linalg import slogdet
import time
from experiment_runner.experiment_runner_v2 import run_experiments
# from PySSM import Matrix, Vector
from PySSM import RBFKernel
from PySSM import IVM, FastIVM
from PySSM import Greedy
from PySSM import Random
from PySSM import SieveStreaming
from PySSM import SieveStreamingPP
from PySSM import ThreeSieves
from PySSM import Salsa
from PySSM import IndependentSetImprovement
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import MinMaxScaler
def pre(cfg):
name = cfg["method"]
sigma = cfg["sigma"]
scale = cfg["scale"]
K = cfg["K"]
kernel = RBFKernel(sigma=sigma,scale=scale)
fastLogDet = FastIVM(K, kernel, 1.0)
if name == "Greedy":
opt = Greedy(K, fastLogDet)
if name == "IndependentSetImprovement":
opt = IndependentSetImprovement(K, fastLogDet)
elif name == "Random":
opt = Random(K, fastLogDet, cfg["run_id"])
elif name == "SieveStreaming":
e = cfg["epsilon"]
opt = SieveStreaming(K, fastLogDet, 1.0, e)
elif name == "SieveStreaming++":
e = cfg["epsilon"]
opt = SieveStreamingPP(K, fastLogDet, 1.0, e)
elif name == "Salsa":
e = cfg["epsilon"]
opt = Salsa(K, fastLogDet, 1.0, e)
elif name == "ThreeSieves":
e = cfg["epsilon"]
T = cfg["T"]
opt = ThreeSieves(K, fastLogDet, 1.0, e, "sieve", T)
return opt
def fit(cfg, opt):
X = np.load("/home/share/fuerBuschjaeger/threesieves/stream51/stream51.npy")
min_max_scaler = MinMaxScaler()
X = min_max_scaler.fit_transform(X)
# X = cfg["X"]
if cfg["method"] == "Greedy":
opt.fit(X,1)
else:
for x in X:
opt.next(x)
return opt
def post(cfg, opt):
solution_dict = {
"name" : cfg.get("method", None),
"sigma" : cfg.get("sigma", None),
"scale" : cfg.get("scale", None),
"K" : cfg.get("K", None),
"epsilon":cfg.get("epsilon", None),
"T":cfg.get("T", None),
"run_id":cfg.get("run_id", None),
"out_path":cfg.get("out_path", None),
"solution":opt.get_solution(),
"fval":opt.get_fval()
}
np.save(cfg["out_path"],solution_dict,allow_pickle=True)
return {
"fval":opt.get_fval(),
"num_candidate_solutions":opt.get_num_candidate_solutions(),
"num_elements_stored":opt.get_num_elements_stored(),
}
# ---------------------------------------------------------------------------
# Experiment driver (executes at import time): builds the grid of submodular
# summary-selection configurations and hands them to run_experiments.
# ---------------------------------------------------------------------------
print("Loading data")
# Load the stream51 feature matrix; here it is only used to size the RBF
# bandwidth grid below (`fit` re-loads it per run).
X = np.load("/home/share/fuerBuschjaeger/threesieves/stream51/stream51.npy")
# Summary sizes to sweep.
Ks = range(5,105,5)
# Ks = [5]
# Epsilon values for the streaming sieve-based methods.
eps = [1e-1, 1e-2] #, 1e-3
# Threshold counts for ThreeSieves.
Ts = [250, 500, 1000, 1500, 2000, 2500, 5000]
#Sigmas = np.array([0.1, 0.5, 1.0, 2.0, 5.0])*np.sqrt(X.shape[1])
# RBF bandwidths scaled by sqrt(d) so they track the data dimensionality.
Sigmas = [0.1*np.sqrt(X.shape[1]), 0.25*np.sqrt(X.shape[1])]
parser = argparse.ArgumentParser()
# NOTE(review): with action="store_true" AND default=True, args.single is
# always True, so the "ray" branch below is unreachable -- confirm intent.
parser.add_argument("-s", "--single", help="Run experiments in a single thread",action="store_true", default=True)
args = parser.parse_args()
if args.single:
    # Run everything locally in a single process.
    basecfg = {
        "out_path":"results",
        "backend":"local",
        "num_cpus":1,
        "pre": pre,
        "post": post,
        "fit": fit,
    }
else:
    # Distribute runs over a ray cluster.
    basecfg = {
        "out_path":"results",
        "backend":"ray",
        "address":"192.168.127.12:6379",
        "redis_password":"<PASSWORD>",
        "num_cpus":1,
        "max_memory":4*1024*1024*1024, # 4 GB
        "pre": pre,
        "post": post,
        "fit": fit
    }
results = []
runs = []
# One configuration dict per (method, K, sigma[, epsilon[, T]]) combination.
for K in Ks:
    for s in Sigmas:
        runs.append(
            ({
                "method": "Greedy",
                "K":K,
                "sigma":s,
                "scale":1
            })
        )
        runs.append(
            ({
                "method": "IndependentSetImprovement",
                "K":K,
                "sigma":s,
                "scale":1
            })
        )
        # NOTE(review): `pre` reads cfg["run_id"] for Random, but this config
        # only sets "repetitions" -- verify how run_id is injected downstream.
        runs.append(
            ({
                "method": "Random",
                "K":K,
                "sigma":s,
                "scale":1,
                "repetitions":5
            })
        )
        for e in eps:
            runs.append(
                ( {
                    "method": "SieveStreaming",
                    "K":K,
                    "sigma":s,
                    "scale":1,
                    "epsilon":e
                })
            )
            runs.append(
                ( {
                    "method": "SieveStreaming++",
                    "K":K,
                    "sigma":s,
                    "scale":1,
                    "epsilon":e
                })
            )
            for T in Ts:
                runs.append(
                    ( {
                        "method": "ThreeSieves",
                        "K":K,
                        "sigma":s,
                        "scale":1,
                        "epsilon":e,
                        "T":T
                    })
                )
# random.shuffle(runs)
run_experiments(basecfg, runs)
| [
"PySSM.Salsa",
"numpy.sqrt",
"PySSM.SieveStreaming",
"PySSM.RBFKernel",
"PySSM.Random",
"PySSM.Greedy",
"PySSM.IndependentSetImprovement",
"PySSM.FastIVM",
"PySSM.ThreeSieves",
"PySSM.SieveStreamingPP",
"numpy.load",
"sklearn.preprocessing.MinMaxScaler",
"numpy.save",
"experiment_runner.ex... | [((2586, 2658), 'numpy.load', 'np.load', (['"""/home/share/fuerBuschjaeger/threesieves/stream51/stream51.npy"""'], {}), "('/home/share/fuerBuschjaeger/threesieves/stream51/stream51.npy')\n", (2593, 2658), True, 'import numpy as np\n'), ((5077, 5107), 'experiment_runner.experiment_runner_v2.run_experiments', 'run_experiments', (['basecfg', 'runs'], {}), '(basecfg, runs)\n', (5092, 5107), False, 'from experiment_runner.experiment_runner_v2 import run_experiments\n'), ((748, 783), 'PySSM.RBFKernel', 'RBFKernel', ([], {'sigma': 'sigma', 'scale': 'scale'}), '(sigma=sigma, scale=scale)\n', (757, 783), False, 'from PySSM import RBFKernel\n'), ((800, 823), 'PySSM.FastIVM', 'FastIVM', (['K', 'kernel', '(1.0)'], {}), '(K, kernel, 1.0)\n', (807, 823), False, 'from PySSM import IVM, FastIVM\n'), ((1575, 1647), 'numpy.load', 'np.load', (['"""/home/share/fuerBuschjaeger/threesieves/stream51/stream51.npy"""'], {}), "('/home/share/fuerBuschjaeger/threesieves/stream51/stream51.npy')\n", (1582, 1647), True, 'import numpy as np\n'), ((1670, 1684), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1682, 1684), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2321, 2379), 'numpy.save', 'np.save', (["cfg['out_path']", 'solution_dict'], {'allow_pickle': '(True)'}), "(cfg['out_path'], solution_dict, allow_pickle=True)\n", (2328, 2379), True, 'import numpy as np\n'), ((864, 885), 'PySSM.Greedy', 'Greedy', (['K', 'fastLogDet'], {}), '(K, fastLogDet)\n', (870, 885), False, 'from PySSM import Greedy\n'), ((944, 984), 'PySSM.IndependentSetImprovement', 'IndependentSetImprovement', (['K', 'fastLogDet'], {}), '(K, fastLogDet)\n', (969, 984), False, 'from PySSM import IndependentSetImprovement\n'), ((2844, 2863), 'numpy.sqrt', 'np.sqrt', (['X.shape[1]'], {}), '(X.shape[1])\n', (2851, 2863), True, 'import numpy as np\n'), ((2870, 2889), 'numpy.sqrt', 'np.sqrt', (['X.shape[1]'], {}), '(X.shape[1])\n', (2877, 2889), True, 'import numpy 
as np\n'), ((1026, 1062), 'PySSM.Random', 'Random', (['K', 'fastLogDet', "cfg['run_id']"], {}), "(K, fastLogDet, cfg['run_id'])\n", (1032, 1062), False, 'from PySSM import Random\n'), ((1139, 1176), 'PySSM.SieveStreaming', 'SieveStreaming', (['K', 'fastLogDet', '(1.0)', 'e'], {}), '(K, fastLogDet, 1.0, e)\n', (1153, 1176), False, 'from PySSM import SieveStreaming\n'), ((1255, 1294), 'PySSM.SieveStreamingPP', 'SieveStreamingPP', (['K', 'fastLogDet', '(1.0)', 'e'], {}), '(K, fastLogDet, 1.0, e)\n', (1271, 1294), False, 'from PySSM import SieveStreamingPP\n'), ((1362, 1390), 'PySSM.Salsa', 'Salsa', (['K', 'fastLogDet', '(1.0)', 'e'], {}), '(K, fastLogDet, 1.0, e)\n', (1367, 1390), False, 'from PySSM import Salsa\n'), ((1485, 1531), 'PySSM.ThreeSieves', 'ThreeSieves', (['K', 'fastLogDet', '(1.0)', 'e', '"""sieve"""', 'T'], {}), "(K, fastLogDet, 1.0, e, 'sieve', T)\n", (1496, 1531), False, 'from PySSM import ThreeSieves\n')] |
import numpy as np
import scipy.sparse as sp
def get_sparse_mat(a2b, a2idx, b2idx):
    """
    Build a binary sparse association matrix from an adjacency dict.

    :param a2b: dict mapping each `a` item to an iterable of associated `b` items.
    :param a2idx: dict mapping `a` items to row indices.
    :param b2idx: dict mapping `b` items to column indices.
    :return: a ``scipy.sparse.coo_matrix`` of shape (len(a2idx), len(b2idx))
        with 1.0 where `a` is associated with `b`.
    """
    n = len(a2idx)
    m = len(b2idx)
    assoc = np.zeros((n, m))
    # BUG FIX: `dict.iteritems()` is Python 2 only; `items()` works on both.
    for a, b_assoc in a2b.items():
        if a not in a2idx:
            # Items without an assigned row index are ignored.
            continue
        for b in b_assoc:
            if b not in b2idx:
                continue
            assoc[a2idx[a], b2idx[b]] = 1.
    assoc = sp.coo_matrix(assoc)
    return assoc
def sparse_to_tuple(sparse_mx):
    """
    Decompose a scipy sparse matrix into a ``(coords, values, shape)`` triple.

    :param sparse_mx: any scipy sparse matrix.
    :return: tuple of an (nnz, 2) coordinate array, the nonzero values, and
        the matrix shape.
    """
    # Work on the COO representation so the row/col index arrays exist.
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
| [
"numpy.zeros",
"scipy.sparse.isspmatrix_coo",
"numpy.vstack",
"scipy.sparse.coo_matrix"
] | [((136, 152), 'numpy.zeros', 'np.zeros', (['(n, m)'], {}), '((n, m))\n', (144, 152), True, 'import numpy as np\n'), ((377, 397), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['assoc'], {}), '(assoc)\n', (390, 397), True, 'import scipy.sparse as sp\n'), ((460, 488), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['sparse_mx'], {}), '(sparse_mx)\n', (477, 488), True, 'import scipy.sparse as sp\n'), ((541, 582), 'numpy.vstack', 'np.vstack', (['(sparse_mx.row, sparse_mx.col)'], {}), '((sparse_mx.row, sparse_mx.col))\n', (550, 582), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
from typing import List
import numpy as np
from scipy.stats import t, spearmanr
from scipy.special import erfinv
from chemprop.uncertainty.uncertainty_calibrator import UncertaintyCalibrator
from chemprop.train import evaluate_predictions
class UncertaintyEvaluator(ABC):
    """
    A class for evaluating the effectiveness of uncertainty estimates with metrics.
    """

    def __init__(
        self,
        evaluation_method: str,
        calibration_method: str,
        uncertainty_method: str,
        dataset_type: str,
        loss_function: str,
        calibrator: UncertaintyCalibrator,
    ):
        # Record how the predictions were produced and how their
        # uncertainties should be judged.
        self.evaluation_method = evaluation_method
        self.calibration_method = calibration_method
        self.uncertainty_method = uncertainty_method
        self.dataset_type = dataset_type
        self.loss_function = loss_function
        self.calibrator = calibrator

        # Fail fast on unsupported combinations.
        self.raise_argument_errors()

    def raise_argument_errors(self):
        """
        Raise errors for incompatibilities between dataset type and uncertainty method, or similar.
        """
        if self.dataset_type == "spectra":
            raise NotImplementedError(
                "No uncertainty evaluators implemented for spectra dataset type."
            )
        sampling_based = self.uncertainty_method in ["ensemble", "dropout"]
        class_like = self.dataset_type in ["classification", "multiclass"]
        if sampling_based and class_like:
            raise NotImplementedError(
                'Though ensemble and dropout uncertainty methods are available for classification \
                    multiclass dataset types, their outputs are not confidences and are not \
                    compatible with any implemented evaluation methods for classification.'
            )

    @abstractmethod
    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ) -> List[float]:
        """
        Evaluate the performance of uncertainty predictions against the model target values.

        :param targets: The target values for prediction.
        :param preds: The prediction values of a model on the test set.
        :param uncertainties: The estimated uncertainty values, either calibrated or uncalibrated, of a model on the test set.
        :return: A list of metric values for each model task.
        """
class MetricEvaluator(UncertaintyEvaluator):
    """
    A class for evaluating confidence estimates of classification and multiclass datasets using builtin evaluation metrics.
    """

    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ):
        """Score the confidences with the builtin metric; one value per task."""
        num_tasks = np.array(targets).shape[1]
        # The confidences (not the hard predictions) are scored as if they
        # were model outputs.
        results = evaluate_predictions(
            preds=uncertainties,
            targets=targets,
            num_tasks=num_tasks,
            metrics=[self.evaluation_method],
            dataset_type=self.dataset_type,
        )
        return results[self.evaluation_method]
class NLLRegressionEvaluator(UncertaintyEvaluator):
    """
    A class for evaluating regression uncertainty values using the mean negative-log-likelihood
    of the actual targets given the probability distributions estimated by the model.
    """

    def raise_argument_errors(self):
        """Reject any dataset type other than regression."""
        super().raise_argument_errors()
        if self.dataset_type != "regression":
            raise ValueError(
                "NLL Regression Evaluator is only for regression dataset types."
            )

    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ):
        """Return the mean negative-log-likelihood per task as a list."""
        if self.calibrator is not None:
            # A calibrator knows its own likelihood; delegate to it.
            nll = self.calibrator.nll(
                preds=preds, unc=uncertainties, targets=targets
            )  # shape(data, task)
            return np.mean(nll, axis=0).tolist()
        # Uncalibrated regression uncertainties are variances: Gaussian NLL,
        # -log N(y; mu, var) = log(2*pi*var)/2 + (mu - y)^2 / (2*var).
        variances = np.array(uncertainties)
        mu = np.array(preds)
        y = np.array(targets)
        nll = np.log(2 * np.pi * variances) / 2 + (mu - y) ** 2 / (2 * variances)
        return np.mean(nll, axis=0).tolist()
class NLLClassEvaluator(UncertaintyEvaluator):
    """
    A class for evaluating classification uncertainty values using the mean negative-log-likelihood
    of the actual targets given the probabilities assigned to them by the model.
    """

    def raise_argument_errors(self):
        """Reject any dataset type other than classification."""
        super().raise_argument_errors()
        if self.dataset_type != "classification":
            raise ValueError(
                "NLL Classification Evaluator is only for classification dataset types."
            )

    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ):
        """Return the mean negative-log-likelihood per task as a list."""
        y = np.array(targets)
        confidence = np.array(uncertainties)
        # Probability assigned to the observed label: `confidence` where the
        # target is 1, `1 - confidence` where the target is 0.
        likelihood = confidence * y + (1 - confidence) * (1 - y)
        return np.mean(-1 * np.log(likelihood), axis=0).tolist()
class NLLMultiEvaluator(UncertaintyEvaluator):
    """
    A class for evaluating multiclass uncertainty values using the mean negative-log-likelihood
    of the actual targets given the probabilities assigned to them by the model.
    """

    def raise_argument_errors(self):
        """Reject any dataset type other than multiclass."""
        super().raise_argument_errors()
        if self.dataset_type != "multiclass":
            raise ValueError(
                "NLL Multiclass Evaluator is only for multiclass dataset types."
            )

    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ):
        """
        Return the mean negative-log-likelihood per task as a list.

        :param targets: Class-index targets, shape(data, tasks).
        :param preds: Predicted class probabilities; indexed as
            preds[:, 0, :], i.e. shape(data, tasks, classes) -- TODO confirm.
        :param uncertainties: Class probabilities used as confidences.
        """
        targets = np.array(targets, dtype=int)  # shape(data, tasks)
        uncertainties = np.array(uncertainties)
        preds = np.array(preds)
        # BUG FIX: `np.zeros_like(targets)` inherited the int dtype of
        # `targets`, silently truncating the per-datapoint NLL values to
        # integers on assignment below. Force a float buffer.
        nll = np.zeros_like(targets, dtype=float)
        for i in range(targets.shape[1]):
            task_preds = uncertainties[:, i]
            task_targets = targets[:, i]  # shape(data)
            # One-hot encode the target class for each datapoint.
            bin_targets = np.zeros_like(preds[:, 0, :], dtype=float)  # shape(data, classes)
            bin_targets[np.arange(targets.shape[0]), task_targets] = 1
            task_likelihood = np.sum(bin_targets * task_preds, axis=1)
            task_nll = -1 * np.log(task_likelihood)
            nll[:, i] = task_nll
        return np.mean(nll, axis=0).tolist()
class CalibrationAreaEvaluator(UncertaintyEvaluator):
    """
    A class for evaluating regression uncertainty values based on how they deviate from perfect
    calibration on an observed-probability versus expected-probability plot.
    """

    def raise_argument_errors(self):
        # Miscalibration area is defined via confidence intervals, which only
        # make sense for regression outputs.
        super().raise_argument_errors()
        if self.dataset_type != "regression":
            raise NotImplementedError(
                f"Miscalibration area is only implemented for regression dataset types."
            )

    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ):
        # Returns the miscalibration area (AUCE) per task: the area between
        # the observed fraction-within-interval curve and the ideal diagonal,
        # sampled at 101 expected-probability bin edges (0%, 1%, ..., 100%).
        targets = np.array(targets)  # shape(data, tasks)
        uncertainties = np.array(uncertainties)
        preds = np.array(preds)
        abs_error = np.abs(preds - targets)  # shape(data, tasks)
        fractions = np.zeros([preds.shape[1], 101])  # shape(tasks, 101)
        # The 100% interval always contains the target by definition.
        fractions[:, 100] = 1
        if self.calibrator is not None:
            # using 101 bin edges, hardcoded
            # The calibrator is temporarily re-calibrated at each interval
            # percentile; its state is saved here and restored afterwards.
            original_metric = self.calibrator.regression_calibrator_metric
            original_scaling = self.calibrator.scaling
            original_interval = self.calibrator.interval_percentile
            for i in range(1, 100):
                self.calibrator.regression_calibrator_metric = "interval"
                self.calibrator.interval_percentile = i
                self.calibrator.calibrate()
                bin_scaling = self.calibrator.scaling
                # Rescale the calibrated uncertainties from the original
                # interval to the i-th percentile interval.
                bin_unc = (
                    uncertainties
                    / np.expand_dims(original_scaling, axis=0)
                    * np.expand_dims(bin_scaling, axis=0)
                )  # shape(data, tasks)
                bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
                fractions[:, i] = bin_fraction
            # Restore the calibrator's original state.
            self.calibrator.regression_calibrator_metric = original_metric
            self.calibrator.scaling = original_scaling
            self.calibrator.interval_percentile = original_interval
        else:  # uncertainties are uncalibrated variances
            # For a Gaussian, the i% central interval spans
            # erfinv(i/100) * sqrt(2) standard deviations.
            std = np.sqrt(uncertainties)
            for i in range(1, 100):
                bin_scaling = erfinv(i / 100) * np.sqrt(2)
                bin_unc = std * bin_scaling
                bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
                fractions[:, i] = bin_fraction
        # trapezoid rule
        auce = np.sum(
            0.01 * np.abs(fractions - np.expand_dims(np.arange(101) / 100, axis=0)),
            axis=1,
        )
        return auce.tolist()
class ExpectedNormalizedErrorEvaluator(UncertaintyEvaluator):
    """
    A class that evaluates uncertainty performance by binning together clusters of predictions
    and comparing the average predicted variance of the clusters against the RMSE of the cluster.
    Method discussed in https://doi.org/10.1021/acs.jcim.9b00975.
    """

    def raise_argument_errors(self):
        # ENCE compares predicted variances against RMSE, a regression notion.
        super().raise_argument_errors()
        if self.dataset_type != "regression":
            raise ValueError(
                f"Expected normalized error is only appropriate for regression dataset types."
            )

    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ):
        # Returns the expected normalized error (ENCE) per task: predictions
        # are sorted by predicted uncertainty, split into 100 bins, and
        # mean(|mean_var - rmse| / mean_var) is taken over the bins.
        targets = np.array(targets)  # shape(data, tasks)
        uncertainties = np.array(uncertainties)
        preds = np.array(preds)
        abs_error = np.abs(preds - targets)  # shape(data, tasks)
        # Sort uncertainties and errors together, per task, by uncertainty.
        sort_record = np.rec.fromarrays([uncertainties, abs_error], names="i, j")
        sort_record.sort(axis=0)
        uncertainties = sort_record["i"]
        abs_error = sort_record["j"]
        # get stdev scaling
        if self.calibrator is not None:
            # Saved so the calibrator can be restored after the stdev
            # re-calibration performed inside the loop below.
            original_metric = self.calibrator.regression_calibrator_metric
            original_scaling = self.calibrator.scaling
        # 100 bins
        split_unc = np.array_split(
            uncertainties, 100, axis=0
        )  # shape(list100, data, tasks)
        split_error = np.array_split(abs_error, 100, axis=0)
        mean_vars = np.zeros([preds.shape[1], 100])  # shape(tasks, 100)
        rmses = np.zeros_like(mean_vars)
        for i in range(100):
            if self.calibrator is None:  # starts as a variance
                mean_vars[:, i] = np.mean(split_unc[i], axis=0)
                rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
            elif self.calibration_method == "tscaling":  # convert back to sample stdev
                # Undo the calibrator's scaling, then take the variance of the
                # corresponding Student-t distribution.
                bin_unc = split_unc[i] / np.expand_dims(original_scaling, axis=0)
                bin_var = t.var(df=self.calibrator.num_models - 1, scale=bin_unc)
                mean_vars[:, i] = np.mean(bin_var, axis=0)
                rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
            else:
                # Re-calibrate in stdev mode to obtain the stdev scaling, then
                # immediately restore the calibrator's original state.
                self.calibrator.regression_calibrator_metric = "stdev"
                self.calibrator.calibrate()
                stdev_scaling = self.calibrator.scaling
                self.calibrator.regression_calibrator_metric = original_metric
                self.calibrator.scaling = original_scaling
                bin_unc = split_unc[i]
                bin_unc = (
                    bin_unc
                    / np.expand_dims(original_scaling, axis=0)
                    * np.expand_dims(stdev_scaling, axis=0)
                )  # convert from interval to stdev as needed
                mean_vars[:, i] = np.mean(np.square(bin_unc), axis=0)
                rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
        ence = np.mean(np.abs(mean_vars - rmses) / mean_vars, axis=1)
        return ence.tolist()
class SpearmanEvaluator(UncertaintyEvaluator):
    """
    Class evaluating uncertainty performance using the spearman rank correlation. Method produces
    better scores (closer to 1 in the [-1, 1] range) when the uncertainty values are predictive
    of the ranking of prediction errors.
    """

    def raise_argument_errors(self):
        """Reject any dataset type other than regression."""
        super().raise_argument_errors()
        if self.dataset_type != "regression":
            raise ValueError(
                "Spearman rank correlation is only appropriate for regression dataset types."
            )

    def evaluate(
        self,
        targets: List[List[float]],
        preds: List[List[float]],
        uncertainties: List[List[float]],
    ):
        """Return one spearman correlation (uncertainty vs. |error|) per task."""
        y = np.array(targets)  # shape(data, tasks)
        unc = np.array(uncertainties)
        mu = np.array(preds)
        abs_error = np.abs(mu - y)  # shape(data, tasks)
        return [
            spearmanr(unc[:, task], abs_error[:, task]).correlation
            for task in range(y.shape[1])
        ]
def build_uncertainty_evaluator(
    evaluation_method: str,
    calibration_method: str,
    uncertainty_method: str,
    dataset_type: str,
    loss_function: str,
    calibrator: UncertaintyCalibrator,
) -> UncertaintyEvaluator:
    """
    Function that chooses and returns the appropriate :class: `UncertaintyEvaluator` subclass
    for the provided arguments.

    :param evaluation_method: name of the evaluation metric or method.
    :param calibration_method: name of the calibration method used, if any.
    :param uncertainty_method: name of the uncertainty estimation method.
    :param dataset_type: one of "regression", "classification", "multiclass", "spectra".
    :param loss_function: name of the training loss function.
    :param calibrator: the calibrator used on the uncertainties, or None.
    :return: an instantiated :class:`UncertaintyEvaluator` subclass.
    :raises NotImplementedError: if the evaluation method is not supported.
    """
    supported_evaluators = {
        "nll": {
            "regression": NLLRegressionEvaluator,
            "classification": NLLClassEvaluator,
            "multiclass": NLLMultiEvaluator,
            "spectra": None,
        }[dataset_type],
        "miscalibration_area": CalibrationAreaEvaluator,
        "ence": ExpectedNormalizedErrorEvaluator,
        "spearman": SpearmanEvaluator,
    }
    classification_metrics = [
        "auc",
        "prc-auc",
        "accuracy",
        "binary_cross_entropy",
        "f1",
        "mcc",
    ]
    multiclass_metrics = [
        "cross_entropy",
        "accuracy",
        "f1",
        "mcc"
    ]
    # Builtin classification/multiclass metrics are handled by
    # MetricEvaluator, which scores the confidences directly.
    if dataset_type == "classification" and evaluation_method in classification_metrics:
        evaluator_class = MetricEvaluator
    elif dataset_type == "multiclass" and evaluation_method in multiclass_metrics:
        evaluator_class = MetricEvaluator
    else:
        evaluator_class = supported_evaluators.get(evaluation_method, None)
    if evaluator_class is None:
        # note: fixed typo "Avalable" -> "Available" in this user-facing message
        raise NotImplementedError(
            f"Evaluator type {evaluation_method} is not supported. Available options are all calibration/multiclass metrics and {list(supported_evaluators.keys())}"
        )
    return evaluator_class(
        evaluation_method=evaluation_method,
        calibration_method=calibration_method,
        uncertainty_method=uncertainty_method,
        dataset_type=dataset_type,
        loss_function=loss_function,
        calibrator=calibrator,
    )
| [
"numpy.abs",
"numpy.mean",
"numpy.sqrt",
"numpy.log",
"numpy.zeros_like",
"scipy.special.erfinv",
"numpy.square",
"numpy.array_split",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.expand_dims",
"scipy.stats.t.var",
"scipy.stats.spearmanr",
"numpy.rec.fromarrays",
"numpy.arange"
] | [((4921, 4938), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (4929, 4938), True, 'import numpy as np\n'), ((4963, 4986), 'numpy.array', 'np.array', (['uncertainties'], {}), '(uncertainties)\n', (4971, 4986), True, 'import numpy as np\n'), ((5814, 5842), 'numpy.array', 'np.array', (['targets'], {'dtype': 'int'}), '(targets, dtype=int)\n', (5822, 5842), True, 'import numpy as np\n'), ((5889, 5912), 'numpy.array', 'np.array', (['uncertainties'], {}), '(uncertainties)\n', (5897, 5912), True, 'import numpy as np\n'), ((5929, 5944), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (5937, 5944), True, 'import numpy as np\n'), ((5959, 5981), 'numpy.zeros_like', 'np.zeros_like', (['targets'], {}), '(targets)\n', (5972, 5981), True, 'import numpy as np\n'), ((7158, 7175), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (7166, 7175), True, 'import numpy as np\n'), ((7222, 7245), 'numpy.array', 'np.array', (['uncertainties'], {}), '(uncertainties)\n', (7230, 7245), True, 'import numpy as np\n'), ((7262, 7277), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (7270, 7277), True, 'import numpy as np\n'), ((7298, 7321), 'numpy.abs', 'np.abs', (['(preds - targets)'], {}), '(preds - targets)\n', (7304, 7321), True, 'import numpy as np\n'), ((7365, 7396), 'numpy.zeros', 'np.zeros', (['[preds.shape[1], 101]'], {}), '([preds.shape[1], 101])\n', (7373, 7396), True, 'import numpy as np\n'), ((9854, 9871), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (9862, 9871), True, 'import numpy as np\n'), ((9918, 9941), 'numpy.array', 'np.array', (['uncertainties'], {}), '(uncertainties)\n', (9926, 9941), True, 'import numpy as np\n'), ((9958, 9973), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (9966, 9973), True, 'import numpy as np\n'), ((9994, 10017), 'numpy.abs', 'np.abs', (['(preds - targets)'], {}), '(preds - targets)\n', (10000, 10017), True, 'import numpy as np\n'), ((10063, 10122), 'numpy.rec.fromarrays', 
'np.rec.fromarrays', (['[uncertainties, abs_error]'], {'names': '"""i, j"""'}), "([uncertainties, abs_error], names='i, j')\n", (10080, 10122), True, 'import numpy as np\n'), ((10473, 10515), 'numpy.array_split', 'np.array_split', (['uncertainties', '(100)'], {'axis': '(0)'}), '(uncertainties, 100, axis=0)\n', (10487, 10515), True, 'import numpy as np\n'), ((10591, 10629), 'numpy.array_split', 'np.array_split', (['abs_error', '(100)'], {'axis': '(0)'}), '(abs_error, 100, axis=0)\n', (10605, 10629), True, 'import numpy as np\n'), ((10651, 10682), 'numpy.zeros', 'np.zeros', (['[preds.shape[1], 100]'], {}), '([preds.shape[1], 100])\n', (10659, 10682), True, 'import numpy as np\n'), ((10720, 10744), 'numpy.zeros_like', 'np.zeros_like', (['mean_vars'], {}), '(mean_vars)\n', (10733, 10744), True, 'import numpy as np\n'), ((12972, 12989), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (12980, 12989), True, 'import numpy as np\n'), ((13036, 13059), 'numpy.array', 'np.array', (['uncertainties'], {}), '(uncertainties)\n', (13044, 13059), True, 'import numpy as np\n'), ((13076, 13091), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (13084, 13091), True, 'import numpy as np\n'), ((13112, 13135), 'numpy.abs', 'np.abs', (['(preds - targets)'], {}), '(preds - targets)\n', (13118, 13135), True, 'import numpy as np\n'), ((3773, 3796), 'numpy.array', 'np.array', (['uncertainties'], {}), '(uncertainties)\n', (3781, 3796), True, 'import numpy as np\n'), ((3817, 3832), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (3825, 3832), True, 'import numpy as np\n'), ((3855, 3872), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (3863, 3872), True, 'import numpy as np\n'), ((5089, 5107), 'numpy.log', 'np.log', (['likelihood'], {}), '(likelihood)\n', (5095, 5107), True, 'import numpy as np\n'), ((6151, 6180), 'numpy.zeros_like', 'np.zeros_like', (['preds[:, 0, :]'], {}), '(preds[:, 0, :])\n', (6164, 6180), True, 'import numpy as np\n'), ((6306, 
6346), 'numpy.sum', 'np.sum', (['(bin_targets * task_preds)'], {'axis': '(1)'}), '(bin_targets * task_preds, axis=1)\n', (6312, 6346), True, 'import numpy as np\n'), ((8612, 8634), 'numpy.sqrt', 'np.sqrt', (['uncertainties'], {}), '(uncertainties)\n', (8619, 8634), True, 'import numpy as np\n'), ((5123, 5143), 'numpy.mean', 'np.mean', (['nll'], {'axis': '(0)'}), '(nll, axis=0)\n', (5130, 5143), True, 'import numpy as np\n'), ((6375, 6398), 'numpy.log', 'np.log', (['task_likelihood'], {}), '(task_likelihood)\n', (6381, 6398), True, 'import numpy as np\n'), ((6447, 6467), 'numpy.mean', 'np.mean', (['nll'], {'axis': '(0)'}), '(nll, axis=0)\n', (6454, 6467), True, 'import numpy as np\n'), ((8251, 8288), 'numpy.mean', 'np.mean', (['(bin_unc >= abs_error)'], {'axis': '(0)'}), '(bin_unc >= abs_error, axis=0)\n', (8258, 8288), True, 'import numpy as np\n'), ((8805, 8842), 'numpy.mean', 'np.mean', (['(bin_unc >= abs_error)'], {'axis': '(0)'}), '(bin_unc >= abs_error, axis=0)\n', (8812, 8842), True, 'import numpy as np\n'), ((10873, 10902), 'numpy.mean', 'np.mean', (['split_unc[i]'], {'axis': '(0)'}), '(split_unc[i], axis=0)\n', (10880, 10902), True, 'import numpy as np\n'), ((12163, 12188), 'numpy.abs', 'np.abs', (['(mean_vars - rmses)'], {}), '(mean_vars - rmses)\n', (12169, 12188), True, 'import numpy as np\n'), ((13279, 13326), 'scipy.stats.spearmanr', 'spearmanr', (['uncertainties[:, i]', 'abs_error[:, i]'], {}), '(uncertainties[:, i], abs_error[:, i])\n', (13288, 13326), False, 'from scipy.stats import t, spearmanr\n'), ((3891, 3924), 'numpy.log', 'np.log', (['(2 * np.pi * uncertainties)'], {}), '(2 * np.pi * uncertainties)\n', (3897, 3924), True, 'import numpy as np\n'), ((4013, 4033), 'numpy.mean', 'np.mean', (['nll'], {'axis': '(0)'}), '(nll, axis=0)\n', (4020, 4033), True, 'import numpy as np\n'), ((4214, 4234), 'numpy.mean', 'np.mean', (['nll'], {'axis': '(0)'}), '(nll, axis=0)\n', (4221, 4234), True, 'import numpy as np\n'), ((6229, 6256), 'numpy.arange', 
'np.arange', (['targets.shape[0]'], {}), '(targets.shape[0])\n', (6238, 6256), True, 'import numpy as np\n'), ((8144, 8179), 'numpy.expand_dims', 'np.expand_dims', (['bin_scaling'], {'axis': '(0)'}), '(bin_scaling, axis=0)\n', (8158, 8179), True, 'import numpy as np\n'), ((8701, 8716), 'scipy.special.erfinv', 'erfinv', (['(i / 100)'], {}), '(i / 100)\n', (8707, 8716), False, 'from scipy.special import erfinv\n'), ((8719, 8729), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8726, 8729), True, 'import numpy as np\n'), ((11181, 11236), 'scipy.stats.t.var', 't.var', ([], {'df': '(self.calibrator.num_models - 1)', 'scale': 'bin_unc'}), '(df=self.calibrator.num_models - 1, scale=bin_unc)\n', (11186, 11236), False, 'from scipy.stats import t, spearmanr\n'), ((11271, 11295), 'numpy.mean', 'np.mean', (['bin_var'], {'axis': '(0)'}), '(bin_var, axis=0)\n', (11278, 11295), True, 'import numpy as np\n'), ((8081, 8121), 'numpy.expand_dims', 'np.expand_dims', (['original_scaling'], {'axis': '(0)'}), '(original_scaling, axis=0)\n', (8095, 8121), True, 'import numpy as np\n'), ((10949, 10974), 'numpy.square', 'np.square', (['split_error[i]'], {}), '(split_error[i])\n', (10958, 10974), True, 'import numpy as np\n'), ((11114, 11154), 'numpy.expand_dims', 'np.expand_dims', (['original_scaling'], {'axis': '(0)'}), '(original_scaling, axis=0)\n', (11128, 11154), True, 'import numpy as np\n'), ((11888, 11925), 'numpy.expand_dims', 'np.expand_dims', (['stdev_scaling'], {'axis': '(0)'}), '(stdev_scaling, axis=0)\n', (11902, 11925), True, 'import numpy as np\n'), ((12030, 12048), 'numpy.square', 'np.square', (['bin_unc'], {}), '(bin_unc)\n', (12039, 12048), True, 'import numpy as np\n'), ((2849, 2866), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (2857, 2866), True, 'import numpy as np\n'), ((11342, 11367), 'numpy.square', 'np.square', (['split_error[i]'], {}), '(split_error[i])\n', (11351, 11367), True, 'import numpy as np\n'), ((11825, 11865), 'numpy.expand_dims', 
'np.expand_dims', (['original_scaling'], {'axis': '(0)'}), '(original_scaling, axis=0)\n', (11839, 11865), True, 'import numpy as np\n'), ((12104, 12129), 'numpy.square', 'np.square', (['split_error[i]'], {}), '(split_error[i])\n', (12113, 12129), True, 'import numpy as np\n'), ((8991, 9005), 'numpy.arange', 'np.arange', (['(101)'], {}), '(101)\n', (9000, 9005), True, 'import numpy as np\n')] |
"""
Classes for preprocessing input data in various contexts.
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:author: <NAME> (<EMAIL>)
:organization: ETS
"""
import logging
import re
import warnings
import numpy as np
import pandas as pd
from collections import defaultdict
from numpy.random import RandomState
from .container import DataContainer
from .reader import DataReader
from .reporter import Reporter
from .transformer import FeatureTransformer
from .utils.conversion import convert_to_float
from .utils.models import is_built_in_model, is_skll_model
class FeatureSubsetProcessor:
    """
    Encapsulate feature sub-setting methods.
    """

    @classmethod
    def select_by_subset(cls, feature_columns, feature_subset_specs, subset):
        """
        Pick the feature columns that belong to the given pre-defined subset.

        Parameters
        ----------
        feature_columns : list
            A list of feature columns
        feature_subset_specs : pd.DataFrame
            The feature subset spec DataFrame.
        subset : str
            The column to subset.

        Returns
        -------
        feature_names : list
            A list of feature names to include.
        """
        # Features flagged with 1 in the requested subset column.
        subset_features = feature_subset_specs[feature_subset_specs[subset] == 1]['Feature']
        feature_names = [column for column in feature_columns
                         if column in subset_features.values]

        # Warn about data columns that have no subset information at all.
        if len(feature_names) != len(feature_columns):
            known_features = set(feature_subset_specs['Feature'])
            extra_columns = set(feature_columns).difference(known_features)
            if extra_columns:
                logging.warning("No subset information was available for the "
                                "following columns in the input file. These "
                                "columns will not be used in the model: "
                                "{}".format(', '.join(extra_columns)))

        # Warn about subset features that are missing from the input data.
        if len(feature_names) != len(subset_features):
            extra_subset_features = set(subset_features).difference(set(feature_names))
            if extra_subset_features:
                logging.warning("The following features were included into the {} "
                                "subset in the feature_subset_file but were not "
                                "specified in the input data: "
                                "{}".format(subset, ', '.join(extra_subset_features)))
        return feature_names

    @classmethod
    def check_feature_subset_file(cls, df, subset=None, sign=None):
        """
        Validate the format and contents of a feature subset file.

        Raises an exception on the first problem found and otherwise
        returns nothing.

        Parameters
        ----------
        df : pd.DataFrame
            The feature subset file DataFrame.
        subset : str, optional
            Name of a pre-defined feature subset.
            Defaults to None.
        sign : str, optional
            Value of the sign
            Defaults to None.

        Raises
        ------
        ValueError
            If any columns are missing from the subset file
            or if any of the columns contain invalid values.
        """
        # Title-cased column names ('Feature') are accepted for historical
        # reasons, so both spellings are checked.
        df_specs = df.copy()
        if 'feature' not in df_specs and 'Feature' not in df_specs:
            raise ValueError("The feature_subset_file must contain "
                             "a column named 'feature' "
                             "containing the feature names.")

        if subset:
            if subset not in df_specs:
                raise ValueError("Unknown value for feature_subset: {}".format(subset))
            if not df_specs[subset].isin([0, 1]).all():
                raise ValueError("The subset columns in feature "
                                 "file can only contain 0 or 1")

        if sign:
            candidate_columns = ['sign_{}'.format(sign),
                                 'Sign_{}'.format(sign)]
            sign_columns = [c for c in candidate_columns if c in df_specs]
            if len(sign_columns) > 1:
                raise ValueError("The feature_subset_file contains "
                                 "multiple columns for sign: "
                                 "{}".format(' ,'.join(sign_columns)))
            if len(sign_columns) == 0:
                raise ValueError("The feature_subset_file must "
                                 "contain the requested "
                                 "sign column 'sign_{}'".format(sign))
            if not df_specs[sign_columns[0]].isin(['-', '+']).all():
                raise ValueError("The sign columns in feature "
                                 "file can only contain - or +")
class FeatureSpecsProcessor:
    """
    Processor class for feature specifications ("feature specs").

    A feature specs data frame has three columns -- "feature",
    "transform", and "sign" -- and optionally "min"/"max" columns
    when truncation values are in use.
    """

    @classmethod
    def generate_default_specs(cls, feature_names):
        """
        Build default specifications for the given feature names.

        Every feature gets the identity transform (`"raw"`) and a
        positive sign (`1`).

        Parameters
        ----------
        feature_names : list
            Names of the features to generate specifications for.

        Returns
        -------
        feature_specs : pandas DataFrame
            Data frame with "feature", "transform", and "sign" columns,
            suitable for saving as a feature list file.
        """
        specs = pd.DataFrame({'feature': feature_names})
        specs['transform'] = 'raw'
        specs['sign'] = 1.0
        return specs

    @classmethod
    def find_feature_sign(cls, feature, sign_dict):
        """
        Look up the numeric sign for the given feature.

        Parameters
        ----------
        feature : str
            The name of the feature.
        sign_dict : dict
            Mapping from feature names to sign strings ('-' or '+').

        Returns
        -------
        feature_sign_numeric : float
            -1.0 if the feature's sign string is '-', otherwise 1.0.
            Features missing from `sign_dict` default to 1.0 (with a
            warning logged).
        """
        try:
            sign_string = sign_dict[feature]
        except KeyError:
            # no sign information: fall back to a positive weight
            logging.warning("No information about sign is available "
                            "for feature {}. The feature will be assigned "
                            "the default positive weight.".format(feature))
            return 1.0
        return -1.0 if sign_string == '-' else 1.0

    @classmethod
    def validate_feature_specs(cls, df, use_truncations=False):
        """
        Validate and normalize a feature specs data frame.

        Ensures there are no duplicate feature names, that the "sign"
        column (if present) only contains +/-1, and fills in default
        values for "transform" ('raw') and "sign" (1) when absent.

        Parameters
        ----------
        df : pd.DataFrame
            The feature specification DataFrame to validate.
        use_truncations : bool, optional
            Whether truncation values ("min"/"max" columns) are
            required to be present.
            Defaults to False.

        Returns
        -------
        df_specs_new : pandas DataFrame
            Normalized copy of the input restricted to the expected
            columns.

        Raises
        ------
        KeyError
            If there is no "feature" column.
        ValueError
            If feature names are duplicated, if the "sign" column
            contains invalid values, or if `use_truncations` is True
            but "min"/"max" columns are missing.
        """
        df_specs_new = df.copy()
        columns_to_keep = ['feature', 'sign', 'transform']
        # 'Feature' (capitalized) is allowed internally since it is the
        # column name used in the subset feature file
        if 'Feature' in df:
            df_specs_new['feature'] = df['Feature']
        if 'feature' not in df_specs_new:
            raise KeyError("The feature file must contain a "
                           "column named 'feature'")
        # duplicate feature names are not allowed
        name_counts = df_specs_new['feature'].value_counts()
        duplicates = name_counts[name_counts > 1]
        if not duplicates.empty:
            raise ValueError("The following feature names "
                             " are duplicated in the feature "
                             "file: {}".format(duplicates.index))
        # the sign column, when given, must be convertible to +/-1 floats
        if 'sign' in df_specs_new:
            try:
                df_specs_new['sign'] = df_specs_new['sign'].astype(float)
                assert np.all(df_specs_new['sign'].isin([-1, 1]))
            except (ValueError, AssertionError):
                raise ValueError("The `sign` column in the feature"
                                 "file can only contain '1' or '-1'")
        else:
            df_specs_new['sign'] = 1
        if 'transform' not in df_specs_new:
            df_specs_new['transform'] = 'raw'
        if use_truncations:
            if not all(col in df_specs_new for col in ['min', 'max']):
                raise ValueError('The ``use_truncation_thresholds`` configuration option '
                                 'was specified, but no ``min`` or ``max`` columns exist '
                                 'in the feature file.')
            # truncation bounds become part of the expected output
            columns_to_keep.extend(['min', 'max'])
        return df_specs_new[columns_to_keep]

    @classmethod
    def generate_specs(cls,
                       df,
                       feature_names,
                       train_label,
                       feature_subset=None,
                       feature_sign=None):
        """
        Generate feature specifications from the data.

        The sign comes from the feature subset file (when given) and
        the transform is chosen based on each feature's correlation
        with the training label.

        Parameters
        ----------
        df : pd.DataFrame
            The DataFrame from which to generate specs.
        feature_names : list
            A list of feature names.
        train_label : str
            The label column for the training data.
        feature_subset : pd.DataFrame, optional
            A feature_subset_specs DataFrame with `Feature` and
            `Sign_*` columns.
        feature_sign : int, optional
            Suffix selecting which `Sign_*` column of
            `feature_subset` to use.

        Returns
        -------
        df_feature_specs : pd.DataFrame
            A feature specifications DataFrame.
        """
        # build the {feature: sign-string} lookup, if sign info was requested
        if feature_sign:
            sign_column = feature_subset['Sign_{}'.format(feature_sign)]
            sign_dict = dict(zip(feature_subset.Feature, sign_column))
        else:
            sign_dict = {}
        feature_specs = []
        for feature in feature_names:
            transform = FeatureTransformer.find_feature_transform(feature,
                                                                  df[feature],
                                                                  df[train_label])
            sign = cls.find_feature_sign(feature, sign_dict)
            # inverse transformations flip the direction of the feature
            if transform in ['inv', 'addOneInv']:
                sign = sign * -1
            feature_specs.append({'feature': feature,
                                  'transform': transform,
                                  'sign': sign})
        return pd.DataFrame(feature_specs)
class FeaturePreprocessor:
"""
A class to pre-process training and testing features.
"""
@staticmethod
def check_model_name(model_name):
"""
Check that the given model name is valid and determine its type.
Parameters
----------
model_name : str
Name of the model.
Returns
-------
model_type: str
One of `BUILTIN` or `SKLL`.
Raises
------
ValueError
If the model is not supported.
"""
if is_built_in_model(model_name):
model_type = 'BUILTIN'
elif is_skll_model(model_name):
model_type = 'SKLL'
else:
raise ValueError("The specified model {} "
"was not found. Please "
"check the spelling.".format(model_name))
return model_type
@staticmethod
def trim(values,
trim_min,
trim_max,
tolerance=0.4998):
"""
Trim the values contained in the given numpy array to
`trim_min` - `tolerance` as the floor and
`trim_max` + `tolerance` as the ceiling.
Parameters
----------
values : list or np.array
The values to trim.
trim_min : float
The lowest score on the score point, used for
trimming the raw regression predictions.
trim_max : float
The highest score on the score point, used for
trimming the raw regression predictions.
tolerance : float, optional
The tolerance that will be used to compute the
trim interval. Defaults to 0.4998.
Returns
-------
trimmed_values : np.array
Trimmed values.
"""
if isinstance(values, list):
values = np.array(values)
new_max = trim_max + tolerance
new_min = trim_min - tolerance
trimmed_values = values.copy()
trimmed_values[trimmed_values > new_max] = new_max
trimmed_values[trimmed_values < new_min] = new_min
return trimmed_values
@staticmethod
def remove_outliers(values,
mean=None,
sd=None,
sd_multiplier=4):
"""
Clamp any values in the given numpy array that are
+/- `sd_multiplier` (:math:`m`) standard deviations (:math:`\\sigma`)
away from the mean (:math:`\\mu`). Use given `mean` and `sd` instead
of computing :math:`\\sigma` and :math:`\\mu`, if specified.
The values are clamped to the interval .. math::
[\\mu - m * \\sigma, \\mu + m * \\sigma]
Parameters
----------
values : np.array
The values from which to remove outliers.
mean : int or float, optional
Use the given mean value when computing outliers
instead of the mean from the data.
Defaults to None
sd : None, optional
Use the given std. dev. value when computing
outliers instead of the std. dev. from the
data.
Defaults to None.
sd_multiplier : int, optional
Use the given multipler for the std. dev. when
computing the outliers. Defaults to 4.
Defaults to 4.
Returns
-------
new_values : np.array
Numpy array with the outliers clamped.
"""
# convert data to a numpy float array before doing any clamping
new_values = np.array(values, dtype=np.float)
if not mean:
mean = new_values.mean()
if not sd:
sd = new_values.std()
floor = mean - sd_multiplier * sd
ceiling = mean + sd_multiplier * sd
new_values[new_values > ceiling] = ceiling
new_values[new_values < floor] = floor
return new_values
@staticmethod
def remove_outliers_using_truncations(values,
feature_name,
truncations):
"""
Remove outliers using truncation groups,
rather than calculating the outliers based
on the training set.
Parameters
----------
values : np.array
The values from which to remove outliers.
feature_name : str
Name of the feature whose outliers are
being clamped.
truncations : pd.DataFrame
A data frame with truncation values. The
features should be set as the index.
Returns
-------
new_values : numpy array
Numpy array with the outliers clamped.
"""
# convert data to a numpy float array before doing any clamping
new_values = np.array(values, dtype=np.float)
minimum = truncations.loc[feature_name, 'min']
maximum = truncations.loc[feature_name, 'max']
new_values[new_values > maximum] = maximum
new_values[new_values < minimum] = minimum
return new_values
@staticmethod
def select_candidates(df,
N,
candidate_col='candidate'):
"""
Only select candidates which have responses to N or more items.
Parameters
----------
df : pd.DataFrame
The DataFrame from which to select candidates with N or more items.
N: int
minimal number of items per candidate
candidate_col : str, optional
name of the column which contains candidate ids.
Defaults to 'candidate'.
Returns
-------
df_included: pandas DataFrame
Data frame with responses from candidates with responses to N
or more items
df_excluded: pandas DataFrame
Data frame with responses from candidates with responses to
less than N items
"""
items_per_candidate = df[candidate_col].value_counts()
selected_candidates = items_per_candidate[items_per_candidate >= N]
selected_candidates = selected_candidates.index
df_included = df[df[candidate_col].isin(selected_candidates)].copy()
df_excluded = df[~df[candidate_col].isin(selected_candidates)].copy()
# reset indices
df_included.reset_index(drop=True, inplace=True)
df_excluded.reset_index(drop=True, inplace=True)
return (df_included,
df_excluded)
@staticmethod
def check_subgroups(df, subgroups):
"""
Check that all subgroups, if specified, correspond to columns in the
provided data frame, and replace all NaNs in subgroups values with
'No info' for later convenience. Raises an exception if any specified
subgroup columns are missing.
Parameters
----------
df : pd.DataFrame
DataFrame with subgroups to check.
subgroups : list of str
List of column names that contain grouping
information.
Returns
-------
df : pandas DataFrame
Modified input data frame with NaNs replaced.
Raises
------
KeyError
If the data does not contain columns for all subgroups
"""
missing_sub_cols = set(subgroups).difference(df.columns)
if missing_sub_cols:
raise KeyError("The data does not contain columns "
"for all subgroups specified in the "
"configuration file. Please check for "
"capitalization and other spelling "
"errors and make sure the subgroup "
"names do not contain hyphens. "
"The data does not have columns "
"for the following "
"subgroups: {}".format(', '.join(missing_sub_cols)))
# replace any empty values in subgroups values by "No info"
empty_value = re.compile(r"^\s*$")
df[subgroups] = df[subgroups].replace(to_replace=empty_value,
value='No info')
return df
    @staticmethod
    def rename_default_columns(df,
                               requested_feature_names,
                               id_column,
                               first_human_score_column,
                               second_human_score_column,
                               length_column,
                               system_score_column,
                               candidate_column):
        """
        Standardize column names to the internal defaults.

        Rename the user-specified columns to the internal default names
        ('spkitemid', 'sc1', 'sc2', 'length', 'raw', 'candidate').
        Any *other* column that happens to already carry one of those
        default names (and is not a requested feature) is first renamed
        to ##NAME## so it cannot collide with the real column.

        Parameters
        ----------
        df : pd.DataFrame
            The DataFrame whose columns to rename.
        requested_feature_names : list
            List of feature column names that we want
            to include in the scoring model.
        id_column : str
            Column name containing the response IDs.
        first_human_score_column : str or None
            Column name containing the H1 scores.
        second_human_score_column : str or None
            Column name containing the H2 scores.
            Should be None if no H2 scores are available.
        length_column : str or None
            Column name containing response lengths.
            Should be None if lengths are not available.
        system_score_column : str
            Column name containing the score predicted
            by the system. This is only used for RSMEval.
        candidate_column : str or None
            Column name containing identifying information
            at the candidate level. Should be None if such
            information is not available.

        Returns
        -------
        df : pandas DataFrame
            Modified input data frame with all the approximate
            re-namings.
        """
        # work on a copy; the caller's frame is never mutated
        df = df.copy()
        # `columns` and `defaults` are parallel lists: columns[i] is the
        # user-supplied name that should end up as defaults[i]
        columns = [id_column,
                   first_human_score_column,
                   second_human_score_column,
                   length_column,
                   system_score_column,
                   candidate_column]
        defaults = ['spkitemid', 'sc1', 'sc2', 'length', 'raw', 'candidate']
        # create a dictionary of name mapping for used columns;
        # entries whose source column is None (not supplied) are dropped
        name_mapping = dict(filter(lambda t: t[0] is not None, zip(columns,
                                                                   defaults)))
        # find the columns where the names already match the default names
        # (these need no renaming at all)
        correct_defaults = [column for (column, default)
                            in name_mapping.items()
                            if column == default]
        # find the columns with default names reserved for other columns
        # which are not used as features in the model; these would collide
        # with the renames below, so they get escaped first
        columns_with_incorrect_default_names = [column for column in df.columns
                                                if (column in defaults and
                                                    column not in correct_defaults and
                                                    column not in requested_feature_names)]
        # rename these colliding columns to ##NAME## to move them out of the way
        if columns_with_incorrect_default_names:
            new_column_names = ['##{}##'.format(column) for column
                                in columns_with_incorrect_default_names]
            df.rename(columns=dict(zip(columns_with_incorrect_default_names,
                                       new_column_names)),
                      inplace=True)
        # find the columns where the names do not match the default
        columns_with_custom_names = [column for column in name_mapping
                                     if column not in correct_defaults]
        # rename the custom-named columns to default values
        for column in columns_with_custom_names:
            # if the column has already been renamed because it used a
            # default name, then use the updated (##-escaped) name
            if column in columns_with_incorrect_default_names:
                df.rename(columns={'##{}##'.format(column):
                                   name_mapping[column]},
                          inplace=True)
            else:
                df.rename(columns={column:
                                   name_mapping[column]},
                          inplace=True)
        return df
    @staticmethod
    def filter_on_column(df,
                         column,
                         id_column,
                         exclude_zeros=False,
                         exclude_zero_sd=False):
        """
        Filter out the rows in the given data frame that contain non-numeric
        (or zero, if specified) values in the specified column. Additionally,
        it may exclude any columns if they have a standard deviation
        (:math:`\\sigma`) of 0.

        Parameters
        ----------
        df : pd.DataFrame
            The DataFrame to filter on.
        column : str
            Name of the column from which to filter out values.
        id_column : str
            Name of the column containing the unique response IDs.
            NOTE(review): not referenced in the body below; presumably
            kept for interface symmetry with sibling methods -- confirm.
        exclude_zeros : bool, optional
            Whether to exclude responses containing zeros
            in the specified column. Defaults to `False`.
        exclude_zero_sd : bool, optional
            Whether to perform the additional filtering step of removing
            columns that have :math:`\\sigma = 0`. Defaults to `False`.

        Returns
        -------
        df_filtered : pandas DataFrame
            Data frame containing the responses that were *not* filtered out.
        df_excluded : pandas DataFrame
            Data frame containing the non-numeric or zero responses that
            were filtered out.

        Note
        ----
        The columns with :math:`\\sigma=0` are removed from both output
        data frames.
        """
        # create a copy of the original data frame
        df_filter = df.copy()
        # we start out assuming that we will not drop this column
        drop_column = False
        # return a copy of the original data frame if
        # the given column does not exist at all
        if column not in df.columns:
            return df_filter
        # Force convert the label column to numeric and
        # convert whatever can't be converted to a NaN
        df_filter[column] = pd.to_numeric(df_filter[column],
                                          errors='coerce').astype(float)
        # Save the values that have been converted to NaNs
        # as a separate data frame. We want to keep them as NaNs
        # to do more analyses later. We also filter out inf values.
        # Since these can only be generated during transformations,
        # we include them with NaNs for consistency.
        bad_rows = df_filter[column].isnull() | np.isinf(df_filter[column])
        df_bad_rows = df_filter[bad_rows]
        # if the column contained only non-numeric values, we need to drop it
        if len(df_bad_rows) == len(df_filter):
            logging.info(f"Feature {column} was excluded from the model "
                         f"because it only contains non-numeric values.")
            drop_column = True
        # now drop the above bad rows containing NaNs from our data frame
        df_filter = df_filter[~bad_rows]
        # exclude zeros if specified
        if exclude_zeros:
            zero_rows = df_filter[column] == 0
            df_zero_rows = df_filter[zero_rows]
            df_filter = df_filter[~zero_rows]
        else:
            # no zero filtering requested: nothing goes in the zero bucket
            df_zero_rows = pd.DataFrame()
        # combine all the filtered rows into a single data frame
        df_exclude = pd.concat([df_bad_rows, df_zero_rows], sort=True)
        # reset the index so that the indexing works correctly
        # for the next feature with missing values
        df_filter.reset_index(drop=True, inplace=True)
        df_exclude.reset_index(drop=True, inplace=True)
        # Check if the the standard deviation equals zero:
        # for training set sd == 0 will break normalization.
        # We set the tolerance level to the 6th digit
        # to account for the possibility that the exact value
        # computed by `std()` is not 0
        if exclude_zero_sd is True:
            feature_sd = df_filter[column].std()
            if np.isclose(feature_sd, 0, atol=1e-07):
                logging.info(f"Feature {column} was excluded from the model "
                             f"because its standard deviation in the "
                             f"training set is equal to 0.")
                drop_column = True
        # if `drop_column` is true, then we need to drop the column
        # from both output frames
        if drop_column:
            df_filter = df_filter.drop(column, axis=1)
            df_exclude = df_exclude.drop(column, axis=1)
        # return the filtered rows and the new data frame
        return (df_filter, df_exclude)
@staticmethod
def process_predictions(df_test_predictions,
train_predictions_mean,
train_predictions_sd,
human_labels_mean,
human_labels_sd,
trim_min,
trim_max,
trim_tolerance=0.4998):
"""
Process predictions to create scaled, trimmed
and rounded predictions.
Parameters
----------
df_test_predictions : pd.DataFrame
Data frame containing the test set predictions.
train_predictions_mean : float
The mean of the predictions on the training set.
train_predictions_sd : float
The std. dev. of the predictions on the training
set.
human_labels_mean : float
The mean of the human scores used to train the
model.
human_labels_sd : float
The std. dev. of the human scores used to train
the model.
trim_min : float
The lowest score on the score point, used for
trimming the raw regression predictions.
trim_max : float
The highest score on the score point, used for
trimming the raw regression predictions.
trim_tolerance: float
Tolerance to be added to trim_max and substracted from
trim_min. Defaults to 0.4998.
Returns
-------
df_pred_processed : pd.DataFrame
Data frame containing the various trimmed
and rounded predictions.
"""
# rescale the test set predictions by boosting
# them to match the human mean and SD
scaled_test_predictions = (df_test_predictions['raw'] -
train_predictions_mean) / train_predictions_sd
scaled_test_predictions = scaled_test_predictions * human_labels_sd + human_labels_mean
df_pred_process = df_test_predictions.copy()
df_pred_process['scale'] = scaled_test_predictions
# trim and round the predictions before running the analyses
df_pred_process['raw_trim'] = FeaturePreprocessor.trim(df_pred_process['raw'],
trim_min,
trim_max,
trim_tolerance)
df_pred_process['raw_trim_round'] = np.rint(df_pred_process['raw_trim'])
df_pred_process['raw_trim_round'] = df_pred_process['raw_trim_round'].astype('int64')
df_pred_process['scale_trim'] = FeaturePreprocessor.trim(df_pred_process['scale'],
trim_min,
trim_max,
trim_tolerance)
df_pred_process['scale_trim_round'] = np.rint(df_pred_process['scale_trim'])
df_pred_process['scale_trim_round'] = df_pred_process['scale_trim_round'].astype('int64')
return df_pred_process
def filter_on_flag_columns(self,
df,
flag_column_dict):
"""
Check that all flag_columns are present in the given
data frame, convert these columns to strings and filter
out the values which do not match the condition in
`flag_column_dict`.
Parameters
----------
df : pd.DataFrame
The DataFrame to filter on.
flag_column_dict : dict
Dictionary containing the flag column
information.
Returns
-------
df_responses_with_requested_flags : pandas DataFrame
Data frame containing the responses remaining
after filtering using the specified flag
columns.
df_responses_with_excluded_flags : pandas DataFrame
Data frame containing the responses filtered
out using the specified flag columns.
Raises
------
KeyError
If the columns listed in the dictionary are
not actually present in the data frame.
ValueError
If no responses remain after filtering based
on the flag column information.
"""
df = df.copy()
flag_columns = list(flag_column_dict.keys())
if not flag_columns:
return df.copy(), pd.DataFrame(columns=df.columns)
else:
# check that all columns are present
missing_flag_columns = set(flag_columns).difference(df.columns)
if missing_flag_columns:
raise KeyError("The data does not contain columns "
"for all flag columns specified in the "
"configuration file. Please check for "
"capitalization and other spelling "
"errors and make sure the flag column "
"names do not contain hyphens. "
"The data does not have the following columns: "
"{}".format(', '.join(missing_flag_columns)))
# since flag column may be a mix of strings and numeric values
# we convert all strings and integers to floats such that, for
# example, “1”, 1, and “1.0" all map to 1.0. To do this, we will
# first convert all the strings to numbers and then convert
# all the integers to floats.
flag_column_dict_to_float = {key: list(map(convert_to_float, value))
for (key, value)
in flag_column_dict.items()}
# and now convert the the values in the feature column
# in the data frame
df_new = df[flag_columns].copy()
df_new = df_new.applymap(convert_to_float)
# identify responses with values which satisfy the condition
full_mask = df_new.isin(flag_column_dict_to_float)
flag_mask = full_mask[list(flag_column_dict_to_float.keys())].all(1)
# return the columns from the original frame that was passed in
# so that all data types remain the same and are not changed
df_responses_with_requested_flags = df[flag_mask].copy()
df_responses_with_excluded_flags = df[~flag_mask].copy()
# make sure that the remaining data frame is not empty
if len(df_responses_with_requested_flags) == 0:
raise ValueError("No responses remaining after filtering "
"on flag columns. No further analysis can "
"be run.")
# reset the index
df_responses_with_requested_flags.reset_index(drop=True,
inplace=True)
df_responses_with_excluded_flags.reset_index(drop=True,
inplace=True)
return (df_responses_with_requested_flags,
df_responses_with_excluded_flags)
def generate_feature_names(self,
df,
reserved_column_names,
feature_subset_specs,
feature_subset):
"""
Generate the feature names from the column
names of the given data frame and select the
specified subset of features.
Parameters
----------
df : pd.DataFrame
The DataFrame from which to generate feature names.
reserved_column_names : list
Names of reserved columns.
feature_subset_specs : pd.DataFrame
Feature subset specs
feature_subset : str
Feature subset column.
Returns
-------
feautre_names : list
A list of features names.
"""
df = df.copy()
# Exclude the reserved names
possible_feature_names = [cname for cname in df.columns
if cname not in reserved_column_names]
# Select the features by subset.
# In the future, we may add option to select
# by other methods, if needed.
if feature_subset is not None:
feature_names = FeatureSubsetProcessor.select_by_subset(possible_feature_names,
feature_subset_specs,
feature_subset)
else:
feature_names = possible_feature_names
return feature_names
def preprocess_feature(self,
values,
feature_name,
feature_transform,
feature_mean,
feature_sd,
exclude_zero_sd=False,
raise_error=True,
truncations=None):
"""
Remove outliers and transform the values in the given numpy array
using the given outlier and transformation parameters. The values
are assumed for the given feature name.
Parameters
----------
values : np.array
The feature values to preprocess
feature_name : str
Name of the feature being pre-processed.
feature_transform : str
Name of the transformation function to apply.
feature_mean : float
Mean value to use for outlier detection instead
of the mean of the given feature values.
feature_sd : float
Std. dev. value to use for outlier detection instead
of the std. dev. of the given feature values.
exclude_zero_sd : bool, optional
Check `data` has a zero
std. dev.
Defaults to False.
raise_error : bool, optional
Raise error if any of the transformations lead to inf values
or may change the ranking of feature values.
Defaults to True.
truncations : pd.DataFrame or None, optional
The truncations set, if we are using pre-defined
truncation values. Otherwise, None.
Defaults to None.
Returns
-------
transformed_feature : numpy array
Numpy array containing the transformed and clamped
feature values.
Raises
------
ValueError
If the given values have zero standard deviation and
`exclude_zero_sd` is set to `True`.
"""
if truncations is not None:
# clamp outlier values using the truncations set
features_no_outliers = self.remove_outliers_using_truncations(values,
feature_name,
truncations)
else:
# clamp any outlier values that are 4 standard deviations
# away from the mean
features_no_outliers = self.remove_outliers(values,
mean=feature_mean,
sd=feature_sd)
# apply the requested transformation to the feature
transformed_feature = FeatureTransformer.transform_feature(features_no_outliers,
feature_name,
feature_transform,
raise_error=raise_error)
# check the standard deviation of the transformed feature
# we set ddof to 1 so that np.std gave the same result as pandas .std
# we also set the tolerance limit to account for cases where std
# is computed as a very low decimal rather than 0
# We only do this for the training set.
if exclude_zero_sd:
feature_sd = np.std(transformed_feature, ddof=1)
if np.isclose(feature_sd, 0, atol=1e-07):
raise ValueError("The standard deviation for "
"feature {} is 0 after pre-processing. "
"Please exclude this feature and re-run "
"the experiment.".format(feature_name))
return transformed_feature
def preprocess_features(self,
df_train,
df_test,
df_feature_specs,
standardize_features=True,
use_truncations=False):
"""
Pre-process those features in the given training and testing
data frame `df` whose specifications are contained in
`feature_specs`. Also return a third data frame containing the
feature specs themselves.
Parameters
----------
df_train : pandas DataFrame
Data frame containing the raw feature values
for the training set.
df_test : pandas DataFrame
Data frame containing the raw feature values
for the test set.
df_feature_specs : pandas DataFrame
Data frame containing the various specifications
from the feature file.
standardize_features : bool, optional
Whether to standardize the features
Defaults to True.
use_truncations : bool, optional
Whether we should use the truncation set
for removing outliers.
Defaults to False.
Returns
-------
df_train_preprocessed : pd.DataFrame
DataFrame with preprocessed training data
df_test_preprocessed : pd.DataFrame
DataFrame with preprocessed test data
df_feature_info : pd.DataFrame
DataFrame with feature information
"""
# keep the original data frames and make copies
# that only include features used in the model
df_train_preprocessed = df_train.copy()
df_test_preprocessed = df_test.copy()
# we also need to create a data frame that includes
# all relevant information about each feature
df_feature_info = pd.DataFrame()
# make feature the index of df_feature_specs
df_feature_specs.index = df_feature_specs['feature']
# if we are should be using truncations, then we create the truncations
# set from the feature specifications
if use_truncations:
truncations = df_feature_specs[['feature', 'min', 'max']].set_index('feature')
else:
truncations = None
# now iterate over each feature
for feature_name in df_feature_specs['feature']:
feature_transformation = df_feature_specs.at[feature_name, 'transform']
feature_sign = df_feature_specs.at[feature_name, 'sign']
train_feature_mean = df_train[feature_name].mean()
train_feature_sd = df_train[feature_name].std()
training_feature_values = df_train[feature_name].values
df_train_preprocessed[feature_name] = self.preprocess_feature(training_feature_values,
feature_name,
feature_transformation,
train_feature_mean,
train_feature_sd,
exclude_zero_sd=True,
truncations=truncations)
testing_feature_values = df_test[feature_name].values
df_test_preprocessed[feature_name] = self.preprocess_feature(testing_feature_values,
feature_name,
feature_transformation,
train_feature_mean,
train_feature_sd,
truncations=truncations)
# Standardize the features using the mean and sd computed on the
# training set. These are computed separately because we need to
# get the mean of transformed feature before standardization.
train_transformed_mean = df_train_preprocessed[feature_name].mean()
train_transformed_sd = df_train_preprocessed[feature_name].std()
if standardize_features:
df_train_without_mean = (df_train_preprocessed[feature_name] -
train_transformed_mean)
df_train_preprocessed[feature_name] = df_train_without_mean / train_transformed_sd
df_test_without_mean = (df_test_preprocessed[feature_name] -
train_transformed_mean)
df_test_preprocessed[feature_name] = df_test_without_mean / train_transformed_sd
# Multiply both train and test feature by sign.
df_train_preprocessed[feature_name] = (df_train_preprocessed[feature_name] *
feature_sign)
df_test_preprocessed[feature_name] = (df_test_preprocessed[feature_name] *
feature_sign)
# update the feature preprocessing metadata frame
df_feature = pd.DataFrame([{"feature": feature_name,
"transform": feature_transformation,
"sign": feature_sign,
"train_mean": train_feature_mean,
"train_sd": train_feature_sd,
"train_transformed_mean": train_transformed_mean,
"train_transformed_sd": train_transformed_sd}])
df_feature_info = df_feature_info.append(df_feature)
# reset the index for the feature metadata frame
# since we built it up row by row
df_feature_info = df_feature_info.reset_index().drop('index', 1)
# return the three data frames
return (df_train_preprocessed,
df_test_preprocessed,
df_feature_info)
def filter_data(self,
                df,
                label_column,
                id_column,
                length_column,
                second_human_score_column,
                candidate_column,
                requested_feature_names,
                reserved_column_names,
                given_trim_min,
                given_trim_max,
                flag_column_dict,
                subgroups,
                exclude_zero_scores=True,
                exclude_zero_sd=False,
                feature_subset_specs=None,
                feature_subset=None,
                min_candidate_items=None,
                use_fake_labels=False):
    """
    Filter the data to remove rows that have zero/non-numeric values
    for `label_column`. If feature names are specified, check whether any
    features that are specifically requested in `requested_feature_names`
    are missing from the data. If no feature names are specified,
    these are generated based on column names and subset information,
    if available. The function then excludes non-numeric values for
    any feature. If the user requested to exclude candidates with less
    than `min_candidate_items` responses, such candidates are excluded.
    It also generates fake labels between 1 and 10 if
    `use_fake_labels` is set to True. Finally, it renames the id
    and label columns and splits the data into the data frame with
    feature values and score label, the data frame with information about
    subgroup and candidate (metadata) and the data frame with all other
    columns.

    Parameters
    ----------
    df : pd.DataFrame
        The DataFrame to filter.
    label_column : str
        The label column in the data.
    id_column : str
        The ID column in the data.
    length_column : str
        The length column in the data.
    second_human_score_column : str
        The second human score column in the data.
    candidate_column : str
        The candidate column in the data.
    requested_feature_names : list
        A list of requested feature names.
    reserved_column_names : list
        A list of reserved column names.
    given_trim_min : float
        The minimum trim value.
    given_trim_max : float
        The maximum trim value.
    flag_column_dict : dict
        A dictionary of flag columns.
    subgroups : list, optional
        A list of subgroups, if any.
    exclude_zero_scores : bool
        Whether to exclude zero scores.
        Defaults to True.
    exclude_zero_sd : bool, optional
        Whether to exclude features with zero standard deviation.
        Defaults to False.
    feature_subset_specs : pd.DataFrame, optional
        The feature_subset_specs DataFrame.
        Defaults to None.
    feature_subset : str, optional
        The feature subset group (e.g. 'A').
        Defaults to None.
    min_candidate_items : int, optional
        The minimum number of items needed to include candidate.
        Defaults to None.
    use_fake_labels : bool, optional
        Whether to use fake labels.
        Defaults to False.

    Returns
    -------
    df_filtered_features : pd.DataFrame
        DataFrame with filtered features
    df_filtered_metadata : pd.DataFrame
        DataFrame with filtered metadata
    df_filtered_other_columns : pd.DataFrame
        DataFrame with other columns filtered
    df_excluded : pd.DataFrame
        DataFrame with excluded records
    df_filtered_length : pd.DataFrame
        DataFrame with length column(s) filtered
    df_filtered_human_scores : pd.DataFrame
        DataFrame with human scores filtered
    df_responses_with_excluded_flags : pd.DataFrame
        A DataFrame containing responses with excluded flags
    trim_min : float
        The minimum trim value
    trim_max : float
        The maximum trim value
    feature_names : list
        A list of feature names

    Raises
    ------
    KeyError
        If columns requested in the configuration do not exist in the
        data, or if requested features are missing from the data.
    ValueError
        If response IDs are not unique, if feature names clash with
        reserved column names, or if no responses remain after any
        of the filtering steps.
    """
    # make sure that the columns specified in the
    # config file actually exist
    columns_to_check = [id_column, label_column]
    if length_column:
        columns_to_check.append(length_column)
    if second_human_score_column:
        columns_to_check.append(second_human_score_column)
    if candidate_column:
        columns_to_check.append(candidate_column)
    missing_columns = set(columns_to_check).difference(df.columns)
    if missing_columns:
        raise KeyError("Columns {} from the config file "
                       "do not exist in the data.".format(missing_columns))
    # it is possible for the `id_column` and `candidate_column` to be
    # set to the same column name in the CSV file, e.g., if there is
    # only one response per candidate. If this happens, we need to
    # create a duplicate column for candidates or id for the downstream
    # processing to work as usual.
    if id_column == candidate_column:
        # if the name for both columns is `candidate`, we need to
        # create a separate id_column name
        if id_column == 'candidate':
            df['spkitemid'] = df['candidate'].copy()
            id_column = 'spkitemid'
        # else we create a separate `candidate` column
        else:
            df['candidate'] = df[id_column].copy()
            candidate_column = 'candidate'
    # standardize the column names: id -> 'spkitemid', label -> 'sc1',
    # second score -> 'sc2', length -> 'length', candidate -> 'candidate'
    df = self.rename_default_columns(df,
                                     requested_feature_names,
                                     id_column,
                                     label_column,
                                     second_human_score_column,
                                     length_column,
                                     None,
                                     candidate_column)
    # check that the id_column contains unique values
    if df['spkitemid'].size != df['spkitemid'].unique().size:
        raise ValueError("The data contains duplicate response IDs in "
                         "'{}'. Please make sure all response IDs are "
                         "unique and re-run the tool.".format(id_column))
    # Generate feature names if no specific features were requested by the user
    if len(requested_feature_names) == 0:
        feature_names = self.generate_feature_names(df,
                                                    reserved_column_names,
                                                    feature_subset_specs=feature_subset_specs,
                                                    feature_subset=feature_subset)
    else:
        feature_names = requested_feature_names
    # make sure that feature names do not contain reserved column names
    illegal_feature_names = set(feature_names).intersection(reserved_column_names)
    if illegal_feature_names:
        raise ValueError("The following reserved "
                         "column names cannot be "
                         "used as feature names: '{}'. "
                         "Please rename these columns "
                         "and re-run the "
                         "experiment.".format(', '.join(illegal_feature_names)))
    # check to make sure that the subgroup columns are all present
    df = FeaturePreprocessor.check_subgroups(df, subgroups)
    # filter out the responses based on flag columns
    (df_responses_with_requested_flags,
     df_responses_with_excluded_flags) = self.filter_on_flag_columns(df, flag_column_dict)
    # filter out the rows that have non-numeric or zero labels
    # unless we are going to generate fake labels in the first place
    if not use_fake_labels:
        (df_filtered,
         df_excluded) = self.filter_on_column(df_responses_with_requested_flags,
                                              'sc1',
                                              'spkitemid',
                                              exclude_zeros=exclude_zero_scores)
        # make sure that the remaining data frame is not empty
        if len(df_filtered) == 0:
            raise ValueError("No responses remaining after filtering out "
                             "non-numeric human scores. No further analysis "
                             "can be run. ")
        # fall back to the observed score range when no explicit trim
        # values were given
        trim_min = given_trim_min if given_trim_min else df_filtered['sc1'].min()
        trim_max = given_trim_max if given_trim_max else df_filtered['sc1'].max()
    else:
        df_filtered = df_responses_with_requested_flags.copy()
        trim_min = given_trim_min if given_trim_min else 1
        trim_max = given_trim_max if given_trim_max else 10
        logging.info("Generating labels randomly "
                     "from [{}, {}]".format(trim_min, trim_max))
        # fixed seed so that fake labels are reproducible across runs
        randgen = RandomState(seed=1234567890)
        # NOTE(review): `random_integers` is deprecated in NumPy in favor
        # of `randint` (with an exclusive upper bound) — consider updating
        df_filtered[label_column] = randgen.random_integers(trim_min,
                                                            trim_max,
                                                            size=len(df_filtered))
    # make sure there are no missing features in the data
    missing_features = set(feature_names).difference(df_filtered.columns)
    if not missing_features:
        # make sure all features selected for model building are numeric
        # and also replace any non-numeric feature values in already
        # excluded data with NaNs for consistency
        for feat in feature_names:
            df_excluded[feat] = pd.to_numeric(df_excluded[feat],
                                              errors='coerce').astype(float)
            newdf, newdf_excluded = self.filter_on_column(df_filtered,
                                                          feat,
                                                          'spkitemid',
                                                          exclude_zeros=False,
                                                          exclude_zero_sd=exclude_zero_sd)
            del df_filtered
            df_filtered = newdf
            with np.errstate(divide='ignore'):
                df_excluded = pd.concat([df_excluded, newdf_excluded], sort=True)
        # make sure that the remaining data frame is not empty
        if len(df_filtered) == 0:
            raise ValueError("No responses remaining after filtering "
                             "out non-numeric feature values. No further "
                             "analysis can be run.")
        # Raise warning if we excluded features that were
        # specified in the .json file because sd == 0.
        omitted_features = set(requested_feature_names).difference(df_filtered.columns)
        if omitted_features:
            raise ValueError("The following requested features "
                             "were excluded because their standard "
                             "deviation on the training set was 0: {}.\n"
                             "Please edit the feature file to exclude "
                             "these features and re-run the "
                             "tool".format(', '.join(omitted_features)))
        # Update the feature names
        feature_names = [feature for feature in feature_names
                         if feature in df_filtered]
    else:
        raise KeyError("DataFrame does not contain "
                       "columns for all features specified in "
                       "the feature file. Please check for "
                       "capitalization and other spelling "
                       "errors and make sure the feature "
                       "names do not contain hyphens. "
                       "The data does not have columns "
                       "for the following features: "
                       "{}".format(', '.join(missing_features)))
    # if ``length_column`` exists, make sure it's converted to numeric;
    # values that cannot be coerced to numeric will be set to ``np.nan``
    if length_column:
        df_filtered['length'] = pd.to_numeric(df_filtered['length'], errors='coerce')
    # check the values for length column. We do this after filtering
    # to make sure we have removed responses that have not been
    # processed correctly. Else rename length column to
    # ##ORIGINAL_NAME##.
    if (length_column and
        (len(df_filtered[df_filtered['length'].isnull()]) != 0 or
            df_filtered['length'].std() <= 0)):
        logging.warning("The {} column either has missing values or a standard "
                        "deviation <= 0. No length-based analysis will be "
                        "provided. The column will be renamed as ##{}## and "
                        "saved in *train_other_columns.csv.".format(length_column,
                                                                    length_column))
        df_filtered.rename(columns={'length': '##{}##'.format(length_column)},
                           inplace=True)
    # if requested, exclude the candidates with less than X responses
    # left after filtering
    if min_candidate_items:
        (df_filtered_candidates,
         df_excluded_candidates) = FeaturePreprocessor.select_candidates(df_filtered,
                                                                         min_candidate_items)
        # check that there are still responses left for analysis
        if len(df_filtered_candidates) == 0:
            raise ValueError("After filtering non-numeric scores and "
                             "non-numeric feature values there were "
                             "no candidates with {} or more responses "
                             "left for analysis".format(min_candidate_items))
        # redefine df_filtered
        df_filtered = df_filtered_candidates.copy()
        # update df_excluded
        df_excluded = pd.concat([df_excluded, df_excluded_candidates], sort=True)
    # create separate data frames for features and sc1, all other
    # information, and responses excluded during filtering
    not_other_columns = set()
    feature_columns = ['spkitemid', 'sc1'] + feature_names
    df_filtered_features = df_filtered[feature_columns]
    not_other_columns.update(feature_columns)
    metadata_columns = ['spkitemid'] + subgroups
    if candidate_column:
        metadata_columns.append('candidate')
    df_filtered_metadata = df_filtered[metadata_columns]
    not_other_columns.update(metadata_columns)
    df_filtered_length = pd.DataFrame()
    length_columns = ['spkitemid', 'length']
    if length_column and 'length' in df_filtered:
        df_filtered_length = df_filtered[length_columns]
        not_other_columns.update(length_columns)
    df_filtered_human_scores = pd.DataFrame()
    human_score_columns = ['spkitemid', 'sc1', 'sc2']
    if second_human_score_column and 'sc2' in df_filtered:
        df_filtered_human_scores = df_filtered[human_score_columns].copy()
        not_other_columns.update(['sc2'])
        # filter out any non-numeric value rows
        # as well as zeros, if we were asked to
        df_filtered_human_scores['sc2'] = pd.to_numeric(df_filtered_human_scores['sc2'],
                                                        errors='coerce').astype(float)
        if exclude_zero_scores:
            df_filtered_human_scores['sc2'] = df_filtered_human_scores['sc2'].replace(0,
                                                                                      np.nan)
    # we need to make sure that `spkitemid` is the first column
    df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
                                               if column != 'spkitemid']]
    # now extract all other columns and add 'spkitemid'
    other_columns = ['spkitemid'] + [column for column in df_filtered
                                     if column not in not_other_columns]
    df_filtered_other_columns = df_filtered[other_columns]
    return (df_filtered_features,
            df_filtered_metadata,
            df_filtered_other_columns,
            df_excluded,
            df_filtered_length,
            df_filtered_human_scores,
            df_responses_with_excluded_flags,
            trim_min,
            trim_max,
            feature_names)
def process_data_rsmtool(self, config_obj, data_container_obj):
    """
    The main function that sets up the experiment by loading the
    training and evaluation data sets and preprocessing them. Raises
    appropriate exceptions.

    Parameters
    ----------
    config_obj : configuration_parser.Configuration
        A configuration object.
    data_container_obj : container.DataContainer
        A data container object.

    Returns
    -------
    config_obj : configuration_parser.Configuration
        A Configuration object.
    data_container : container.DataContainer
        A DataContainer object.

    Raises
    ------
    ValueError
        If the columns in the config file do not exist in the data.
    """
    train = data_container_obj.train
    test = data_container_obj.test
    feature_specs = data_container_obj.get_frame('feature_specs')
    feature_subset = data_container_obj.get_frame('feature_subset_specs')
    configdir = config_obj.configdir
    # resolve the train/test file paths relative to the config directory
    (test_file_location,
     train_file_location) = DataReader.locate_files([config_obj['test_file'],
                                                     config_obj['train_file']],
                                                    configdir)
    feature_subset_file = config_obj['feature_subset_file']
    if feature_subset_file is not None:
        feature_subset_file = DataReader.locate_files(feature_subset_file, configdir)
    # get the column name for the labels for the training and testing data
    train_label_column = config_obj['train_label_column']
    test_label_column = config_obj['test_label_column']
    # get the column name that will hold the ID for
    # both the training and the test data
    id_column = config_obj['id_column']
    # get the specified trim min, trim max and trim tolerance values
    (spec_trim_min,
     spec_trim_max,
     spec_trim_tolerance) = config_obj.get_trim_min_max_tolerance()
    # get the name of the optional column that
    # contains response length.
    length_column = config_obj['length_column']
    # get the name of the optional column that
    # contains the second human score
    second_human_score_column = config_obj['second_human_score_column']
    # get the name of the optional column that
    # contains the candidate ID
    candidate_column = config_obj['candidate_column']
    # if the test label column is the same as the
    # second human score column, raise an error
    if test_label_column == second_human_score_column:
        raise ValueError("'test_label_column' and "
                         "'second_human_score_column' cannot have the "
                         "same value.")
    # check if we are excluding candidates based on number of responses
    exclude_listwise = config_obj.check_exclude_listwise()
    min_items = config_obj['min_items_per_candidate']
    # get the name of the model that we want to train and
    # check that it's valid
    model_name = config_obj['model']
    model_type = self.check_model_name(model_name)
    # are we excluding zero scores?
    exclude_zero_scores = config_obj['exclude_zero_scores']
    # should we standardize the features
    standardize_features = config_obj['standardize_features']
    # if we are excluding zero scores but trim_min
    # is set to 0, then we need to warn the user
    if exclude_zero_scores and spec_trim_min == 0:
        logging.warning("'exclude_zero_scores' is set to True but "
                        "'trim_min' is set to 0. This may cause "
                        " unexpected behavior.")
    # are we filtering on any other columns?
    # is `flag_column` applied to training partition only
    # or both partitions?
    if 'flag_column_test' in config_obj:
        flag_partition = 'train'
    else:
        flag_partition = 'both'
    flag_column_dict = config_obj.check_flag_column(partition=flag_partition)
    flag_column_test_dict = config_obj.check_flag_column('flag_column_test',
                                                         partition='test')
    # if no test-specific flags were given, reuse the train flags for test
    if (flag_column_dict and not flag_column_test_dict):
        flag_column_test_dict = flag_column_dict
    # are we generating fake labels?
    use_fake_train_labels = train_label_column == 'fake'
    use_fake_test_labels = test_label_column == 'fake'
    # are we using truncations from the feature specs?
    use_truncations = config_obj['use_truncation_thresholds']
    # get the subgroups if any
    subgroups = config_obj.get('subgroups')
    # are there specific general report sections we want to include?
    general_report_sections = config_obj['general_sections']
    # what about the special or custom sections?
    special_report_sections = config_obj['special_sections']
    custom_report_section_paths = config_obj['custom_sections']
    if custom_report_section_paths and configdir is not None:
        logging.info('Locating custom report sections')
        custom_report_sections = Reporter.locate_custom_sections(custom_report_section_paths,
                                                                 configdir)
    else:
        custom_report_sections = []
    section_order = config_obj['section_order']
    # compute the ordered list of notebook files for the report
    chosen_notebook_files = Reporter().get_ordered_notebook_files(general_report_sections,
                                                                  special_report_sections,
                                                                  custom_report_sections,
                                                                  section_order,
                                                                  subgroups,
                                                                  model_type=model_type,
                                                                  context='rsmtool')
    # Location of feature file
    feature_field = config_obj['features']
    feature_subset_field = config_obj['feature_subset']
    # if the user requested feature_subset file and feature subset,
    # read the file and check its format
    if feature_subset is not None and feature_subset_field:
        FeatureSubsetProcessor.check_feature_subset_file(feature_subset)
    # Do we need to automatically find the best transformations/change sign?
    select_transformations = config_obj['select_transformations']
    feature_sign = config_obj['sign']
    requested_features = []
    generate_feature_specs_automatically = True
    # if the feature field is a list, then simply
    # assign it to `requested_features`
    if isinstance(feature_field, list):
        requested_features = feature_field
    elif feature_field is not None:
        generate_feature_specs_automatically = False
        feature_specs = FeatureSpecsProcessor.validate_feature_specs(feature_specs,
                                                                     use_truncations)
        requested_features = feature_specs['feature'].tolist()
    # if we get to this point and both ``generate_feature_specs_automatically``
    # and ``use_truncations`` are True, then we need to raise an error
    if use_truncations and generate_feature_specs_automatically:
        raise ValueError('You have specified the ``use_truncations`` configuration '
                         'option, but a feature file could not be found.')
    # check to make sure that `length_column` or `second_human_score_column`
    # are not also included in the requested features, if they are specified
    if (length_column and
            length_column in requested_features):
        raise ValueError("The value of 'length_column' ('{}') cannot be "
                         "used as a model feature.".format(length_column))
    if (second_human_score_column and
            second_human_score_column in requested_features):
        raise ValueError("The value of 'second_human_score_column' ('{}') cannot be "
                         "used as a model feature.".format(second_human_score_column))
    # Specify column names that cannot be used as features
    reserved_column_names = list(set(['spkitemid', 'spkitemlab',
                                      'itemType', 'r1', 'r2', 'score',
                                      'sc', 'sc1', 'adj',
                                      train_label_column,
                                      test_label_column,
                                      id_column] + subgroups + list(flag_column_dict.keys())))
    # if `second_human_score_column` is specified, then
    # we need to add the original name as well as `sc2` to the list of reserved column
    # names. And same for 'length' and 'candidate', if `length_column`
    # and `candidate_column` are specified. We add both names to
    # simplify things downstream since neither the original name nor
    # the standardized name should be used as feature names
    if second_human_score_column:
        reserved_column_names.append(second_human_score_column)
        reserved_column_names.append('sc2')
    if length_column:
        reserved_column_names.append(length_column)
        reserved_column_names.append('length')
    if candidate_column:
        reserved_column_names.append(candidate_column)
        reserved_column_names.append('candidate')
    # remove duplicates (if any) from the list of reserved column names
    reserved_column_names = list(set(reserved_column_names))
    # Make sure that the training data as specified in the
    # config file actually exists on disk and if it does,
    # load it and filter out the bad rows and features with
    # zero standard deviation. Also double check that the requested
    # features exist in the data or obtain the feature names if
    # no feature file was given.
    (df_train_features,
     df_train_metadata,
     df_train_other_columns,
     df_train_excluded,
     df_train_length,
     _,
     df_train_flagged_responses,
     used_trim_min,
     used_trim_max,
     feature_names) = self.filter_data(train,
                                       train_label_column,
                                       id_column,
                                       length_column,
                                       None,
                                       candidate_column,
                                       requested_features,
                                       reserved_column_names,
                                       spec_trim_min,
                                       spec_trim_max,
                                       flag_column_dict,
                                       subgroups,
                                       exclude_zero_scores=exclude_zero_scores,
                                       exclude_zero_sd=True,
                                       feature_subset_specs=feature_subset,
                                       feature_subset=feature_subset_field,
                                       min_candidate_items=min_items,
                                       use_fake_labels=use_fake_train_labels)
    # Generate feature specifications now that we know what features to use
    if generate_feature_specs_automatically:
        if select_transformations:
            feature_specs = FeatureSpecsProcessor.generate_specs(df_train_features,
                                                                 feature_names,
                                                                 'sc1',
                                                                 feature_subset=feature_subset,
                                                                 feature_sign=feature_sign)
        else:
            feature_specs = FeatureSpecsProcessor.generate_default_specs(feature_names)
    # Do the same for the test data except we can ignore the trim min
    # and max since we already have that from the training data and
    # we have the feature_names when no feature file was specified.
    # We also allow features with 0 standard deviation in the test file.
    if (test_file_location == train_file_location and
            train_label_column == test_label_column):
        logging.warning('The same data file and label '
                        'column are used for both training '
                        'and evaluating the model. No second '
                        'score analysis will be performed, even '
                        'if requested.')
        df_test_features = df_train_features.copy()
        df_test_metadata = df_train_metadata.copy()
        df_test_excluded = df_train_excluded.copy()
        df_test_other_columns = df_train_other_columns.copy()
        df_test_flagged_responses = df_train_flagged_responses.copy()
        df_test_human_scores = pd.DataFrame()
    else:
        (df_test_features,
         df_test_metadata,
         df_test_other_columns,
         df_test_excluded,
         _,
         df_test_human_scores,
         df_test_flagged_responses,
         _, _, _) = self.filter_data(test,
                                     test_label_column,
                                     id_column,
                                     None,
                                     second_human_score_column,
                                     candidate_column,
                                     feature_names,
                                     reserved_column_names,
                                     used_trim_min,
                                     used_trim_max,
                                     flag_column_test_dict,
                                     subgroups,
                                     exclude_zero_scores=exclude_zero_scores,
                                     exclude_zero_sd=False,
                                     min_candidate_items=min_items,
                                     use_fake_labels=use_fake_test_labels)
    logging.info('Pre-processing training and test set features')
    (df_train_preprocessed_features,
     df_test_preprocessed_features,
     df_feature_info) = self.preprocess_features(df_train_features,
                                                 df_test_features,
                                                 feature_specs,
                                                 standardize_features,
                                                 use_truncations)
    # configuration options that either override previous values or are
    # entirely for internal use
    new_config_obj = config_obj.copy()
    internal_options_dict = {'chosen_notebook_files': chosen_notebook_files,
                             'exclude_listwise': exclude_listwise,
                             'feature_subset_file': feature_subset_file,
                             'model_name': model_name,
                             'model_type': model_type,
                             'test_file_location': test_file_location,
                             'train_file_location': train_file_location,
                             'trim_min': used_trim_min,
                             'trim_max': used_trim_max}
    for key, value in internal_options_dict.items():
        new_config_obj[key] = value
    # assemble all frames for the returned DataContainer
    new_container = [{'name': 'train_features',
                      'frame': df_train_features},
                     {'name': 'test_features',
                      'frame': df_test_features},
                     {'name': 'train_preprocessed_features',
                      'frame': df_train_preprocessed_features},
                     {'name': 'test_preprocessed_features',
                      'frame': df_test_preprocessed_features},
                     {'name': 'train_metadata', 'frame': df_train_metadata},
                     {'name': 'test_metadata', 'frame': df_test_metadata},
                     {'name': 'train_other_columns', 'frame': df_train_other_columns},
                     {'name': 'test_other_columns', 'frame': df_test_other_columns},
                     {'name': 'train_excluded', 'frame': df_train_excluded},
                     {'name': 'test_excluded', 'frame': df_test_excluded},
                     {'name': 'train_length', 'frame': df_train_length},
                     {'name': 'test_human_scores', 'frame': df_test_human_scores},
                     {'name': 'train_flagged', 'frame': df_train_flagged_responses},
                     {'name': 'test_flagged', 'frame': df_test_flagged_responses},
                     {'name': 'feature_specs', 'frame': feature_specs},
                     {'name': 'feature_info', 'frame': df_feature_info}]
    new_container = DataContainer(new_container)
    return new_config_obj, new_container
def process_data_rsmeval(self, config_obj, data_container_obj):
    """
    The main function that sets up the experiment by loading the
    predictions file and preprocessing it. Raises appropriate
    exceptions.

    Parameters
    ----------
    config_obj : configuration_parser.Configuration
        A configuration object.
    data_container_obj : container.DataContainer
        A data container object.

    Returns
    -------
    config_obj : configuration_parser.Configuration
        A new configuration object.
    data_container : container.DataContainer
        A new data container object.

    Raises
    ------
    ValueError
        If score columns clash with each other, if response IDs are
        not unique, or if no responses remain after filtering.
    KeyError
        If columns specified in the config file or scale file are
        missing from the data.
    """
    # get the directory where the config file lives
    # if this is the 'expm' directory, then go
    # up one level.
    configpath = config_obj.configdir
    pred_file_location = DataReader.locate_files(config_obj['predictions_file'],
                                                 configpath)
    # get the column name for the labels for the training and testing data
    human_score_column = config_obj['human_score_column']
    system_score_column = config_obj['system_score_column']
    # if the human score column is the same as the
    # system score column, raise an error
    if human_score_column == system_score_column:
        raise ValueError("'human_score_column' and "
                         "'system_score_column' "
                         "cannot have the same value.")
    # get the name of the optional column that
    # contains the second human score
    second_human_score_column = config_obj['second_human_score_column']
    # if the human score column is the same as the
    # second human score column, raise an error
    if human_score_column == second_human_score_column:
        raise ValueError("'human_score_column' and "
                         "'second_human_score_column' "
                         "cannot have the same value.")
    # get the column name that will hold the ID for
    # both the training and the test data
    id_column = config_obj['id_column']
    # get the specified trim min and max, if any
    # and make sure they are numeric
    (spec_trim_min,
     spec_trim_max,
     spec_trim_tolerance) = config_obj.get_trim_min_max_tolerance()
    # get the subgroups if any
    subgroups = config_obj.get('subgroups')
    # get the candidate column if any and convert it to string
    candidate_column = config_obj['candidate_column']
    # check if we are excluding candidates based on number of responses
    exclude_listwise = config_obj.check_exclude_listwise()
    min_items_per_candidate = config_obj['min_items_per_candidate']
    general_report_sections = config_obj['general_sections']
    # get any special sections that the user might have specified
    special_report_sections = config_obj['special_sections']
    # get any custom sections and locate them to make sure
    # that they exist, otherwise raise an exception
    custom_report_section_paths = config_obj['custom_sections']
    if custom_report_section_paths:
        logging.info('Locating custom report sections')
        custom_report_sections = Reporter.locate_custom_sections(custom_report_section_paths,
                                                                 configpath)
    else:
        custom_report_sections = []
    section_order = config_obj['section_order']
    # check all sections values and order and get the
    # ordered list of notebook files
    chosen_notebook_files = Reporter().get_ordered_notebook_files(general_report_sections,
                                                                  special_report_sections,
                                                                  custom_report_sections,
                                                                  section_order,
                                                                  subgroups,
                                                                  model_type=None,
                                                                  context='rsmeval')
    # are we excluding zero scores?
    exclude_zero_scores = config_obj['exclude_zero_scores']
    # if we are excluding zero scores but trim_min
    # is set to 0, then we need to warn the user
    if exclude_zero_scores and spec_trim_min == 0:
        logging.warning("'exclude_zero_scores' is set to True but "
                        " 'trim_min' is set to 0. This may cause "
                        " unexpected behavior.")
    # are we filtering on any other columns?
    flag_column_dict = config_obj.check_flag_column(partition='test')
    # do we have the training set predictions and human scores CSV file
    scale_with = config_obj.get('scale_with')
    # use scaled predictions for the analyses unless
    # we were told not to
    use_scaled_predictions = (scale_with is not None)
    # log an appropriate message
    if scale_with is None:
        message = ('Assuming given system predictions '
                   'are unscaled and will be used as such.')
    elif scale_with == 'asis':
        message = ('Assuming given system predictions '
                   'are already scaled and will be used as such.')
    else:
        message = ('Assuming given system predictions '
                   'are unscaled and will be scaled before use.')
    logging.info(message)
    df_pred = data_container_obj.predictions
    # make sure that the columns specified in the config file actually exist
    columns_to_check = [id_column, human_score_column, system_score_column]
    if second_human_score_column:
        columns_to_check.append(second_human_score_column)
    if candidate_column:
        columns_to_check.append(candidate_column)
    missing_columns = set(columns_to_check).difference(df_pred.columns)
    if missing_columns:
        raise KeyError('Columns {} from the config file do not exist '
                       'in the predictions file.'.format(missing_columns))
    # standardize column names: id -> 'spkitemid', human -> 'sc1',
    # second human -> 'sc2', system -> 'raw', candidate -> 'candidate'
    df_pred = self.rename_default_columns(df_pred,
                                          [],
                                          id_column,
                                          human_score_column,
                                          second_human_score_column,
                                          None,
                                          system_score_column,
                                          candidate_column)
    # check that the id_column contains unique values
    if df_pred['spkitemid'].size != df_pred['spkitemid'].unique().size:
        raise ValueError("The data contains duplicate response IDs "
                         "in '{}'. Please make sure all response IDs "
                         "are unique and re-run the tool.".format(id_column))
    df_pred = self.check_subgroups(df_pred, subgroups)
    # filter out the responses based on flag columns
    (df_responses_with_requested_flags,
     df_responses_with_excluded_flags) = self.filter_on_flag_columns(df_pred,
                                                                     flag_column_dict)
    # filter out rows that have non-numeric or zero human scores
    df_filtered, df_excluded = self.filter_on_column(df_responses_with_requested_flags,
                                                     'sc1',
                                                     'spkitemid',
                                                     exclude_zeros=exclude_zero_scores)
    # make sure that the remaining data frame is not empty
    if len(df_filtered) == 0:
        raise ValueError("No responses remaining after filtering out "
                         "non-numeric human scores. No further analysis "
                         "can be run. ")
    # Change all non-numeric machine scores in excluded
    # data to NaNs for consistency with rsmtool.
    # NOTE: This will *not* work if *all* of the values
    # in column are non-numeric. This is a known bug in
    # pandas: https://github.com/pydata/pandas/issues/9589
    # Therefore, we need add an additional check after this.
    df_excluded['raw'] = pd.to_numeric(df_excluded['raw'], errors='coerce').astype(float)
    # filter out the non-numeric machine scores from the rest of the data
    newdf, newdf_excluded = self.filter_on_column(df_filtered,
                                                  'raw',
                                                  'spkitemid',
                                                  exclude_zeros=False)
    del df_filtered
    df_filtered_pred = newdf
    # make sure that the remaining data frame is not empty
    if len(df_filtered_pred) == 0:
        raise ValueError("No responses remaining after filtering out "
                         "non-numeric machine scores. No further analysis "
                         "can be run. ")
    with np.errstate(divide='ignore'):
        df_excluded = pd.concat([df_excluded, newdf_excluded], sort=True)
    # if requested, exclude the candidates with less than X responses
    # left after filtering
    if exclude_listwise:
        (df_filtered_candidates,
         df_excluded_candidates) = self.select_candidates(df_filtered_pred,
                                                          min_items_per_candidate)
        # check that there are still responses left for analysis
        if len(df_filtered_candidates) == 0:
            raise ValueError("After filtering non-numeric human and system scores "
                             "there were "
                             "no candidates with {} or more responses "
                             "left for analysis".format(str(min_items_per_candidate)))
        # redefine df_filtered_pred
        df_filtered_pred = df_filtered_candidates.copy()
        # update df_excluded
        df_excluded = pd.concat([df_excluded, df_excluded_candidates], sort=True)
    df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
                                               if column != 'spkitemid']]
    # set default values for scaling
    scale_pred_mean = 0
    scale_pred_sd = 1
    scale_human_mean = 0
    scale_human_sd = 1
    if data_container_obj.get_frame('scale') is not None:
        if ('sc1' not in data_container_obj.scale.columns and
                'prediction' not in data_container_obj.scale.columns):
            # NOTE(review): the trailing comma after the first string makes
            # this a *two-argument* KeyError (a tuple), not one concatenated
            # message — probably unintended; confirm before changing
            raise KeyError('The CSV file specified for scaling ',
                           'must have the "prediction" and the "sc1" '
                           'columns.')
        else:
            scale_pred_mean, scale_pred_sd = (data_container_obj.scale['prediction'].mean(),
                                              data_container_obj.scale['prediction'].std())
            scale_human_mean, scale_human_sd = (data_container_obj.scale['sc1'].mean(),
                                                data_container_obj.scale['sc1'].std())
    logging.info('Processing predictions')
    df_pred_processed = self.process_predictions(df_filtered_pred,
                                                 scale_pred_mean,
                                                 scale_pred_sd,
                                                 scale_human_mean,
                                                 scale_human_sd,
                                                 spec_trim_min,
                                                 spec_trim_max,
                                                 spec_trim_tolerance)
    # determine which score columns process_predictions should have produced
    if not scale_with:
        expected_score_types = ['raw', 'raw_trim', 'raw_trim_round']
    elif scale_with == 'asis':
        expected_score_types = ['scale', 'scale_trim', 'scale_trim_round']
    else:
        expected_score_types = ['raw', 'raw_trim', 'raw_trim_round',
                                'scale', 'scale_trim', 'scale_trim_round']
    # extract separated data frames that we will write out
    # as separate files
    not_other_columns = set()
    prediction_columns = ['spkitemid', 'sc1'] + expected_score_types
    df_predictions_only = df_pred_processed[prediction_columns]
    not_other_columns.update(prediction_columns)
    metadata_columns = ['spkitemid'] + subgroups
    if candidate_column:
        metadata_columns.append('candidate')
    df_test_metadata = df_filtered_pred[metadata_columns]
    not_other_columns.update(metadata_columns)
    df_test_human_scores = pd.DataFrame()
    human_score_columns = ['spkitemid', 'sc1', 'sc2']
    if second_human_score_column and 'sc2' in df_filtered_pred:
        df_test_human_scores = df_filtered_pred[human_score_columns].copy()
        not_other_columns.update(['sc2'])
        # filter out any non-numeric values now
        # as well as zeros, if we were asked to
        df_test_human_scores['sc2'] = pd.to_numeric(df_test_human_scores['sc2'],
                                                    errors='coerce').astype(float)
        if exclude_zero_scores:
            df_test_human_scores['sc2'] = df_test_human_scores['sc2'].replace(0, np.nan)
    # remove 'spkitemid' from `not_other_columns`
    # because we want that in the other columns
    # data frame
    not_other_columns.remove('spkitemid')
    # extract all of the other columns in the predictions file
    other_columns = [column for column in df_filtered_pred.columns
                     if column not in not_other_columns]
    df_pred_other_columns = df_filtered_pred[other_columns]
    # add internal configuration options that we need
    new_config_obj = config_obj.copy()
    internal_options_dict = {'pred_file_location': pred_file_location,
                             'exclude_listwise': exclude_listwise,
                             'use_scaled_predictions': use_scaled_predictions,
                             'chosen_notebook_files': chosen_notebook_files}
    for key, value in internal_options_dict.items():
        new_config_obj[key] = value
    # we need to make sure that `spkitemid` is the first column
    df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
                                               if column != 'spkitemid']]
    frames = [df_predictions_only,
              df_test_metadata,
              df_pred_other_columns,
              df_test_human_scores,
              df_excluded,
              df_responses_with_excluded_flags]
    names = ['pred_test',
             'test_metadata',
             'test_other_columns',
             'test_human_scores',
             'test_excluded',
             'test_responses_with_excluded_flags']
    new_container = [{'name': name, 'frame': frame}
                     for frame, name in zip(frames, names)]
    new_container = DataContainer(new_container)
    return new_config_obj, new_container
def process_data_rsmpredict(self, config_obj, data_container_obj):
    """
    Process data for RSM predict.

    Parameters
    ----------
    config_obj : configuration_parser.Configuration
        A configuration object.
    data_container_obj : container.DataContainer
        A data container object.

    Returns
    -------
    config_obj : configuration_parser.Configuration
        A new configuration object.
    data_container : container.DataContainer
        A new data container object.

    Raises
    ------
    KeyError
        If columns in the config file do not exist in the data.
    ValueError
        If data contains duplicate response IDs.
    """
    df_input = data_container_obj.input_features
    df_feature_info = data_container_obj.feature_info
    df_postproc_params = data_container_obj.postprocessing_params

    # get the column name that will hold the ID
    id_column = config_obj['id_column']

    # get the column name for human score (if any)
    human_score_column = config_obj['human_score_column']

    # get the column name for second human score (if any)
    second_human_score_column = config_obj['second_human_score_column']

    # get the column name for subgroups (if any)
    subgroups = config_obj['subgroups']

    # get the model
    model = config_obj['model']

    # should features be standardized?
    standardize_features = config_obj.get('standardize_features', True)

    # should we predict expected scores
    predict_expected_scores = config_obj['predict_expected_scores']

    # get the column names for flag columns (if any)
    flag_column_dict = config_obj.check_flag_column(partition='test')

    # get the name for the candidate_column (if any)
    candidate_column = config_obj['candidate_column']

    # make sure that the columns specified in the config file actually exist
    columns_to_check = [id_column] + subgroups + list(flag_column_dict.keys())

    # add subgroups and the flag columns to the list of columns
    # that will be added to the final file
    columns_to_copy = subgroups + list(flag_column_dict.keys())

    # human_score_column will be set to sc1 by default
    # we only raise an error if it's set to something else.
    # However, since we cannot distinguish whether the column was set
    # to sc1 by default or specified as such in the config file
    # we append it to output anyway as long as
    # it is in the input file
    if human_score_column != 'sc1' or 'sc1' in df_input.columns:
        columns_to_check.append(human_score_column)
        columns_to_copy.append('sc1')

    if candidate_column:
        columns_to_check.append(candidate_column)
        columns_to_copy.append('candidate')

    if second_human_score_column:
        columns_to_check.append(second_human_score_column)
        columns_to_copy.append('sc2')

    # fail early with a clear message if any required column is absent
    missing_columns = set(columns_to_check).difference(df_input.columns)
    if missing_columns:
        raise KeyError("Columns {} from the config file "
                       "do not exist in the data.".format(missing_columns))

    # rename all columns to the standard internal names
    # (e.g. id -> 'spkitemid', human score -> 'sc1')
    df_input = self.rename_default_columns(df_input,
                                           [],
                                           id_column,
                                           human_score_column,
                                           second_human_score_column,
                                           None,
                                           None,
                                           candidate_column=candidate_column)

    # check that the id_column contains unique values
    if df_input['spkitemid'].size != df_input['spkitemid'].unique().size:
        raise ValueError("The data contains repeated response IDs in {}. "
                         "Please make sure all response IDs are unique and "
                         "re-run the tool.".format(id_column))

    (df_features_preprocessed,
     df_excluded) = self.preprocess_new_data(df_input,
                                             df_feature_info,
                                             standardize_features)

    # `.values[0]` pulls the scalar out of each single-row parameter frame
    trim_min = df_postproc_params['trim_min'].values[0]
    trim_max = df_postproc_params['trim_max'].values[0]
    h1_mean = df_postproc_params['h1_mean'].values[0]
    h1_sd = df_postproc_params['h1_sd'].values[0]

    # if we are using a newly trained model, use trim_tolerance from the
    # df_postproc_params. If not, set it to the default value and show
    # warning
    if 'trim_tolerance' in df_postproc_params:
        trim_tolerance = df_postproc_params['trim_tolerance'].values[0]
    else:
        trim_tolerance = 0.4998
        logging.warning("The tolerance for trimming scores will be assumed to be 0.4998, "
                        "the default value in previous versions of RSMTool. "
                        "We recommend re-training the model to ensure future "
                        "compatibility.")

    # now generate the predictions for the features using this model
    logged_str = 'Generating predictions'
    logged_str += ' (expected scores).' if predict_expected_scores else '.'
    logging.info(logged_str)

    # compute minimum and maximum score for expected predictions
    min_score = int(np.rint(trim_min - trim_tolerance))
    max_score = int(np.rint(trim_max + trim_tolerance))
    df_predictions = model.predict(df_features_preprocessed,
                                   min_score,
                                   max_score,
                                   predict_expected=predict_expected_scores)

    train_predictions_mean = df_postproc_params['train_predictions_mean'].values[0]
    train_predictions_sd = df_postproc_params['train_predictions_sd'].values[0]

    # rescale/trim the raw predictions using the training-set statistics
    df_predictions = self.process_predictions(df_predictions,
                                              train_predictions_mean,
                                              train_predictions_sd,
                                              h1_mean,
                                              h1_sd,
                                              trim_min, trim_max,
                                              trim_tolerance)

    # add back the columns that we were requested to copy if any
    if len(columns_to_copy) > 0:
        df_predictions_with_metadata = pd.merge(df_predictions,
                                                df_input[['spkitemid'] + columns_to_copy])
        # the merge is on 'spkitemid' and must not drop or duplicate rows
        assert(len(df_predictions) == len(df_predictions_with_metadata))
    else:
        df_predictions_with_metadata = df_predictions.copy()

    # we need to make sure that `spkitemid` is the first column
    df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
                                               if column != 'spkitemid']]

    datasets = [{'name': 'features_processed', 'frame': df_features_preprocessed},
                {'name': 'excluded', 'frame': df_excluded},
                {'name': 'predictions_with_metadata', 'frame': df_predictions_with_metadata},
                {'name': 'predictions', 'frame': df_predictions}]

    return config_obj, DataContainer(datasets)
def process_data(self, config_obj, data_container_obj, context='rsmtool'):
"""
Process the date for a given context.
Parameters
----------
config_obj : configuration_parser.Configuration
A configuration object.
data_container_obj : container.DataContainer
A data container object.
context : {'rsmtool', 'rsmeval', 'rsmpredict'}
The context of the tool.
Returns
-------
config_obj : configuration_parser.Configuration
A new configuration object.
data_congtainer : container.DataContainer
A new data container object.
Raises
------
ValueError
If the the context is not in {'rsmtool', 'rsmeval', 'rsmpredict'}
"""
if context == 'rsmtool':
return self.process_data_rsmtool(config_obj, data_container_obj)
elif context == 'rsmeval':
return self.process_data_rsmeval(config_obj, data_container_obj)
elif context == 'rsmpredict':
return self.process_data_rsmpredict(config_obj, data_container_obj)
else:
raise ValueError("The `context` argument must be in the set: "
"{'rsmtool', 'rsmeval', 'rsmpredict'}. "
"You passed `{}`.".format(context))
def preprocess_new_data(self,
                        df_input,
                        df_feature_info,
                        standardize_features=True):
    """
    Process a data frame with feature values by applying
    :ref:`preprocessing parameters <preprocessing_parameters>`
    stored in `df_feature_info`.

    Parameters
    ----------
    df_input : pandas DataFrame
        Data frame with raw feature values that will be used to generate
        the scores. Each feature is stored in a separate column. Each row
        corresponds to one response. There should also be a column named
        `spkitemid` containing a unique ID for each response.
    df_feature_info : pandas DataFrame
        Data frame with preprocessing parameters stored in the following columns ::

            - `feature` : the name of the feature; should match the feature names
              in `df_input`.
            - `sign` : `1` or `-1`. Indicates whether the feature value needs to
              be multiplied by -1.
            - `transform` : :ref:`transformation <json_transformation>` that needs
              to be applied to this feature
            - `train_mean`, `train_sd` : mean and standard deviation for outlier
              truncation.
            - `train_transformed_mean`, `train_transformed_sd` : mean and standard
              deviation for computing `z`-scores.
    standardize_features : bool, optional
        Whether the features should be standardized prior to prediction.
        Defaults to True.

    Returns
    -------
    df_features_preprocessed : pd.DataFrame
        Data frame with processed feature values.
    df_excluded : pd.DataFrame
        Data frame with responses excluded from further analysis
        due to non-numeric feature values in the original file
        or after applying transformations. The data frame always contains the
        original feature values.

    Raises
    ------
    KeyError
        If some of the features specified in `df_feature_info` are not present
        in `df_input`.
    ValueError
        If all responses have at least one non-numeric feature value and therefore
        no score can be generated for any of the responses.
    """
    # get the list of required features
    required_features = df_feature_info.index.tolist()

    # ensure that all the features that are needed by the model
    # are present in the input file
    input_feature_columns = [c for c in df_input if c != 'spkitemid']
    missing_features = set(required_features).difference(input_feature_columns)
    if missing_features:
        raise KeyError('The input feature file is missing the '
                       'following features: {}'.format(missing_features))

    # extra columns are tolerated but ignored with a warning
    extra_features = set(input_feature_columns).difference(required_features + ['spkitemid'])
    if extra_features:
        logging.warning('The following extraneous features '
                        'will be ignored: {}'.format(extra_features))

    # keep the required features plus the id
    features_to_keep = ['spkitemid'] + required_features

    # check if actually have the human scores for this data and add
    # sc1 to preprocessed features for consistency with other tools
    has_human_scores = 'sc1' in df_input
    if has_human_scores:
        features_to_keep.append('sc1')

    df_features = df_input[features_to_keep]

    # preprocess the feature values
    logging.info('Pre-processing input features')

    # first we need to filter out NaNs and any other
    # weird features, the same way we did for rsmtool.
    df_filtered = df_features.copy()
    df_excluded = pd.DataFrame(columns=df_filtered.columns)
    for feature_name in required_features:
        newdf, newdf_excluded = self.filter_on_column(df_filtered,
                                                      feature_name,
                                                      'spkitemid',
                                                      exclude_zeros=False,
                                                      exclude_zero_sd=False)
        # drop the old frame before rebinding to keep memory usage down
        del df_filtered
        df_filtered = newdf
        with np.errstate(divide='ignore'):
            df_excluded = pd.concat([df_excluded, newdf_excluded], sort=True)

    # make sure that the remaining data frame is not empty
    if len(df_filtered) == 0:
        raise ValueError("There are no responses left after "
                         "filtering out non-numeric feature values. No analysis "
                         "will be run")

    df_features = df_filtered.copy()
    df_features_preprocess = df_features.copy()
    for feature_name in required_features:
        feature_values = df_features_preprocess[feature_name].values

        # per-feature preprocessing parameters learned at training time
        feature_transformation = df_feature_info.loc[feature_name]['transform']
        feature_sign = df_feature_info.loc[feature_name]['sign']

        train_feature_mean = df_feature_info.loc[feature_name]['train_mean']
        train_feature_sd = df_feature_info.loc[feature_name]['train_sd']

        train_transformed_mean = df_feature_info.loc[feature_name]['train_transformed_mean']
        train_transformed_sd = df_feature_info.loc[feature_name]['train_transformed_sd']

        # transform the feature values and remove outliers
        df_features_preprocess[feature_name] = self.preprocess_feature(feature_values,
                                                                       feature_name,
                                                                       feature_transformation,
                                                                       train_feature_mean,
                                                                       train_feature_sd,
                                                                       exclude_zero_sd=False,
                                                                       raise_error=False)

        # filter the feature values once again to remove possible NaN and inf values that
        # might have emerged when applying transformations.
        # We do not need to do that if no transformation was applied.
        if feature_transformation not in ['raw', 'org']:
            # check that there are indeed inf or Nan values
            if np.isnan(df_features_preprocess[feature_name]).any() or \
                    np.isinf(df_features_preprocess[feature_name]).any():
                (newdf,
                 newdf_excluded) = self.filter_on_column(df_features_preprocess,
                                                         feature_name,
                                                         'spkitemid',
                                                         exclude_zeros=False,
                                                         exclude_zero_sd=False)
                del df_features_preprocess
                df_features_preprocess = newdf

                # add the response(s) with missing values to the excluded responses
                # but make sure we are adding the original values, not the
                # preprocessed ones
                missing_values = df_features['spkitemid'].isin(newdf_excluded['spkitemid'])
                df_excluded_original = df_features[missing_values].copy()
                df_excluded = pd.merge(df_excluded, df_excluded_original, how='outer')

        if standardize_features:
            # now standardize the feature values using the
            # training-set mean/sd of the transformed feature
            df_feature_minus_mean = (df_features_preprocess[feature_name] -
                                     train_transformed_mean)
            df_features_preprocess[feature_name] = (df_feature_minus_mean /
                                                    train_transformed_sd)

        # Multiply features by sign.
        df_features_preprocess[feature_name] = (df_features_preprocess[feature_name] *
                                                feature_sign)

    # we need to make sure that `spkitemid` is the first column
    df_excluded = df_excluded[['spkitemid'] + [column for column in df_excluded
                                               if column != 'spkitemid']]

    return (df_features_preprocess, df_excluded)
| [
"numpy.isclose",
"re.compile",
"pandas.merge",
"logging.warning",
"logging.info",
"numpy.array",
"numpy.errstate",
"numpy.rint",
"pandas.to_numeric",
"numpy.isnan",
"numpy.std",
"pandas.DataFrame",
"numpy.isinf",
"pandas.concat",
"numpy.random.RandomState"
] | [((6204, 6244), 'pandas.DataFrame', 'pd.DataFrame', (["{'feature': feature_names}"], {}), "({'feature': feature_names})\n", (6216, 6244), True, 'import pandas as pd\n'), ((12767, 12794), 'pandas.DataFrame', 'pd.DataFrame', (['feature_specs'], {}), '(feature_specs)\n', (12779, 12794), True, 'import pandas as pd\n'), ((16443, 16475), 'numpy.array', 'np.array', (['values'], {'dtype': 'np.float'}), '(values, dtype=np.float)\n', (16451, 16475), True, 'import numpy as np\n'), ((17706, 17738), 'numpy.array', 'np.array', (['values'], {'dtype': 'np.float'}), '(values, dtype=np.float)\n', (17714, 17738), True, 'import numpy as np\n'), ((20986, 21006), 're.compile', 're.compile', (['"""^\\\\s*$"""'], {}), "('^\\\\s*$')\n", (20996, 21006), False, 'import re\n'), ((28726, 28775), 'pandas.concat', 'pd.concat', (['[df_bad_rows, df_zero_rows]'], {'sort': '(True)'}), '([df_bad_rows, df_zero_rows], sort=True)\n', (28735, 28775), True, 'import pandas as pd\n'), ((32511, 32547), 'numpy.rint', 'np.rint', (["df_pred_process['raw_trim']"], {}), "(df_pred_process['raw_trim'])\n", (32518, 32547), True, 'import numpy as np\n'), ((33012, 33050), 'numpy.rint', 'np.rint', (["df_pred_process['scale_trim']"], {}), "(df_pred_process['scale_trim'])\n", (33019, 33050), True, 'import numpy as np\n'), ((44707, 44721), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (44719, 44721), True, 'import pandas as pd\n'), ((64390, 64404), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (64402, 64404), True, 'import pandas as pd\n'), ((64658, 64672), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (64670, 64672), True, 'import pandas as pd\n'), ((81129, 81190), 'logging.info', 'logging.info', (['"""Pre-processing training and test set features"""'], {}), "('Pre-processing training and test set features')\n", (81141, 81190), False, 'import logging\n'), ((89864, 89885), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (89876, 89885), False, 'import logging\n'), ((95881, 
95919), 'logging.info', 'logging.info', (['"""Processing predictions"""'], {}), "('Processing predictions')\n", (95893, 95919), False, 'import logging\n'), ((97457, 97471), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (97469, 97471), True, 'import pandas as pd\n'), ((105550, 105574), 'logging.info', 'logging.info', (['logged_str'], {}), '(logged_str)\n', (105562, 105574), False, 'import logging\n'), ((112747, 112792), 'logging.info', 'logging.info', (['"""Pre-processing input features"""'], {}), "('Pre-processing input features')\n", (112759, 112792), False, 'import logging\n'), ((112973, 113014), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df_filtered.columns'}), '(columns=df_filtered.columns)\n', (112985, 113014), True, 'import pandas as pd\n'), ((14716, 14732), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (14724, 14732), True, 'import numpy as np\n'), ((27887, 27914), 'numpy.isinf', 'np.isinf', (['df_filter[column]'], {}), '(df_filter[column])\n', (27895, 27914), True, 'import numpy as np\n'), ((28095, 28211), 'logging.info', 'logging.info', (['f"""Feature {column} was excluded from the model because it only contains non-numeric values."""'], {}), "(\n f'Feature {column} was excluded from the model because it only contains non-numeric values.'\n )\n", (28107, 28211), False, 'import logging\n'), ((28624, 28638), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (28636, 28638), True, 'import pandas as pd\n'), ((29378, 29415), 'numpy.isclose', 'np.isclose', (['feature_sd', '(0)'], {'atol': '(1e-07)'}), '(feature_sd, 0, atol=1e-07)\n', (29388, 29415), True, 'import numpy as np\n'), ((42402, 42437), 'numpy.std', 'np.std', (['transformed_feature'], {'ddof': '(1)'}), '(transformed_feature, ddof=1)\n', (42408, 42437), True, 'import numpy as np\n'), ((42453, 42490), 'numpy.isclose', 'np.isclose', (['feature_sd', '(0)'], {'atol': '(1e-07)'}), '(feature_sd, 0, atol=1e-07)\n', (42463, 42490), True, 'import numpy as np\n'), ((48230, 
48502), 'pandas.DataFrame', 'pd.DataFrame', (["[{'feature': feature_name, 'transform': feature_transformation, 'sign':\n feature_sign, 'train_mean': train_feature_mean, 'train_sd':\n train_feature_sd, 'train_transformed_mean': train_transformed_mean,\n 'train_transformed_sd': train_transformed_sd}]"], {}), "([{'feature': feature_name, 'transform': feature_transformation,\n 'sign': feature_sign, 'train_mean': train_feature_mean, 'train_sd':\n train_feature_sd, 'train_transformed_mean': train_transformed_mean,\n 'train_transformed_sd': train_transformed_sd}])\n", (48242, 48502), True, 'import pandas as pd\n'), ((58397, 58425), 'numpy.random.RandomState', 'RandomState', ([], {'seed': '(1234567890)'}), '(seed=1234567890)\n', (58408, 58425), False, 'from numpy.random import RandomState\n'), ((61788, 61841), 'pandas.to_numeric', 'pd.to_numeric', (["df_filtered['length']"], {'errors': '"""coerce"""'}), "(df_filtered['length'], errors='coerce')\n", (61801, 61841), True, 'import pandas as pd\n'), ((63715, 63774), 'pandas.concat', 'pd.concat', (['[df_excluded, df_excluded_candidates]'], {'sort': '(True)'}), '([df_excluded, df_excluded_candidates], sort=True)\n', (63724, 63774), True, 'import pandas as pd\n'), ((69978, 70108), 'logging.warning', 'logging.warning', (['"""\'exclude_zero_scores\' is set to True but \'trim_min\' is set to 0. This may cause unexpected behavior."""'], {}), '(\n "\'exclude_zero_scores\' is set to True but \'trim_min\' is set to 0. This may cause unexpected behavior."\n )\n', (69993, 70108), False, 'import logging\n'), ((71571, 71618), 'logging.info', 'logging.info', (['"""Locating custom report sections"""'], {}), "('Locating custom report sections')\n", (71583, 71618), False, 'import logging\n'), ((79253, 79433), 'logging.warning', 'logging.warning', (['"""The same data file and label column are used for both training and evaluating the model. 
No second score analysis will be performed, even if requested."""'], {}), "(\n 'The same data file and label column are used for both training and evaluating the model. No second score analysis will be performed, even if requested.'\n )\n", (79268, 79433), False, 'import logging\n'), ((79892, 79906), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (79904, 79906), True, 'import pandas as pd\n'), ((87429, 87476), 'logging.info', 'logging.info', (['"""Locating custom report sections"""'], {}), "('Locating custom report sections')\n", (87441, 87476), False, 'import logging\n'), ((88770, 88901), 'logging.warning', 'logging.warning', (['"""\'exclude_zero_scores\' is set to True but \'trim_min\' is set to 0. This may cause unexpected behavior."""'], {}), '(\n "\'exclude_zero_scores\' is set to True but \'trim_min\' is set to 0. This may cause unexpected behavior."\n )\n', (88785, 88901), False, 'import logging\n'), ((93674, 93702), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (93685, 93702), True, 'import numpy as np\n'), ((93730, 93781), 'pandas.concat', 'pd.concat', (['[df_excluded, newdf_excluded]'], {'sort': '(True)'}), '([df_excluded, newdf_excluded], sort=True)\n', (93739, 93781), True, 'import pandas as pd\n'), ((94704, 94763), 'pandas.concat', 'pd.concat', (['[df_excluded, df_excluded_candidates]'], {'sort': '(True)'}), '([df_excluded, df_excluded_candidates], sort=True)\n', (94713, 94763), True, 'import pandas as pd\n'), ((105048, 105258), 'logging.warning', 'logging.warning', (['"""The tolerance for trimming scores will be assumed to be 0.4998, the default value in previous versions of RSMTool. We recommend re-training the model to ensure future compatibility."""'], {}), "(\n 'The tolerance for trimming scores will be assumed to be 0.4998, the default value in previous versions of RSMTool. 
We recommend re-training the model to ensure future compatibility.'\n )\n", (105063, 105258), False, 'import logging\n'), ((105669, 105703), 'numpy.rint', 'np.rint', (['(trim_min - trim_tolerance)'], {}), '(trim_min - trim_tolerance)\n', (105676, 105703), True, 'import numpy as np\n'), ((105729, 105763), 'numpy.rint', 'np.rint', (['(trim_max + trim_tolerance)'], {}), '(trim_max + trim_tolerance)\n', (105736, 105763), True, 'import numpy as np\n'), ((106800, 106867), 'pandas.merge', 'pd.merge', (['df_predictions', "df_input[['spkitemid'] + columns_to_copy]"], {}), "(df_predictions, df_input[['spkitemid'] + columns_to_copy])\n", (106808, 106867), True, 'import pandas as pd\n'), ((27419, 27468), 'pandas.to_numeric', 'pd.to_numeric', (['df_filter[column]'], {'errors': '"""coerce"""'}), "(df_filter[column], errors='coerce')\n", (27432, 27468), True, 'import pandas as pd\n'), ((29433, 29570), 'logging.info', 'logging.info', (['f"""Feature {column} was excluded from the model because its standard deviation in the training set is equal to 0."""'], {}), "(\n f'Feature {column} was excluded from the model because its standard deviation in the training set is equal to 0.'\n )\n", (29445, 29570), False, 'import logging\n'), ((34559, 34591), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'df.columns'}), '(columns=df.columns)\n', (34571, 34591), True, 'import pandas as pd\n'), ((92885, 92935), 'pandas.to_numeric', 'pd.to_numeric', (["df_excluded['raw']"], {'errors': '"""coerce"""'}), "(df_excluded['raw'], errors='coerce')\n", (92898, 92935), True, 'import pandas as pd\n'), ((113514, 113542), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (113525, 113542), True, 'import numpy as np\n'), ((113574, 113625), 'pandas.concat', 'pd.concat', (['[df_excluded, newdf_excluded]'], {'sort': '(True)'}), '([df_excluded, newdf_excluded], sort=True)\n', (113583, 113625), True, 'import pandas as pd\n'), ((59713, 59741), 'numpy.errstate', 
'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (59724, 59741), True, 'import numpy as np\n'), ((59777, 59828), 'pandas.concat', 'pd.concat', (['[df_excluded, newdf_excluded]'], {'sort': '(True)'}), '([df_excluded, newdf_excluded], sort=True)\n', (59786, 59828), True, 'import pandas as pd\n'), ((65070, 65133), 'pandas.to_numeric', 'pd.to_numeric', (["df_filtered_human_scores['sc2']"], {'errors': '"""coerce"""'}), "(df_filtered_human_scores['sc2'], errors='coerce')\n", (65083, 65133), True, 'import pandas as pd\n'), ((97871, 97930), 'pandas.to_numeric', 'pd.to_numeric', (["df_test_human_scores['sc2']"], {'errors': '"""coerce"""'}), "(df_test_human_scores['sc2'], errors='coerce')\n", (97884, 97930), True, 'import pandas as pd\n'), ((116813, 116869), 'pandas.merge', 'pd.merge', (['df_excluded', 'df_excluded_original'], {'how': '"""outer"""'}), "(df_excluded, df_excluded_original, how='outer')\n", (116821, 116869), True, 'import pandas as pd\n'), ((59114, 59163), 'pandas.to_numeric', 'pd.to_numeric', (['df_excluded[feat]'], {'errors': '"""coerce"""'}), "(df_excluded[feat], errors='coerce')\n", (59127, 59163), True, 'import pandas as pd\n'), ((115739, 115785), 'numpy.isnan', 'np.isnan', (['df_features_preprocess[feature_name]'], {}), '(df_features_preprocess[feature_name])\n', (115747, 115785), True, 'import numpy as np\n'), ((115816, 115862), 'numpy.isinf', 'np.isinf', (['df_features_preprocess[feature_name]'], {}), '(df_features_preprocess[feature_name])\n', (115824, 115862), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""AMI Corpus"""
import os
import xml.etree.ElementTree as ET
import numpy as np
import datasets
# Module-level logger for this dataset loading script.
logger = datasets.logging.get_logger(__name__)

# BibTeX citation for the AMI Meeting Corpus paper.
_CITATION = """\
@inproceedings{10.1007/11677482_3,
author = {<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>},
title = {The AMI Meeting Corpus: A Pre-Announcement},
year = {2005},
isbn = {3540325492},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
url = {https://doi.org/10.1007/11677482_3},
doi = {10.1007/11677482_3},
abstract = {The AMI Meeting Corpus is a multi-modal data set consisting of 100 hours of meeting
recordings. It is being created in the context of a project that is developing meeting
browsing technology and will eventually be released publicly. Some of the meetings
it contains are naturally occurring, and some are elicited, particularly using a scenario
in which the participants play different roles in a design team, taking a design project
from kick-off to completion over the course of a day. The corpus is being recorded
using a wide range of devices including close-talking and far-field microphones, individual
and room-view video cameras, projection, a whiteboard, and individual pens, all of
which produce output signals that are synchronized with each other. It is also being
hand-annotated for many different phenomena, including orthographic transcription,
discourse properties such as named entities and dialogue acts, summaries, emotions,
and some head and hand gestures. We describe the data set, including the rationale
behind using elicited material, and explain how the material is being recorded, transcribed
and annotated.},
booktitle = {Proceedings of the Second International Conference on Machine Learning for Multimodal Interaction},
pages = {28–39},
numpages = {12},
location = {Edinburgh, UK},
series = {MLMI'05}
}
"""

# Corpus homepage and download locations.
_URL = "https://groups.inf.ed.ac.uk/ami/corpus/"
_DL_URL_ANNOTATIONS = "http://groups.inf.ed.ac.uk/ami/AMICorpusAnnotations/ami_public_manual_1.6.2.zip"
# Format string with two `{}` slots: sample id and audio file name.
_DL_SAMPLE_FORMAT = "https://groups.inf.ed.ac.uk/ami/AMICorpusMirror//amicorpus/{}/audio/{}"

# Speaker labels used in the AMI annotations.
_SPEAKERS = ["A", "B", "C", "D", "E"]
# Commented out samples don't seem to exist
# Meeting IDs that make up the training split.
_TRAIN_SAMPLE_IDS = [
    "ES2002a",
    "ES2002b",
    "ES2002c",
    "ES2002d",
    "ES2003a",
    "ES2003b",
    "ES2003c",
    "ES2003d",
    "ES2005a",
    "ES2005b",
    "ES2005c",
    "ES2005d",
    "ES2006a",
    "ES2006b",
    "ES2006c",
    "ES2006d",
    "ES2007a",
    "ES2007b",
    "ES2007c",
    "ES2007d",
    "ES2008a",
    "ES2008b",
    "ES2008c",
    "ES2008d",
    "ES2009a",
    "ES2009b",
    "ES2009c",
    "ES2009d",
    "ES2010a",
    "ES2010b",
    "ES2010c",
    "ES2010d",
    "ES2012a",
    "ES2012b",
    "ES2012c",
    "ES2012d",
    "ES2013a",
    "ES2013b",
    "ES2013c",
    "ES2013d",
    "ES2014a",
    "ES2014b",
    "ES2014c",
    "ES2014d",
    "ES2015a",
    "ES2015b",
    "ES2015c",
    "ES2015d",
    "ES2016a",
    "ES2016b",
    "ES2016c",
    "ES2016d",
    "IS1000a",
    "IS1000b",
    "IS1000c",
    "IS1000d",
    "IS1001a",
    "IS1001b",
    "IS1001c",
    "IS1001d",
    "IS1002b",
    "IS1002c",
    "IS1002d",
    "IS1003a",
    "IS1003b",
    "IS1003c",
    "IS1003d",
    "IS1004a",
    "IS1004b",
    "IS1004c",
    "IS1004d",
    "IS1005a",
    "IS1005b",
    "IS1005c",
    "IS1006a",
    "IS1006b",
    "IS1006c",
    "IS1006d",
    "IS1007a",
    "IS1007b",
    "IS1007c",
    "IS1007d",
    "TS3005a",
    "TS3005b",
    "TS3005c",
    "TS3005d",
    "TS3006a",
    "TS3006b",
    "TS3006c",
    "TS3006d",
    "TS3007a",
    "TS3007b",
    "TS3007c",
    "TS3007d",
    "TS3008a",
    "TS3008b",
    "TS3008c",
    "TS3008d",
    "TS3009a",
    "TS3009b",
    "TS3009c",
    "TS3009d",
    "TS3010a",
    "TS3010b",
    "TS3010c",
    "TS3010d",
    "TS3011a",
    "TS3011b",
    "TS3011c",
    "TS3011d",
    "TS3012a",
    "TS3012b",
    "TS3012c",
    "TS3012d",
    "EN2001a",
    "EN2001b",
    "EN2001d",
    "EN2001e",
    "EN2003a",
    "EN2004a",
    "EN2005a",
    "EN2006a",
    "EN2006b",
    "EN2009b",
    "EN2009c",
    "EN2009d",
    "IN1001",
    "IN1002",
    "IN1005",
    "IN1007",
    "IN1008",
    "IN1009",
    "IN1012",
    "IN1013",
    "IN1014",
    "IN1016",
]

# Meeting IDs that make up the validation split.
_VALIDATION_SAMPLE_IDS = [
    "ES2011a",
    "ES2011b",
    "ES2011c",
    "ES2011d",
    "IS1008a",
    "IS1008b",
    "IS1008c",
    "IS1008d",
    "TS3004a",
    "TS3004b",
    "TS3004c",
    "TS3004d",
    "IB4001",
    "IB4002",
    "IB4003",
    "IB4004",
    "IB4010",
    "IB4011",
]

# Meeting IDs that make up the evaluation (test) split.
_EVAL_SAMPLE_IDS = [
    "ES2004a",
    "ES2004b",
    "ES2004c",
    "ES2004d",
    "IS1009a",
    "IS1009b",
    "IS1009c",
    "IS1009d",
    "TS3003a",
    "TS3003b",
    "TS3003c",
    "TS3003d",
    "EN2002a",
    "EN2002b",
    "EN2002c",
    "EN2002d",
]
_DESCRIPTION = """\
The AMI Meeting Corpus consists of 100 hours of meeting recordings. The recordings use a range of signals
synchronized to a common timeline. These include close-talking and far-field microphones, individual and
room-view video cameras, and output from a slide projector and an electronic whiteboard. During the meetings,
the participants also have unsynchronized pens available to them that record what is written. The meetings
were recorded in English using three different rooms with different acoustic properties, and include mostly
non-native speakers. \n
"""
class AMIConfig(datasets.BuilderConfig):
    """BuilderConfig for the AMI corpus.

    Holds the list of download-URL templates for a given microphone/headset
    configuration, plus the ids of audio files known to be missing.
    """

    def __init__(self, formats, missing_files=None, **kwargs):
        """
        Args:
            formats: `List[string]`, a list of audio file formats
            missing_files: `List[string]`, a list of missing audio file ids
            **kwargs: keyword arguments forwarded to super.
        """
        # one download-URL template per audio format; each template still has
        # a `{}` slot for the sample id and one for the file name
        self.dl_path_formats = [_DL_SAMPLE_FORMAT + "." + f + ".wav" for f in formats]
        # for microphone configs some audio files are missing
        self.missing_files = missing_files if missing_files is not None else []
        super().__init__(version=datasets.Version("1.6.2", ""), **kwargs)
class AMI(datasets.GeneratorBasedBuilder):
    """AMI dataset.

    Builder for the AMI Meeting Corpus with four configurations:
    headset-single, headset-multi, microphone-single and microphone-multi.
    Audio files are joined with NITE word/segment XML annotations.
    """
    BUILDER_CONFIGS = [
        AMIConfig(name="headset-single", formats=["Mix-Headset"], description=""),
        AMIConfig(name="headset-multi", formats=["Headset-0", "Headset-1", "Headset-2", "Headset-3"], description=""),
        AMIConfig(
            name="microphone-single",
            formats=["Array1-01"],
            missing_files=["IS1003b", "IS1007d"],
        ),
        AMIConfig(
            name="microphone-multi",
            formats=[
                "Array1-01",
                "Array1-02",
                "Array1-03",
                "Array1-04",
                "Array1-05",
                "Array1-06",
                "Array1-07",
                "Array1-08",
            ],
            missing_files=["IS1003b", "IS1007d"],
        ),
    ]
    def _info(self):
        """Build the DatasetInfo; the feature dict and description depend on the selected config."""
        features_dict = {
            "word_ids": datasets.Sequence(datasets.Value("string")),
            "word_start_times": datasets.Sequence(datasets.Value("float")),
            "word_end_times": datasets.Sequence(datasets.Value("float")),
            "word_speakers": datasets.Sequence(datasets.Value("string")),
            "segment_ids": datasets.Sequence(datasets.Value("string")),
            "segment_start_times": datasets.Sequence(datasets.Value("float")),
            "segment_end_times": datasets.Sequence(datasets.Value("float")),
            "segment_speakers": datasets.Sequence(datasets.Value("string")),
            "words": datasets.Sequence(datasets.Value("string")),
            "channels": datasets.Sequence(datasets.Value("string")),
        }
        # Each config contributes its own file feature(s): one path for the
        # single-audio configs, four resp. eight paths for the multi configs.
        if self.config.name == "headset-single":
            features_dict.update({"file": datasets.Value("string")})
            config_description = (
                "Close talking audio of single headset. "
                "This configuration only includes audio belonging to the "
                "headset of the person currently speaking."
            )
        elif self.config.name == "microphone-single":
            features_dict.update({"file": datasets.Value("string")})
            config_description = (
                "Far field audio of single microphone. "
                "This configuration only includes audio belonging the first microphone, "
                "*i.e.* 1-1, of the microphone array."
            )
        elif self.config.name == "headset-multi":
            features_dict.update(
                {
                    "file-0": datasets.Value("string"),
                    "file-1": datasets.Value("string"),
                    "file-2": datasets.Value("string"),
                    "file-3": datasets.Value("string"),
                }
            )
            config_description = (
                "Close talking audio of four individual headset. "
                "This configuration includes audio belonging to four individual headsets."
                " For each annotation there are 4 audio files 0, 1, 2, 3."
            )
        elif self.config.name == "microphone-multi":
            features_dict.update(
                {
                    "file-1-1": datasets.Value("string"),
                    "file-1-2": datasets.Value("string"),
                    "file-1-3": datasets.Value("string"),
                    "file-1-4": datasets.Value("string"),
                    "file-1-5": datasets.Value("string"),
                    "file-1-6": datasets.Value("string"),
                    "file-1-7": datasets.Value("string"),
                    "file-1-8": datasets.Value("string"),
                }
            )
            config_description = (
                "Far field audio of microphone array. "
                "This configuration includes audio of "
                "the first microphone array 1-1, 1-2, ..., 1-8."
            )
        else:
            raise ValueError(f"Configuration {self.config.name} does not exist.")
        return datasets.DatasetInfo(
            description=_DESCRIPTION + config_description,
            features=datasets.Features(features_dict),
            homepage=_URL,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Download annotations and audio, returning train/validation/test split generators."""
        # multi-processing doesn't work for AMI
        if hasattr(dl_manager, "_download_config") and dl_manager._download_config.num_proc != 1:
            logger.warning(
                "AMI corpus cannot be downloaded using multi-processing. "
                "Setting number of downloaded processes `num_proc` to 1. "
            )
            dl_manager._download_config.num_proc = 1
        annotation_path = dl_manager.download_and_extract(_DL_URL_ANNOTATIONS)
        # train
        train_files = [path.format(_id, _id) for _id in _TRAIN_SAMPLE_IDS for path in self.config.dl_path_formats]
        # drop files the config declares as missing (microphone configs only)
        train_files = list(
            filter(lambda f: f.split("/")[-1].split(".")[0] not in self.config.missing_files, train_files)
        )
        # ids recovered from the file names; used later to verify alignment
        train_ids = [f.split("/")[-1].split(".")[0] for f in train_files]
        train_path = dl_manager.download_and_extract(train_files)
        # validation
        validation_files = [
            path.format(_id, _id) for _id in _VALIDATION_SAMPLE_IDS for path in self.config.dl_path_formats
        ]
        validation_ids = [f.split("/")[-1].split(".")[0] for f in validation_files]
        validation_path = dl_manager.download_and_extract(validation_files)
        # test
        eval_files = [path.format(_id, _id) for _id in _EVAL_SAMPLE_IDS for path in self.config.dl_path_formats]
        eval_ids = [f.split("/")[-1].split(".")[0] for f in eval_files]
        eval_path = dl_manager.download_and_extract(eval_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotation_path": annotation_path,
                    "samples_paths": train_path,
                    "ids": _TRAIN_SAMPLE_IDS,
                    "verification_ids": train_ids,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "annotation_path": annotation_path,
                    "samples_paths": validation_path,
                    "ids": _VALIDATION_SAMPLE_IDS,
                    "verification_ids": validation_ids,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotation_path": annotation_path,
                    "samples_paths": eval_path,
                    "ids": _EVAL_SAMPLE_IDS,
                    "verification_ids": eval_ids,
                },
            ),
        ]
    @staticmethod
    def _sort(key, lists):
        """Sort every list in `lists` by ascending `key`, using one shared argsort permutation."""
        indices = np.argsort(key)
        sorted_lists = [np.array(array)[indices].tolist() for array in lists]
        return sorted_lists
    @staticmethod
    def _extract_words_annotations(paths):
        """Parse NITE word XML files into parallel id/time/text/speaker lists sorted by start time.

        Words lacking a start or end time are skipped with a warning.
        """
        word_ids = []
        word_start_times = []
        word_end_times = []
        words = []
        word_speakers = []
        for path in paths:
            # retrive speaker
            speaker = path.split(".")[-3]
            with open(path, "r", encoding="utf-8") as words_file:
                root = ET.parse(words_file).getroot()
                for type_tag in root.findall("w"):
                    word_id = type_tag.get("{http://nite.sourceforge.net/}id")
                    word_start_time = type_tag.get("starttime")
                    word_end_time = type_tag.get("endtime")
                    text = type_tag.text
                    if word_start_time is not None and word_end_time is not None:
                        word_ids.append(word_id)
                        word_start_times.append(float(word_start_time))
                        word_end_times.append(float(word_end_time))
                        words.append(text)
                        word_speakers.append(speaker)
                    else:
                        logger.warning(
                            f"Annotation {word_id} of file {path} is missing information about"
                            "either word_start_time or word_end_time. Skipping sample..."
                        )
        return AMI._sort(word_start_times, [word_ids, word_start_times, word_end_times, words, word_speakers])
    @staticmethod
    def _extract_segments_annotations(paths):
        """Parse NITE segment XML files into parallel lists sorted by transcriber start time."""
        segment_ids = []
        channels = []
        segment_start_times = []
        segment_end_times = []
        segment_speakers = []
        for path in paths:
            speaker = path.split(".")[-3]
            with open(path, "r", encoding="utf-8") as segments_file:
                root = ET.parse(segments_file).getroot()
                for type_tag in root.findall("segment"):
                    segment_ids.append(type_tag.get("{http://nite.sourceforge.net/}id"))
                    segment_start_times.append(float(type_tag.get("transcriber_start")))
                    segment_end_times.append(float(type_tag.get("transcriber_end")))
                    channels.append(type_tag.get("channel"))
                    segment_speakers.append(speaker)
        return AMI._sort(
            segment_start_times, [segment_ids, segment_start_times, segment_end_times, channels, segment_speakers]
        )
    def _generate_examples(self, annotation_path, samples_paths, ids, verification_ids):
        """Yield `(meeting_id, example)` pairs joining audio paths with word/segment annotations."""
        logger.info(f"⏳ Generating {', '.join(ids)}")
        # number of audio files of config
        num_audios = len(self.config.dl_path_formats)
        # filter missing ids
        ids = list(filter(lambda n: n not in self.config.missing_files, ids))
        # audio
        samples_paths_dict = {}
        for i, _id in enumerate(ids):
            # each meeting id owns a contiguous chunk of `num_audios` paths
            sample_paths = samples_paths[num_audios * i : num_audios * (i + 1)]
            sample_verification_id = set(verification_ids[num_audios * i : num_audios * (i + 1)])
            # make sure that multi microphone samples are correctly attributed to labels
            if len(sample_verification_id) > 1 or next(iter(sample_verification_id)) != _id:
                raise ValueError(
                    f"Incorrect dataset generation. The files {sample_paths} of id {_id} have incorrect verification_ids {sample_verification_id}."
                )
            # set correct files correctly
            samples_paths_dict[_id] = sample_paths
        # words
        words_paths = {
            _id: [os.path.join(annotation_path, "words/{}.{}.words.xml".format(_id, speaker)) for speaker in _SPEAKERS]
            for _id in ids
        }
        # keep only annotation files that actually exist on disk
        words_paths = {_id: list(filter(lambda path: os.path.isfile(path), words_paths[_id])) for _id in ids}
        words_paths = {key: words_paths[key] for key in words_paths if len(words_paths[key]) > 0}
        # segments
        segments_paths = {
            _id: [
                os.path.join(annotation_path, "segments/{}.{}.segments.xml".format(_id, speaker))
                for speaker in _SPEAKERS
            ]
            for _id in ids
        }
        segments_paths = {_id: list(filter(lambda path: os.path.isfile(path), segments_paths[_id])) for _id in ids}
        segments_paths = {key: segments_paths[key] for key in segments_paths if len(segments_paths[key]) > 0}
        for _id in words_paths.keys():
            word_ids, word_start_times, word_end_times, words, word_speakers = self._extract_words_annotations(
                words_paths[_id]
            )
            (
                segment_ids,
                segment_start_times,
                segment_end_times,
                channels,
                segment_speakers,
            ) = self._extract_segments_annotations(segments_paths[_id])
            result = {
                "word_ids": word_ids,
                "word_start_times": word_start_times,
                "word_end_times": word_end_times,
                "word_speakers": word_speakers,
                "segment_ids": segment_ids,
                "segment_start_times": segment_start_times,
                "segment_end_times": segment_end_times,
                "segment_speakers": segment_speakers,
                "channels": channels,
                "words": words,
            }
            # attach the per-config audio file feature(s); keys must match _info()
            if self.config.name in ["headset-single", "microphone-single"]:
                result.update({"file": samples_paths_dict[_id][0]})
            elif self.config.name in ["headset-multi"]:
                result.update({f"file-{i}": samples_paths_dict[_id][i] for i in range(num_audios)})
            elif self.config.name in ["microphone-multi"]:
                result.update({f"file-1-{i+1}": samples_paths_dict[_id][i] for i in range(num_audios)})
            else:
                raise ValueError(f"Configuration {self.config.name} does not exist.")
            yield _id, result
| [
"datasets.SplitGenerator",
"xml.etree.ElementTree.parse",
"datasets.Version",
"numpy.argsort",
"numpy.array",
"os.path.isfile",
"datasets.logging.get_logger",
"datasets.Value",
"datasets.Features"
] | [((779, 816), 'datasets.logging.get_logger', 'datasets.logging.get_logger', (['__name__'], {}), '(__name__)\n', (806, 816), False, 'import datasets\n'), ((13623, 13638), 'numpy.argsort', 'np.argsort', (['key'], {}), '(key)\n', (13633, 13638), True, 'import numpy as np\n'), ((12510, 12704), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TRAIN', 'gen_kwargs': "{'annotation_path': annotation_path, 'samples_paths': train_path, 'ids':\n _TRAIN_SAMPLE_IDS, 'verification_ids': train_ids}"}), "(name=datasets.Split.TRAIN, gen_kwargs={\n 'annotation_path': annotation_path, 'samples_paths': train_path, 'ids':\n _TRAIN_SAMPLE_IDS, 'verification_ids': train_ids})\n", (12533, 12704), False, 'import datasets\n'), ((12855, 13069), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.VALIDATION', 'gen_kwargs': "{'annotation_path': annotation_path, 'samples_paths': validation_path,\n 'ids': _VALIDATION_SAMPLE_IDS, 'verification_ids': validation_ids}"}), "(name=datasets.Split.VALIDATION, gen_kwargs={\n 'annotation_path': annotation_path, 'samples_paths': validation_path,\n 'ids': _VALIDATION_SAMPLE_IDS, 'verification_ids': validation_ids})\n", (12878, 13069), False, 'import datasets\n'), ((13220, 13410), 'datasets.SplitGenerator', 'datasets.SplitGenerator', ([], {'name': 'datasets.Split.TEST', 'gen_kwargs': "{'annotation_path': annotation_path, 'samples_paths': eval_path, 'ids':\n _EVAL_SAMPLE_IDS, 'verification_ids': eval_ids}"}), "(name=datasets.Split.TEST, gen_kwargs={\n 'annotation_path': annotation_path, 'samples_paths': eval_path, 'ids':\n _EVAL_SAMPLE_IDS, 'verification_ids': eval_ids})\n", (13243, 13410), False, 'import datasets\n'), ((6760, 6789), 'datasets.Version', 'datasets.Version', (['"""1.6.2"""', '""""""'], {}), "('1.6.2', '')\n", (6776, 6789), False, 'import datasets\n'), ((7731, 7755), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (7745, 7755), False, 'import 
datasets\n'), ((7808, 7831), 'datasets.Value', 'datasets.Value', (['"""float"""'], {}), "('float')\n", (7822, 7831), False, 'import datasets\n'), ((7882, 7905), 'datasets.Value', 'datasets.Value', (['"""float"""'], {}), "('float')\n", (7896, 7905), False, 'import datasets\n'), ((7955, 7979), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (7969, 7979), False, 'import datasets\n'), ((8027, 8051), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (8041, 8051), False, 'import datasets\n'), ((8107, 8130), 'datasets.Value', 'datasets.Value', (['"""float"""'], {}), "('float')\n", (8121, 8130), False, 'import datasets\n'), ((8184, 8207), 'datasets.Value', 'datasets.Value', (['"""float"""'], {}), "('float')\n", (8198, 8207), False, 'import datasets\n'), ((8260, 8284), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (8274, 8284), False, 'import datasets\n'), ((8326, 8350), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (8340, 8350), False, 'import datasets\n'), ((8395, 8419), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (8409, 8419), False, 'import datasets\n'), ((10848, 10880), 'datasets.Features', 'datasets.Features', (['features_dict'], {}), '(features_dict)\n', (10865, 10880), False, 'import datasets\n'), ((8524, 8548), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (8538, 8548), False, 'import datasets\n'), ((8889, 8913), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (8903, 8913), False, 'import datasets\n'), ((13664, 13679), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (13672, 13679), True, 'import numpy as np\n'), ((14124, 14144), 'xml.etree.ElementTree.parse', 'ET.parse', (['words_file'], {}), '(words_file)\n', (14132, 14144), True, 'import xml.etree.ElementTree as ET\n'), ((15579, 15602), 'xml.etree.ElementTree.parse', 'ET.parse', (['segments_file'], {}), 
'(segments_file)\n', (15587, 15602), True, 'import xml.etree.ElementTree as ET\n'), ((17542, 17562), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (17556, 17562), False, 'import os\n'), ((18009, 18029), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (18023, 18029), False, 'import os\n'), ((9299, 9323), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (9313, 9323), False, 'import datasets\n'), ((9355, 9379), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (9369, 9379), False, 'import datasets\n'), ((9411, 9435), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (9425, 9435), False, 'import datasets\n'), ((9467, 9491), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (9481, 9491), False, 'import datasets\n'), ((9944, 9968), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (9958, 9968), False, 'import datasets\n'), ((10002, 10026), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10016, 10026), False, 'import datasets\n'), ((10060, 10084), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10074, 10084), False, 'import datasets\n'), ((10118, 10142), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10132, 10142), False, 'import datasets\n'), ((10176, 10200), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10190, 10200), False, 'import datasets\n'), ((10234, 10258), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10248, 10258), False, 'import datasets\n'), ((10292, 10316), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10306, 10316), False, 'import datasets\n'), ((10350, 10374), 'datasets.Value', 'datasets.Value', (['"""string"""'], {}), "('string')\n", (10364, 10374), False, 'import datasets\n')] |
"""HPO script."""
import argparse
import logging
import pathlib
import random as rn
import sys
from enum import Enum
from typing import Any, Mapping, Tuple
import numpy as np
import torch
from ray import tune
from sklearn.metrics import mean_absolute_error, mean_squared_error
from nlkda.data.base import DatasetEnum, get_dataset_size
from nlkda.data.loader import get_data
from nlkda.eval import evaluate_model, get_model_size
from nlkda.models.base import BoundModel, FormulationWrapperEnum, is_multi_output
from nlkda.models.boosting import BoostingWithCsModel
from nlkda.models.utils import ModelEnum, configure_param_space_nn, create_model_from_config, \
create_wrapper_from_config, save_model_to_directory
from nlkda.settings import K_MAX
from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform
def _sample_max_leaf_nodes(spec) -> int:
    """Sample ``max_leaf_nodes`` in [1, n * k] on a quantized log-uniform scale."""
    cfg = spec.config
    return tune_q_log_uniform(low=1, high=cfg.n * cfg.k, q=1)
def get_model_search_space(model_type: ModelEnum) -> Mapping[str, Any]:
    """Return the hyper-parameter search space for the given model type.

    :param model_type: the model family to build a search space for.
    :return: a mapping from hyper-parameter name to a tune sampler (or constant).
    :raises ValueError: if no search space is registered for ``model_type``.
    """
    # Dispatch table: each entry lazily builds the space for one model family.
    space_builders = {
        ModelEnum.RF: lambda: dict(
            max_depth=tune.randint(1, 10),
            n_estimators=tune_q_log_uniform(high=100, q=1),
        ),
        ModelEnum.DT: lambda: dict(
            max_leaf_nodes=tune.sample_from(_sample_max_leaf_nodes),
        ),
        ModelEnum.ADB: lambda: dict(
            n_estimators=tune_q_log_uniform(low=1, high=500, q=1),
            learning_rate=tune.loguniform(1.0e-04, 1.0e+01),
        ),
        ModelEnum.GB: lambda: dict(
            max_leaf_nodes=tune_q_log_uniform(low=4, high=15, q=1),
            n_estimators=tune_q_log_uniform(high=500, q=1),
            learning_rate=tune.loguniform(1.0e-04, 1.0e+01),
        ),
        ModelEnum.NN: lambda: dict(
            units=tune.randint(10, 28),
            layers=tune.randint(2, 9),
            dropout=tune_bool(),
            dropout_rate=tune.uniform(0.1, 0.5),
            batch_size=tune.choice([2 ** i for i in range(6, 10)]),
            loss=tune.choice(['mean_squared_error', 'mean_absolute_error']),
            batch_normalization=tune_bool(),
            is_normalized=tune_bool(),
            patience=4,
        ),
        ModelEnum.KN: lambda: dict(
            n_neighbors=1,
        ),
        # COP takes no tunable hyper-parameters.
        ModelEnum.COP: dict,
    }
    if model_type not in space_builders:
        raise ValueError(model_type)
    return space_builders[model_type]()
def _prepare_logger():
# setup logger
logger_main = logging.getLogger(__name__)
logger_main.setLevel(logging.DEBUG)
# create console handler and set level to debug
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter("\n%(asctime)s - %(name)s - %(levelname)s : %(message)s")
# add formatter to ch
sh.setFormatter(formatter)
logger_main.addHandler(sh)
logger_main.propagate = False
return logger_main
def objective(config, reporter):
    """The optimization objective.

    Fits a model described by the sampled ``config``, evaluates it (MAE/MSE,
    model size, and consistency-score metrics aggregated over points, over k,
    and combined), logs everything to MLFlow, and reports ``cs_mean_mono_p``
    back to ray tune.

    :param config: the sampled hyper-parameter configuration (see ``main``).
    :param reporter: ray tune reporter callback.
    :return: the flattened result dict, or ``None`` if a RuntimeError occurred.
    """
    logger_main = _prepare_logger()
    # setup random seed
    random_state = 0
    np.random.seed(random_state)
    rn.seed(random_state)
    torch.random.manual_seed(seed=random_state)
    # get data
    data_root = pathlib.Path(config["data_root"])
    x, y, distance = get_data(
        dataset_enum=DatasetEnum(config["dataset"]),
        data_root=data_root,
    )
    # NOTE(review): n_samples is unused; the unpacking only serves `dimensions`.
    n_samples, dimensions = x.shape
    skd_max = np.max(y, axis=0)
    skd_min = np.min(y, axis=0)
    # SAMPLE WEIGHTS
    sw_type = config["sample_weights"]
    # boosting is only enabled for the MEAN_CS_K sample-weight scheme
    if sw_type == SampleWeightsEnum.MEAN_CS_K.value:
        boost_iterations = config["boosting"]["iterations"]
    else:
        boost_iterations = 1
    if config["model"]["model_type"] == ModelEnum.NN.value:
        config = configure_param_space_nn(config, dimensions)
    config["clipped"] = True
    db = MLFlowClient(root=data_root / "experiments", tracking_uri=config["tracking_uri"])
    try:
        # create base model like RandomForest or GradientBoost etc.
        model_obj = create_model_from_config(
            model=ModelEnum(config["model"]["model_type"]),
            params=config["model"]["params"],
            is_multi=is_multi_output(FormulationWrapperEnum(config["model"]["formulation"]))
        )
        # create model wrapper like KAsInputWrapper or DiffKOutputWrapper etc.
        model = create_wrapper_from_config(
            model_wrapper=FormulationWrapperEnum(config["model"]["formulation"]),
            params={"base": model_obj}
        )
        # Fit model
        if boost_iterations > 1:
            sw_agg_point = False if sw_type == SampleWeightsEnum.MEAN_CS_K.value else True
            logger_main.info("Creating Boosting Model for CS!")
            boosting_model = BoostingWithCsModel(
                base=model,
                dataset=config["dataset"],
                sw_agg_point=sw_agg_point,
                iterations=boost_iterations,
            )
            boosting_model.fit(x=x, y=y, sample_weights=None)
            # keep only the boosted base model for evaluation below
            model = boosting_model.base
        else:
            model.fit(x=x, y=y, sample_weights=None)
        # create experiment
        experiment_parameters = flatten_dict(config)
        run_id, output_path = db.init_experiment(hyper_parameters=experiment_parameters)
        output_path = pathlib.Path(output_path)
        # save fitted model to output path
        save_model_to_directory(directory=output_path, model=model)
        # evaluate
        eval_batch_size = 30
        pred = model.predict(x=x)
        save_to_file(
            output_root=output_path,
            file_name="pred_k_dist",
            data=pred,
        )
        # ..... MODEL_SIZE
        model_size = get_model_size(model.base)
        # ..... MAE , MSE
        mae = mean_absolute_error(y.reshape(-1), pred)
        mse = mean_squared_error(y.reshape(-1), pred)
        # error over points --> O(k_max), model size increases by 2*k_max
        min_error, max_error, cs_mean_p, cs_median_p, cs_mean_mono_p, cs_median_mono_p = _evaluate_aggregation(
            x=x,
            kd=y,
            distance=distance,
            eval_batch_size=eval_batch_size,
            model=model,
            output_path=output_path,
            pred=pred,
            skd_max=skd_max,
            skd_min=skd_min,
            agg_point=True,
        )
        # error over k --> O(n), model size increases by 2*n
        _, _, cs_mean_k, cs_median_k, cs_mean_mono_k, cs_median_mono_k = _evaluate_aggregation(
            x=x,
            kd=y,
            distance=distance,
            eval_batch_size=eval_batch_size,
            model=model,
            output_path=output_path,
            pred=pred,
            skd_max=skd_max,
            skd_min=skd_min,
            agg_point=False,
        )
        # combine error over points and k, model size increases by 2*n + 2*k_max
        _, _, cs_mean_comb, cs_median_comb, cs_mean_mono_comb, cs_median_mono_comb = _evaluate_aggregation(
            x=x,
            kd=y,
            distance=distance,
            eval_batch_size=eval_batch_size,
            model=model,
            output_path=output_path,
            pred=pred,
            skd_max=skd_max,
            skd_min=skd_min,
            both=True
        )
        # finalise experiment
        result = {
            "mae": mae,
            "mse": mse,
            "model_size": model_size,
            "size_agg_p": model_size + (2 * K_MAX),
            "size_agg_k": model_size + (2 * config["n"]),
            "size_combined": model_size + (2 * K_MAX) + (2 * config["n"]),
            "max_error": {
                "error": max_error,
                # +1: diffs are indexed from k=1
                "k": int(np.argmax(model.max_diff)) + 1,
            },
            "min_error": {
                "error": min_error,
                "k": int(np.argmin(model.min_diff)) + 1,
            },
            "cs_mean_agg_p": cs_mean_p,
            "cs_median_agg_p": cs_median_p,
            "cs_mean_mono_agg_p": cs_mean_mono_p,
            "cs_median_mono_agg_p": cs_median_mono_p,
            "cs_mean_agg_k": cs_mean_k,
            "cs_median_agg_k": cs_median_k,
            "cs_mean_mono_agg_k": cs_mean_mono_k,
            "cs_median_mono_agg_k": cs_median_mono_k,
            "cs_mean_combined": cs_mean_comb,
            "cs_median_combined": cs_median_comb,
            "cs_mean_mono_combined": cs_mean_mono_comb,
            "cs_median_mono_combined": cs_median_mono_comb
        }
        # log all results
        result = flatten_dict(result)
        db.finalise_experiment(result=result)
        reporter(cs_mean_mono_p=cs_mean_mono_p)
        return result
    except RuntimeError as error:
        logger_main.warning(error)
        # NOTE(review): the extra positional args below are interpreted by
        # logging as %-format arguments; with no placeholders in the message
        # the formatting fails and logging reports an internal error instead
        # of the intended text. Consider f-strings here.
        logger_main.warning("Oops!", sys.exc_info()[0], "occured.")
        logger_main.warning(sys.exc_info()[1], " : value")
        return
def _evaluate_aggregation(
    x: np.ndarray,
    kd: np.ndarray,
    distance: str,
    eval_batch_size: int,
    model: BoundModel,
    output_path: pathlib.Path,
    pred: np.ndarray,
    skd_max: np.ndarray,
    skd_min: np.ndarray,
    agg_point: bool = False,
    both: bool = False
) -> Tuple[float, float, float, float, float, float]:
    """Evaluate the model once without and once with monotonicity enforced.

    Sets the model's min/max bounds from the predictions, then runs
    ``evaluate_model`` for ``monotonous`` in (False, True) and returns the
    overall min/max bound errors followed by the concatenated metrics.
    """
    model.set_min_max(x=pred, y=kd, is_predicted=True, agg_point=agg_point)
    bound_errors = (min(model.min_diff), max(model.max_diff))
    metrics = []
    for enforce_monotonicity in (False, True):
        metrics.extend(
            evaluate_model(
                x=x,
                distance=distance,
                eval_batch_size=eval_batch_size,
                model=model,
                output_path=output_path,
                pred=pred,
                skd_max=skd_max,
                skd_min=skd_min,
                agg_point=agg_point,
                monotonous=enforce_monotonicity,
                both=both,
                kd=kd
            )
        )
    return bound_errors + tuple(metrics)
class SampleWeightsEnum(Enum):
    """Enum for sample weights."""
    NONE = "NO"  # no sample weighting
    MEAN_CS_K = "mean_cs_agg_k"  # enables boosting in `objective` — see its sw_type branch
def main(
    model_type: ModelEnum,
    dataset: DatasetEnum,
    s_w: SampleWeightsEnum,
    tracking_uri: str,
    data_root: pathlib.Path = pathlib.Path("/mnt/data"),
    local_dir: pathlib.Path = pathlib.Path("~"),
    num_samples: int = 10,
) -> None:
    """The main HPO routine, using ray tune.

    :param model_type: model family to tune.
    :param dataset: dataset to tune on.
    :param s_w: sample-weight scheme.
    :param tracking_uri: MLFlow tracking URI.
    :param data_root: root directory for data and experiments.
    :param local_dir: ray tune working directory.
    :param num_samples: number of HPO trials.
    """
    # request one GPU per trial only if CUDA is actually available
    gpu_count = 1 if torch.cuda.is_available() else 0
    search_space = dict(
        data_root=str(data_root),
        tracking_uri=tracking_uri,
        dataset=dataset.value,
        k=K_MAX,
        n=get_dataset_size(dataset),
        model=dict(
            model_type=model_type.value,
            formulation=tune_enum(enum_cls=FormulationWrapperEnum),
            params=get_model_search_space(model_type),
        ),
        sample_weights=s_w.value,
        boosting=dict(iterations=3),
    )
    analysis = tune.run(
        objective,
        config=search_space,
        local_dir=str(local_dir.expanduser().absolute()),
        num_samples=num_samples,
        resources_per_trial={"cpu": 3, "gpu": gpu_count},
    )
    print("Best config: ", analysis.get_best_config(metric="cs_mean_mono_p", mode="min"))
if __name__ == "__main__":
    # CLI entry point: parse model/dataset/sample-weight choices and run the HPO.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str, default=DatasetEnum.OL.value, help="The name of the dataset.",
                        choices=enum_values(enum_cls=DatasetEnum))
    parser.add_argument("--model", type=str, default=ModelEnum.NN.value, help="The name of the model.",
                        choices=enum_values(enum_cls=ModelEnum))
    parser.add_argument("--sample_weight", type=str, default=SampleWeightsEnum.NONE.value,
                        help="The name of the sample weights.", choices=enum_values(enum_cls=SampleWeightsEnum))
    parser.add_argument("--data_root", type=str, default="/tmp/data", help="The directory where data is stored.")
    parser.add_argument("--tracking_uri", type=str, default="http://localhost:5000", help="The MLFlow tracking URI.")
    parser.add_argument("--trials", type=int, default=10, help="The number of HPO trials to run.")
    args = parser.parse_args()
    main(
        model_type=ModelEnum(args.model),
        dataset=DatasetEnum(args.dataset),
        s_w=SampleWeightsEnum(args.sample_weight),
        data_root=pathlib.Path(args.data_root),
        tracking_uri=args.tracking_uri,
        num_samples=args.trials,
    )
| [
"logging.getLogger",
"logging.StreamHandler",
"nlkda.eval.get_model_size",
"nlkda.models.base.FormulationWrapperEnum",
"sys.exc_info",
"torch.cuda.is_available",
"nlkda.models.utils.ModelEnum",
"torch.random.manual_seed",
"nlkda.utils.tune_bool",
"nlkda.utils.enum_values",
"argparse.ArgumentPars... | [((969, 1011), 'nlkda.utils.tune_q_log_uniform', 'tune_q_log_uniform', ([], {'low': '(1)', 'high': '(n * k)', 'q': '(1)'}), '(low=1, high=n * k, q=1)\n', (987, 1011), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((2592, 2619), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2609, 2619), False, 'import logging\n'), ((2721, 2744), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2742, 2744), False, 'import logging\n'), ((2815, 2893), 'logging.Formatter', 'logging.Formatter', (['"""\n%(asctime)s - %(name)s - %(levelname)s : %(message)s"""'], {}), '("""\n%(asctime)s - %(name)s - %(levelname)s : %(message)s""")\n', (2832, 2893), False, 'import logging\n'), ((3195, 3223), 'numpy.random.seed', 'np.random.seed', (['random_state'], {}), '(random_state)\n', (3209, 3223), True, 'import numpy as np\n'), ((3228, 3249), 'random.seed', 'rn.seed', (['random_state'], {}), '(random_state)\n', (3235, 3249), True, 'import random as rn\n'), ((3254, 3297), 'torch.random.manual_seed', 'torch.random.manual_seed', ([], {'seed': 'random_state'}), '(seed=random_state)\n', (3278, 3297), False, 'import torch\n'), ((3330, 3363), 'pathlib.Path', 'pathlib.Path', (["config['data_root']"], {}), "(config['data_root'])\n", (3342, 3363), False, 'import pathlib\n'), ((3533, 3550), 'numpy.max', 'np.max', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (3539, 3550), True, 'import numpy as np\n'), ((3565, 3582), 'numpy.min', 'np.min', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (3571, 3582), True, 'import numpy as np\n'), ((3959, 4045), 'nlkda.utils.MLFlowClient', 'MLFlowClient', ([], {'root': "(data_root / 'experiments')", 'tracking_uri': "config['tracking_uri']"}), "(root=data_root / 'experiments', tracking_uri=config[\n 'tracking_uri'])\n", (3971, 4045), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, 
tune_bool, tune_enum, tune_q_log_uniform\n'), ((10225, 10250), 'pathlib.Path', 'pathlib.Path', (['"""/mnt/data"""'], {}), "('/mnt/data')\n", (10237, 10250), False, 'import pathlib\n'), ((10282, 10299), 'pathlib.Path', 'pathlib.Path', (['"""~"""'], {}), "('~')\n", (10294, 10299), False, 'import pathlib\n'), ((11331, 11356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11354, 11356), False, 'import argparse\n'), ((3874, 3918), 'nlkda.models.utils.configure_param_space_nn', 'configure_param_space_nn', (['config', 'dimensions'], {}), '(config, dimensions)\n', (3898, 3918), False, 'from nlkda.models.utils import ModelEnum, configure_param_space_nn, create_model_from_config, create_wrapper_from_config, save_model_to_directory\n'), ((5291, 5311), 'nlkda.utils.flatten_dict', 'flatten_dict', (['config'], {}), '(config)\n', (5303, 5311), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((5423, 5448), 'pathlib.Path', 'pathlib.Path', (['output_path'], {}), '(output_path)\n', (5435, 5448), False, 'import pathlib\n'), ((5501, 5560), 'nlkda.models.utils.save_model_to_directory', 'save_model_to_directory', ([], {'directory': 'output_path', 'model': 'model'}), '(directory=output_path, model=model)\n', (5524, 5560), False, 'from nlkda.models.utils import ModelEnum, configure_param_space_nn, create_model_from_config, create_wrapper_from_config, save_model_to_directory\n'), ((5653, 5726), 'nlkda.utils.save_to_file', 'save_to_file', ([], {'output_root': 'output_path', 'file_name': '"""pred_k_dist"""', 'data': 'pred'}), "(output_root=output_path, file_name='pred_k_dist', data=pred)\n", (5665, 5726), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((5823, 5849), 'nlkda.eval.get_model_size', 'get_model_size', (['model.base'], {}), '(model.base)\n', (5837, 5849), False, 'from nlkda.eval import 
evaluate_model, get_model_size\n'), ((8622, 8642), 'nlkda.utils.flatten_dict', 'flatten_dict', (['result'], {}), '(result)\n', (8634, 8642), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((10405, 10430), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10428, 10430), False, 'import torch\n'), ((3416, 3446), 'nlkda.data.base.DatasetEnum', 'DatasetEnum', (["config['dataset']"], {}), "(config['dataset'])\n", (3427, 3446), False, 'from nlkda.data.base import DatasetEnum, get_dataset_size\n'), ((4867, 4986), 'nlkda.models.boosting.BoostingWithCsModel', 'BoostingWithCsModel', ([], {'base': 'model', 'dataset': "config['dataset']", 'sw_agg_point': 'sw_agg_point', 'iterations': 'boost_iterations'}), "(base=model, dataset=config['dataset'], sw_agg_point=\n sw_agg_point, iterations=boost_iterations)\n", (4886, 4986), False, 'from nlkda.models.boosting import BoostingWithCsModel\n'), ((9549, 9777), 'nlkda.eval.evaluate_model', 'evaluate_model', ([], {'x': 'x', 'distance': 'distance', 'eval_batch_size': 'eval_batch_size', 'model': 'model', 'output_path': 'output_path', 'pred': 'pred', 'skd_max': 'skd_max', 'skd_min': 'skd_min', 'agg_point': 'agg_point', 'monotonous': 'monotonous', 'both': 'both', 'kd': 'kd'}), '(x=x, distance=distance, eval_batch_size=eval_batch_size,\n model=model, output_path=output_path, pred=pred, skd_max=skd_max,\n skd_min=skd_min, agg_point=agg_point, monotonous=monotonous, both=both,\n kd=kd)\n', (9563, 9777), False, 'from nlkda.eval import evaluate_model, get_model_size\n'), ((11499, 11532), 'nlkda.utils.enum_values', 'enum_values', ([], {'enum_cls': 'DatasetEnum'}), '(enum_cls=DatasetEnum)\n', (11510, 11532), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((11670, 11701), 'nlkda.utils.enum_values', 'enum_values', ([], {'enum_cls': 'ModelEnum'}), 
'(enum_cls=ModelEnum)\n', (11681, 11701), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((11866, 11905), 'nlkda.utils.enum_values', 'enum_values', ([], {'enum_cls': 'SampleWeightsEnum'}), '(enum_cls=SampleWeightsEnum)\n', (11877, 11905), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((12299, 12320), 'nlkda.models.utils.ModelEnum', 'ModelEnum', (['args.model'], {}), '(args.model)\n', (12308, 12320), False, 'from nlkda.models.utils import ModelEnum, configure_param_space_nn, create_model_from_config, create_wrapper_from_config, save_model_to_directory\n'), ((12338, 12363), 'nlkda.data.base.DatasetEnum', 'DatasetEnum', (['args.dataset'], {}), '(args.dataset)\n', (12349, 12363), False, 'from nlkda.data.base import DatasetEnum, get_dataset_size\n'), ((12434, 12462), 'pathlib.Path', 'pathlib.Path', (['args.data_root'], {}), '(args.data_root)\n', (12446, 12462), False, 'import pathlib\n'), ((1164, 1183), 'ray.tune.randint', 'tune.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1176, 1183), False, 'from ray import tune\n'), ((1210, 1243), 'nlkda.utils.tune_q_log_uniform', 'tune_q_log_uniform', ([], {'high': '(100)', 'q': '(1)'}), '(high=100, q=1)\n', (1228, 1243), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((4183, 4223), 'nlkda.models.utils.ModelEnum', 'ModelEnum', (["config['model']['model_type']"], {}), "(config['model']['model_type'])\n", (4192, 4223), False, 'from nlkda.models.utils import ModelEnum, configure_param_space_nn, create_model_from_config, create_wrapper_from_config, save_model_to_directory\n'), ((4524, 4578), 'nlkda.models.base.FormulationWrapperEnum', 'FormulationWrapperEnum', (["config['model']['formulation']"], {}), "(config['model']['formulation'])\n", (4546, 4578), False, 'from 
nlkda.models.base import BoundModel, FormulationWrapperEnum, is_multi_output\n'), ((1340, 1380), 'ray.tune.sample_from', 'tune.sample_from', (['_sample_max_leaf_nodes'], {}), '(_sample_max_leaf_nodes)\n', (1356, 1380), False, 'from ray import tune\n'), ((4308, 4362), 'nlkda.models.base.FormulationWrapperEnum', 'FormulationWrapperEnum', (["config['model']['formulation']"], {}), "(config['model']['formulation'])\n", (4330, 4362), False, 'from nlkda.models.base import BoundModel, FormulationWrapperEnum, is_multi_output\n'), ((8866, 8880), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8878, 8880), False, 'import sys\n'), ((8925, 8939), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (8937, 8939), False, 'import sys\n'), ((10650, 10675), 'nlkda.data.base.get_dataset_size', 'get_dataset_size', (['dataset'], {}), '(dataset)\n', (10666, 10675), False, 'from nlkda.data.base import DatasetEnum, get_dataset_size\n'), ((1476, 1516), 'nlkda.utils.tune_q_log_uniform', 'tune_q_log_uniform', ([], {'low': '(1)', 'high': '(500)', 'q': '(1)'}), '(low=1, high=500, q=1)\n', (1494, 1516), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((1544, 1573), 'ray.tune.loguniform', 'tune.loguniform', (['(0.0001)', '(10.0)'], {}), '(0.0001, 10.0)\n', (1559, 1573), False, 'from ray import tune\n'), ((7799, 7824), 'numpy.argmax', 'np.argmax', (['model.max_diff'], {}), '(model.max_diff)\n', (7808, 7824), True, 'import numpy as np\n'), ((7934, 7959), 'numpy.argmin', 'np.argmin', (['model.min_diff'], {}), '(model.min_diff)\n', (7943, 7959), True, 'import numpy as np\n'), ((1674, 1713), 'nlkda.utils.tune_q_log_uniform', 'tune_q_log_uniform', ([], {'low': '(4)', 'high': '(15)', 'q': '(1)'}), '(low=4, high=15, q=1)\n', (1692, 1713), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((1740, 1773), 'nlkda.utils.tune_q_log_uniform', 
'tune_q_log_uniform', ([], {'high': '(500)', 'q': '(1)'}), '(high=500, q=1)\n', (1758, 1773), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((1801, 1830), 'ray.tune.loguniform', 'tune.loguniform', (['(0.0001)', '(10.0)'], {}), '(0.0001, 10.0)\n', (1816, 1830), False, 'from ray import tune\n'), ((10774, 10816), 'nlkda.utils.tune_enum', 'tune_enum', ([], {'enum_cls': 'FormulationWrapperEnum'}), '(enum_cls=FormulationWrapperEnum)\n', (10783, 10816), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((1922, 1942), 'ray.tune.randint', 'tune.randint', (['(10)', '(28)'], {}), '(10, 28)\n', (1934, 1942), False, 'from ray import tune\n'), ((1963, 1981), 'ray.tune.randint', 'tune.randint', (['(2)', '(9)'], {}), '(2, 9)\n', (1975, 1981), False, 'from ray import tune\n'), ((2003, 2014), 'nlkda.utils.tune_bool', 'tune_bool', ([], {}), '()\n', (2012, 2014), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((2041, 2063), 'ray.tune.uniform', 'tune.uniform', (['(0.1)', '(0.5)'], {}), '(0.1, 0.5)\n', (2053, 2063), False, 'from ray import tune\n'), ((2150, 2208), 'ray.tune.choice', 'tune.choice', (["['mean_squared_error', 'mean_absolute_error']"], {}), "(['mean_squared_error', 'mean_absolute_error'])\n", (2161, 2208), False, 'from ray import tune\n'), ((2242, 2253), 'nlkda.utils.tune_bool', 'tune_bool', ([], {}), '()\n', (2251, 2253), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n'), ((2281, 2292), 'nlkda.utils.tune_bool', 'tune_bool', ([], {}), '()\n', (2290, 2292), False, 'from nlkda.utils import MLFlowClient, enum_values, flatten_dict, save_to_file, tune_bool, tune_enum, tune_q_log_uniform\n')] |
import hydra
import hydra.utils as utils
from pathlib import Path
import torch
import numpy as np
from tqdm import tqdm
import soundfile as sf
from model_encoder import Encoder, Encoder_lf0
from model_decoder import Decoder_ac
from model_encoder import SpeakerEncoder as Encoder_spk
import os
import random
from glob import glob
import subprocess
from spectrogram import logmelspectrogram
import kaldiio
import resampy
import pyworld as pw
def select_wavs(paths, min_dur=2, max_dur=8):
    """Return the subset of *paths* whose audio duration is within bounds.

    Args:
        paths: iterable of audio file paths readable by soundfile.
        min_dur: minimum duration in seconds (inclusive).
        max_dur: maximum duration in seconds (inclusive).

    Bug fix: the upper bound previously compared against a hard-coded 8
    instead of the ``max_dur`` parameter, so callers could not widen it.
    """
    pp = []
    for p in paths:
        x, fs = sf.read(p)
        dur = len(x) / fs
        if min_dur <= dur <= max_dur:
            pp.append(p)
    return pp
def extract_logmel(wav_path, mean, std, sr=16000):
    """Load a wav file and compute a normalised log-mel spectrogram plus a
    normalised log-F0 contour.

    Args:
        wav_path: path to the input audio file.
        mean, std: dataset statistics used to z-score the mel spectrogram.
        sr: target sampling rate; the audio is resampled when it differs.

    Returns:
        (mel, lf0): ``mel`` is the z-scored log-mel spectrogram (time-major:
        ``mel.shape[0]`` is used as the frame count below); ``lf0`` is a
        float32 vector of per-frame log-F0, z-scored over voiced frames only
        (unvoiced frames keep their raw 0 value).
    """
    # wav, fs = librosa.load(wav_path, sr=sr)
    wav, fs = sf.read(wav_path)
    if fs != sr:
        wav = resampy.resample(wav, fs, sr, axis=0)
        fs = sr
    #wav, _ = librosa.effects.trim(wav, top_db=15)
    # duration = len(wav)/fs
    assert fs == 16000
    # Peak-normalise so |wav| <= 1 before feature extraction.
    peak = np.abs(wav).max()
    if peak > 1.0:
        wav /= peak
    mel = logmelspectrogram(
        x=wav,
        fs=fs,
        n_mels=80,
        n_fft=400,
        n_shift=160,
        win_length=400,
        window='hann',
        fmin=80,
        fmax=7600,
    )
    # Z-score the spectrogram with the precomputed dataset statistics.
    mel = (mel - mean) / (std + 1e-8)
    tlen = mel.shape[0]
    # Hop of 160 samples expressed in milliseconds, as pyworld expects.
    frame_period = 160/fs*1000
    f0, timeaxis = pw.dio(wav.astype('float64'), fs, frame_period=frame_period)
    f0 = pw.stonemask(wav.astype('float64'), f0, timeaxis, fs)
    # Trim F0 to the mel frame count so both features are aligned.
    f0 = f0[:tlen].reshape(-1).astype('float32')
    nonzeros_indices = np.nonzero(f0)
    lf0 = f0.copy()
    lf0[nonzeros_indices] = np.log(f0[nonzeros_indices]) # for f0(Hz), lf0 > 0 when f0 != 0
    # NOTE: `mean` and `std` parameters are deliberately shadowed here with
    # the voiced-frame statistics of this utterance's log-F0.
    mean, std = np.mean(lf0[nonzeros_indices]), np.std(lf0[nonzeros_indices])
    lf0[nonzeros_indices] = (lf0[nonzeros_indices] - mean) / (std + 1e-8)
    return mel, lf0
@hydra.main(config_path="config/convert.yaml")
def convert(cfg):
    """Convert utterances of an unseen source speaker to two unseen target
    speakers, dump the converted log-mel features, and vocode them with a
    pretrained Parallel WaveGAN.

    Args:
        cfg: hydra config; must provide ``checkpoint`` and ``model.encoder``.
    """
    src_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p225/*mic1.flac') # modified to absolute wavs path, can select any unseen speakers
    src_wav_paths = select_wavs(src_wav_paths)
    tar1_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p231/*mic1.flac') # can select any unseen speakers
    tar2_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p243/*mic1.flac') # can select any unseen speakers
    # tar1_wav_paths = select_wavs(tar1_wav_paths)
    # tar2_wav_paths = select_wavs(tar2_wav_paths)
    # Only the alphabetically first utterance of each target speaker is used
    # as the reference recording.
    tar1_wav_paths = [sorted(tar1_wav_paths)[0]]
    tar2_wav_paths = [sorted(tar2_wav_paths)[0]]
    print('len(src):', len(src_wav_paths), 'len(tar1):', len(tar1_wav_paths), 'len(tar2):', len(tar2_wav_paths))
    # Derive the output directory name from the checkpoint path components.
    tmp = cfg.checkpoint.split('/')
    steps = tmp[-1].split('-')[-1].split('.')[0]
    out_dir = f'test/{tmp[-3]}-{tmp[-2]}-{steps}'
    out_dir = Path(utils.to_absolute_path(out_dir))
    out_dir.mkdir(exist_ok=True, parents=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Build content, pitch and speaker encoders plus the decoder, then load
    # the trained weights onto the selected device.
    encoder = Encoder(**cfg.model.encoder)
    encoder_lf0 = Encoder_lf0()
    encoder_spk = Encoder_spk()
    decoder = Decoder_ac(dim_neck=64)
    encoder.to(device)
    encoder_lf0.to(device)
    encoder_spk.to(device)
    decoder.to(device)
    print("Load checkpoint from: {}:".format(cfg.checkpoint))
    checkpoint_path = utils.to_absolute_path(cfg.checkpoint)
    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    encoder.load_state_dict(checkpoint["encoder"])
    encoder_spk.load_state_dict(checkpoint["encoder_spk"])
    decoder.load_state_dict(checkpoint["decoder"])
    encoder.eval()
    encoder_spk.eval()
    decoder.eval()
    mel_stats = np.load('./data/mel_stats.npy')
    mean = mel_stats[0]
    std = mel_stats[1]
    feat_writer = kaldiio.WriteHelper("ark,scp:{o}.ark,{o}.scp".format(o=str(out_dir)+'/feats.1'))
    for i, src_wav_path in tqdm(enumerate(src_wav_paths, 1)):
        # Only the first 10 source utterances are converted.
        if i>10:
            break
        mel, lf0 = extract_logmel(src_wav_path, mean, std)
        # Alternate the reference speaker per utterance.
        # NOTE(review): `tar` is assigned but never used afterwards.
        if i % 2 == 1:
            ref_wav_path = random.choice(tar2_wav_paths)
            tar = 'tarMale_'
        else:
            ref_wav_path = random.choice(tar1_wav_paths)
            tar = 'tarFemale_'
        ref_mel, _ = extract_logmel(ref_wav_path, mean, std)
        mel = torch.FloatTensor(mel.T).unsqueeze(0).to(device)
        lf0 = torch.FloatTensor(lf0).unsqueeze(0).to(device)
        ref_mel = torch.FloatTensor(ref_mel.T).unsqueeze(0).to(device)
        out_filename = os.path.basename(src_wav_path).split('.')[0]
        with torch.no_grad():
            # Encode content and pitch, condition on the reference speaker
            # embedding, then decode to a converted log-mel spectrogram.
            z, _, _, _ = encoder.encode(mel)
            lf0_embs = encoder_lf0(lf0)
            spk_embs = encoder_spk(ref_mel)
            output = decoder(z, lf0_embs, spk_embs)
            logmel = output.squeeze(0).cpu().numpy()
        # Write converted, source and reference features for later vocoding.
        feat_writer[out_filename] = logmel
        feat_writer[out_filename+'_src'] = mel.squeeze(0).cpu().numpy().T
        feat_writer[out_filename+'_ref'] = ref_mel.squeeze(0).cpu().numpy().T
        subprocess.call(['cp', src_wav_path, out_dir])
    feat_writer.close()
    print('synthesize waveform...')
    # Vocode every feature in the scp file with the pretrained vocoder.
    cmd = ['parallel-wavegan-decode', '--checkpoint', \
        '/vocoder/checkpoint-3000000steps.pkl', \
        '--feats-scp', f'{str(out_dir)}/feats.1.scp', '--outdir', str(out_dir)]
    subprocess.call(cmd)
if __name__ == "__main__":
    # Hydra entry point: loads config/convert.yaml and runs the conversion.
    convert()
| [
"numpy.log",
"hydra.utils.to_absolute_path",
"torch.cuda.is_available",
"spectrogram.logmelspectrogram",
"model_encoder.SpeakerEncoder",
"resampy.resample",
"numpy.mean",
"hydra.main",
"model_encoder.Encoder",
"subprocess.call",
"glob.glob",
"numpy.abs",
"random.choice",
"numpy.nonzero",
... | [((1921, 1966), 'hydra.main', 'hydra.main', ([], {'config_path': '"""config/convert.yaml"""'}), "(config_path='config/convert.yaml')\n", (1931, 1966), False, 'import hydra\n'), ((751, 768), 'soundfile.read', 'sf.read', (['wav_path'], {}), '(wav_path)\n', (758, 768), True, 'import soundfile as sf\n'), ((1035, 1156), 'spectrogram.logmelspectrogram', 'logmelspectrogram', ([], {'x': 'wav', 'fs': 'fs', 'n_mels': '(80)', 'n_fft': '(400)', 'n_shift': '(160)', 'win_length': '(400)', 'window': '"""hann"""', 'fmin': '(80)', 'fmax': '(7600)'}), "(x=wav, fs=fs, n_mels=80, n_fft=400, n_shift=160,\n win_length=400, window='hann', fmin=80, fmax=7600)\n", (1052, 1156), False, 'from spectrogram import logmelspectrogram\n'), ((1620, 1634), 'numpy.nonzero', 'np.nonzero', (['f0'], {}), '(f0)\n', (1630, 1634), True, 'import numpy as np\n'), ((1683, 1711), 'numpy.log', 'np.log', (['f0[nonzeros_indices]'], {}), '(f0[nonzeros_indices])\n', (1689, 1711), True, 'import numpy as np\n'), ((2005, 2071), 'glob.glob', 'glob', (['"""/Dataset/VCTK-Corpus/wav48_silence_trimmed/p225/*mic1.flac"""'], {}), "('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p225/*mic1.flac')\n", (2009, 2071), False, 'from glob import glob\n'), ((2210, 2276), 'glob.glob', 'glob', (['"""/Dataset/VCTK-Corpus/wav48_silence_trimmed/p231/*mic1.flac"""'], {}), "('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p231/*mic1.flac')\n", (2214, 2276), False, 'from glob import glob\n'), ((2331, 2397), 'glob.glob', 'glob', (['"""/Dataset/VCTK-Corpus/wav48_silence_trimmed/p243/*mic1.flac"""'], {}), "('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p243/*mic1.flac')\n", (2335, 2397), False, 'from glob import glob\n'), ((3074, 3102), 'model_encoder.Encoder', 'Encoder', ([], {}), '(**cfg.model.encoder)\n', (3081, 3102), False, 'from model_encoder import Encoder, Encoder_lf0\n'), ((3121, 3134), 'model_encoder.Encoder_lf0', 'Encoder_lf0', ([], {}), '()\n', (3132, 3134), False, 'from model_encoder import Encoder, Encoder_lf0\n'), ((3153, 3166), 
'model_encoder.SpeakerEncoder', 'Encoder_spk', ([], {}), '()\n', (3164, 3166), True, 'from model_encoder import SpeakerEncoder as Encoder_spk\n'), ((3181, 3204), 'model_decoder.Decoder_ac', 'Decoder_ac', ([], {'dim_neck': '(64)'}), '(dim_neck=64)\n', (3191, 3204), False, 'from model_decoder import Decoder_ac\n'), ((3390, 3428), 'hydra.utils.to_absolute_path', 'utils.to_absolute_path', (['cfg.checkpoint'], {}), '(cfg.checkpoint)\n', (3412, 3428), True, 'import hydra.utils as utils\n'), ((3446, 3516), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': '(lambda storage, loc: storage)'}), '(checkpoint_path, map_location=lambda storage, loc: storage)\n', (3456, 3516), False, 'import torch\n'), ((3761, 3792), 'numpy.load', 'np.load', (['"""./data/mel_stats.npy"""'], {}), "('./data/mel_stats.npy')\n", (3768, 3792), True, 'import numpy as np\n'), ((5462, 5482), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (5477, 5482), False, 'import subprocess\n'), ((540, 550), 'soundfile.read', 'sf.read', (['p'], {}), '(p)\n', (547, 550), True, 'import soundfile as sf\n'), ((800, 837), 'resampy.resample', 'resampy.resample', (['wav', 'fs', 'sr'], {'axis': '(0)'}), '(wav, fs, sr, axis=0)\n', (816, 837), False, 'import resampy\n'), ((1763, 1793), 'numpy.mean', 'np.mean', (['lf0[nonzeros_indices]'], {}), '(lf0[nonzeros_indices])\n', (1770, 1793), True, 'import numpy as np\n'), ((1795, 1824), 'numpy.std', 'np.std', (['lf0[nonzeros_indices]'], {}), '(lf0[nonzeros_indices])\n', (1801, 1824), True, 'import numpy as np\n'), ((2904, 2935), 'hydra.utils.to_absolute_path', 'utils.to_absolute_path', (['out_dir'], {}), '(out_dir)\n', (2926, 2935), True, 'import hydra.utils as utils\n'), ((5154, 5200), 'subprocess.call', 'subprocess.call', (["['cp', src_wav_path, out_dir]"], {}), "(['cp', src_wav_path, out_dir])\n", (5169, 5200), False, 'import subprocess\n'), ((968, 979), 'numpy.abs', 'np.abs', (['wav'], {}), '(wav)\n', (974, 979), True, 'import numpy as np\n'), 
((3021, 3046), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3044, 3046), False, 'import torch\n'), ((4145, 4174), 'random.choice', 'random.choice', (['tar2_wav_paths'], {}), '(tar2_wav_paths)\n', (4158, 4174), False, 'import random\n'), ((4245, 4274), 'random.choice', 'random.choice', (['tar1_wav_paths'], {}), '(tar1_wav_paths)\n', (4258, 4274), False, 'import random\n'), ((4662, 4677), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4675, 4677), False, 'import torch\n'), ((4603, 4633), 'os.path.basename', 'os.path.basename', (['src_wav_path'], {}), '(src_wav_path)\n', (4619, 4633), False, 'import os\n'), ((4390, 4414), 'torch.FloatTensor', 'torch.FloatTensor', (['mel.T'], {}), '(mel.T)\n', (4407, 4414), False, 'import torch\n'), ((4453, 4475), 'torch.FloatTensor', 'torch.FloatTensor', (['lf0'], {}), '(lf0)\n', (4470, 4475), False, 'import torch\n'), ((4518, 4546), 'torch.FloatTensor', 'torch.FloatTensor', (['ref_mel.T'], {}), '(ref_mel.T)\n', (4535, 4546), False, 'import torch\n')] |
import numpy as np
from nms import nms
import cfg
from shapely.geometry import Polygon
class Averager(object):
    """Running mean over torch.Tensor elements, used to average losses."""

    def __init__(self):
        self.reset()

    def add(self, v):
        """Fold every element of tensor *v* into the running totals."""
        data = v.data
        self.n_count += data.numel()
        self.sum += data.sum()

    def reset(self):
        """Clear the accumulated sum and element count."""
        self.n_count = 0
        self.sum = 0

    def val(self):
        """Return the mean of all added elements, or 0 if none were added."""
        if self.n_count == 0:
            return 0
        return self.sum / float(self.n_count)
class eval_pre_rec_f1(object):
    '''Accumulate precision / recall / F1 score per batch, given each image's
    predicted quadrilaterals and its ground-truth boxes.'''
    def __init__(self):
        # Score-map threshold above which a pixel counts as text.
        self.pixel_threshold = float(cfg.pixel_threshold)
        self.reset()
    def reset(self):
        # Clear the per-epoch accumulators.
        self.img_num = 0
        self.pre = 0
        self.rec = 0
        self.f1_score = 0
    def val(self):
        # Mean precision / recall / F1 (in percent) over all images seen.
        mpre = self.pre / self.img_num * 100
        mrec = self.rec / self.img_num * 100
        mf1_score = self.f1_score / self.img_num * 100
        return mpre, mrec, mf1_score
    def sigmoid(self, x):
        """`y = 1 / (1 + exp(-x))`"""
        return 1 / (1 + np.exp(-x))
    def get_iou(self, g, p):
        # Intersection-over-union of two quads; 0 when either polygon is
        # invalid or the union is empty.
        g = Polygon(g)
        p = Polygon(p)
        if not g.is_valid or not p.is_valid:
            return 0
        inter = Polygon(g).intersection(Polygon(p)).area
        union = g.area + p.area - inter
        if union == 0:
            return 0
        else:
            return inter/union
    def eval_one(self, quad_scores, quad_after_nms, gt_xy, quiet=cfg.quiet):
        # Greedily match predicted quads (highest summed score first) to the
        # still-unmatched ground-truth boxes; a match requires
        # IoU >= cfg.iou_threshold.
        num_gts = len(gt_xy)
        quad_scores_no_zero = [] # drop incomplete quads; keep each survivor's summed score
        quad_after_nms_no_zero = [] # drop incomplete quads
        for score, geo in zip(quad_scores, quad_after_nms):
            if np.amin(score) > 0:
                quad_scores_no_zero.append(sum(score))
                quad_after_nms_no_zero.append(geo)
            elif not quiet:
                print('quad invalid with vertex num less then 4.')
                continue
        num_quads = len(quad_after_nms_no_zero)
        if num_quads == 0:
            return 0, 0, 0
        quad_flag = np.zeros(num_quads) # whether each predicted quad has been matched
        gt_flag = np.zeros(num_gts) # whether each ground-truth box has been matched
        # print(num_quads, '-------', num_gts)
        quad_scores_no_zero = np.array(quad_scores_no_zero)
        scores_idx = np.argsort(quad_scores_no_zero)[::-1] # quad indices ordered by descending score
        for i in range(num_quads):
            idx = scores_idx[i]
            # score = quad_scores_no_zero[idx]
            geo = quad_after_nms_no_zero[idx] # take quads in order of decreasing score
            for j in range(num_gts):
                if gt_flag[j] == 0:
                    gt_geo = gt_xy[j]
                    iou = self.get_iou(geo, gt_geo)
                    if iou >= cfg.iou_threshold:
                        gt_flag[j] = 1 # mark this ground-truth box as matched
                        quad_flag[i] = 1 # mark this quad as matched
        tp = np.sum(quad_flag)
        fp = num_quads - tp
        fn = num_gts - tp
        pre = tp / (tp + fp) # precision
        rec = tp / (tp + fn) # recall
        if pre + rec == 0:
            f1_score = 0
        else:
            f1_score = 2 * pre * rec / (pre + rec)
        # print(pre, '---', rec, '---', f1_score)
        return pre, rec, f1_score
    def add(self, out, gt_xy_list):
        # Evaluate one batch: `out` is the raw network output (N, 7, 64, 64),
        # `gt_xy_list` holds the ground-truth quads of each image.
        self.img_num += len(gt_xy_list)
        ys = out.cpu().detach().numpy() # (N, 7, 64, 64)
        if ys.shape[1] == 7:
            ys = ys.transpose((0, 2, 3, 1)) # NCHW->NHWC
        for y, gt_xy in zip(ys, gt_xy_list): # iterate each image's prediction together with its boxes
            y[:, :, :3] = self.sigmoid(y[:, :, :3])
            cond = np.greater_equal(y[:, :, 0], self.pixel_threshold)
            activation_pixels = np.where(cond)
            quad_scores, quad_after_nms = nms(y, activation_pixels)
            # nms returns quad_scores shaped like [[a, a, b, b], [c, c, d, d], ...]:
            # four scores per quad, where the first two (head) are equal and
            # the last two (tail) are equal.
            if (len(quad_after_nms) == 0) or (sum(sum(quad_scores)) == 0):
                if not cfg.quiet:
                    print('NMS后不存在矩形框!!')
                continue
            else:
                pre, rec, f1_score = self.eval_one(quad_scores, quad_after_nms, gt_xy)
                self.pre += pre
                self.rec += rec
                self.f1_score += f1_score
| [
"numpy.amin",
"numpy.where",
"numpy.argsort",
"numpy.array",
"shapely.geometry.Polygon",
"numpy.sum",
"numpy.zeros",
"nms.nms",
"numpy.exp",
"numpy.greater_equal"
] | [((1223, 1233), 'shapely.geometry.Polygon', 'Polygon', (['g'], {}), '(g)\n', (1230, 1233), False, 'from shapely.geometry import Polygon\n'), ((1246, 1256), 'shapely.geometry.Polygon', 'Polygon', (['p'], {}), '(p)\n', (1253, 1256), False, 'from shapely.geometry import Polygon\n'), ((2168, 2187), 'numpy.zeros', 'np.zeros', (['num_quads'], {}), '(num_quads)\n', (2176, 2187), True, 'import numpy as np\n'), ((2221, 2238), 'numpy.zeros', 'np.zeros', (['num_gts'], {}), '(num_gts)\n', (2229, 2238), True, 'import numpy as np\n'), ((2329, 2358), 'numpy.array', 'np.array', (['quad_scores_no_zero'], {}), '(quad_scores_no_zero)\n', (2337, 2358), True, 'import numpy as np\n'), ((2959, 2976), 'numpy.sum', 'np.sum', (['quad_flag'], {}), '(quad_flag)\n', (2965, 2976), True, 'import numpy as np\n'), ((2380, 2411), 'numpy.argsort', 'np.argsort', (['quad_scores_no_zero'], {}), '(quad_scores_no_zero)\n', (2390, 2411), True, 'import numpy as np\n'), ((3661, 3711), 'numpy.greater_equal', 'np.greater_equal', (['y[:, :, 0]', 'self.pixel_threshold'], {}), '(y[:, :, 0], self.pixel_threshold)\n', (3677, 3711), True, 'import numpy as np\n'), ((3744, 3758), 'numpy.where', 'np.where', (['cond'], {}), '(cond)\n', (3752, 3758), True, 'import numpy as np\n'), ((3801, 3826), 'nms.nms', 'nms', (['y', 'activation_pixels'], {}), '(y, activation_pixels)\n', (3804, 3826), False, 'from nms import nms\n'), ((1169, 1179), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1175, 1179), True, 'import numpy as np\n'), ((1363, 1373), 'shapely.geometry.Polygon', 'Polygon', (['p'], {}), '(p)\n', (1370, 1373), False, 'from shapely.geometry import Polygon\n'), ((1800, 1814), 'numpy.amin', 'np.amin', (['score'], {}), '(score)\n', (1807, 1814), True, 'import numpy as np\n'), ((1339, 1349), 'shapely.geometry.Polygon', 'Polygon', (['g'], {}), '(g)\n', (1346, 1349), False, 'from shapely.geometry import Polygon\n')] |
import numpy as np
from sklearn.utils import class_weight
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.base import clone
import optuna
from pathlib import Path
import os
class Objective:
    """Optuna objective: k-fold cross-validated negative accuracy.

    Holds the classifier to tune, its parameter search space, the fold
    count, the data arrays, and the class/sample weights used when fitting.
    Returning negative accuracy means Optuna's minimisation maximises
    accuracy.
    """
    def __init__(self, classifier, parameter_distributions, cv, x, y, class_weights, sample_weights):
        self.classifier = classifier
        self.parameter_distributions = parameter_distributions
        self.cv = cv
        self.x = x
        self.y = y
        self.class_weights = class_weights
        self.sample_weights = sample_weights
    def __call__(self, trial):
        """Sample one parameter set and return mean negative accuracy over the folds."""
        # NOTE(review): trial._suggest is a private Optuna API — confirm it
        # still exists in the Optuna version pinned by this project.
        parameters = {name: trial._suggest(name, distribution) for name, distribution in
                      self.parameter_distributions.items()}
        score = 0.0
        for X_train, X_test in KFold(self.cv, shuffle=False).split(self.x):
            train_x_fold, train_y_fold = self.x[X_train], self.y[X_train]
            test_x_fold, test_y_fold = self.x[X_test], self.y[X_test]
            self.classifier.set_params(**parameters)
            # Keras-style wrappers take class weights and a validation split;
            # sklearn-style estimators are re-cloned and take sample weights.
            if hasattr(self.classifier, "name") and self.classifier.name == "keras_model":
                self.classifier.fit(train_x_fold, train_y_fold, self.class_weights, test_x_fold, test_y_fold)
            else:
                # clone un-fit classifier
                self.classifier = clone(self.classifier)
                self.classifier.fit(train_x_fold, train_y_fold, sample_weight=self.sample_weights[X_train])
            test_y_fold_pred = self.classifier.predict(test_x_fold)
            # Subtract: lower (more negative) objective = better accuracy.
            score -= accuracy_score(y_true=test_y_fold, y_pred=test_y_fold_pred)
        return score / self.cv
class OptunaCrossValidationSearch:
    """Hyper-parameter search via Optuna with k-fold cross validation.

    Parameters:
        classifier: estimator exposing set_params / fit / predict.
        parameter_distributions: dict mapping parameter names to Optuna
            distributions (the search space).
        cv_folds (int): number of cross-validation folds.
        n_trials (int): number of Optuna trials to run.
        sample_weight_balance (str): weighting scheme forwarded to
            sklearn's compute_class_weight (e.g. 'balanced').
    """
    def __init__(self, classifier, parameter_distributions, cv_folds, n_trials, sample_weight_balance):
        self.classifier = classifier
        self.parameter_distributions = parameter_distributions
        self.cv_folds = cv_folds
        self.n_trials = n_trials
        self.sample_weight_balance = sample_weight_balance

    def optuna_get_study(self, remove_storage=True):
        """Create (or reload) the SQLite-backed study for this classifier.

        When remove_storage is True, any existing storage file is deleted
        first so the search starts from a clean slate.
        """
        model_name = type(self.classifier).__name__
        study_name = model_name + "_optimization"
        file_storage_name = model_name + ".sqlite"
        storage = "sqlite:///" + file_storage_name
        storage_path = Path(file_storage_name)
        if remove_storage and storage_path.is_file():
            os.remove(file_storage_name)
        return optuna.create_study(study_name=study_name, load_if_exists=True, storage=storage)

    def fit(self, x, y):
        """Search the best hyper-parameters, then refit the classifier on all data."""
        x = np.array(x)
        y = np.array(y)
        unique_values = np.unique(y)
        # Fix: keyword arguments — the positional form of compute_class_weight
        # was removed in scikit-learn >= 1.0.
        class_weights = class_weight.compute_class_weight(
            class_weight=self.sample_weight_balance, classes=unique_values, y=y)
        class_weights = {i: class_weights[i] for i in range(len(unique_values))}
        # Fix: np.float was removed in NumPy >= 1.24; the builtin float is
        # the documented equivalent.
        sample_weights = np.zeros(len(y), dtype=float)
        # Assign each sample the weight of its class.
        for i, val in enumerate(y):
            for j, unique_val in enumerate(unique_values):
                if val == unique_val:
                    sample_weights[i] = class_weights[j]
                    break
        objective = Objective(self.classifier,
                              self.parameter_distributions,
                              self.cv_folds,
                              x,
                              y,
                              class_weights,
                              sample_weights)
        study = self.optuna_get_study(remove_storage=True)
        print("Searching the best hyperparameters...")
        study.optimize(objective, n_trials=self.n_trials)
        print("Finished searching the best hyperparameters...")
        study = self.optuna_get_study(remove_storage=False)
        self.classifier.set_params(**study.best_params)
        # Keras-style wrappers take class weights; sklearn-style estimators
        # are re-cloned and take per-sample weights.
        if hasattr(self.classifier, "name") and self.classifier.name == "keras_model":
            self.classifier.fit(x, y, class_weight=class_weights)
        else:
            self.classifier = clone(self.classifier)
            self.classifier.fit(x, y, sample_weight=sample_weights)
        return self

    def predict(self, x):
        """Predict with the refitted best-parameter classifier."""
        return self.classifier.predict(x)
| [
"numpy.unique",
"pathlib.Path",
"sklearn.base.clone",
"sklearn.utils.class_weight.compute_class_weight",
"numpy.array",
"sklearn.model_selection.KFold",
"sklearn.metrics.accuracy_score",
"optuna.create_study",
"os.remove"
] | [((2589, 2612), 'pathlib.Path', 'Path', (['file_storage_name'], {}), '(file_storage_name)\n', (2593, 2612), False, 'from pathlib import Path\n'), ((2725, 2810), 'optuna.create_study', 'optuna.create_study', ([], {'study_name': 'study_name', 'load_if_exists': '(True)', 'storage': 'storage'}), '(study_name=study_name, load_if_exists=True, storage=storage\n )\n', (2744, 2810), False, 'import optuna\n'), ((2845, 2856), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2853, 2856), True, 'import numpy as np\n'), ((2869, 2880), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2877, 2880), True, 'import numpy as np\n'), ((2906, 2918), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2915, 2918), True, 'import numpy as np\n'), ((2943, 3022), 'sklearn.utils.class_weight.compute_class_weight', 'class_weight.compute_class_weight', (['self.sample_weight_balance', 'unique_values', 'y'], {}), '(self.sample_weight_balance, unique_values, y)\n', (2976, 3022), False, 'from sklearn.utils import class_weight\n'), ((1587, 1646), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], {'y_true': 'test_y_fold', 'y_pred': 'test_y_fold_pred'}), '(y_true=test_y_fold, y_pred=test_y_fold_pred)\n', (1601, 1646), False, 'from sklearn.metrics import accuracy_score\n'), ((2680, 2708), 'os.remove', 'os.remove', (['file_storage_name'], {}), '(file_storage_name)\n', (2689, 2708), False, 'import os\n'), ((4244, 4266), 'sklearn.base.clone', 'clone', (['self.classifier'], {}), '(self.classifier)\n', (4249, 4266), False, 'from sklearn.base import clone\n'), ((827, 856), 'sklearn.model_selection.KFold', 'KFold', (['self.cv'], {'shuffle': '(False)'}), '(self.cv, shuffle=False)\n', (832, 856), False, 'from sklearn.model_selection import KFold\n'), ((1366, 1388), 'sklearn.base.clone', 'clone', (['self.classifier'], {}), '(self.classifier)\n', (1371, 1388), False, 'from sklearn.base import clone\n')] |
import numpy as np
from sacred import Ingredient
config_ingredient = Ingredient("cfg")
@config_ingredient.config
def cfg():
    """Sacred base configuration: every local defined here is captured by
    sacred as a config entry (model_config, seed, experiment_id, ...)."""
    # Base configuration
    model_config = {"musdb_path" : "/home/daniel/Datasets/MUSDB18", # SET MUSDB PATH HERE, AND SET CCMIXTER PATH IN CCMixter.xml
                    "estimates_path" : "/mnt/windaten/Source_Estimates", # SET THIS PATH TO WHERE YOU WANT SOURCE ESTIMATES PRODUCED BY THE TRAINED MODEL TO BE SAVED. Folder itself must exist!
                    "model_base_dir" : "checkpoints", # Base folder for model checkpoints
                    "log_dir" : "logs", # Base folder for logs files
                    "batch_size" : 16, # Batch size
                    "init_sup_sep_lr" : 1e-4, # Supervised separator learning rate
                    "epoch_it" : 2000, # Number of supervised separator steps per epoch
                    'cache_size' : 16, # Number of audio excerpts that are cached to build batches from
                    'num_workers' : 6, # Number of processes reading audio and filling up the cache
                    "duration" : 2, # Duration in seconds of the audio excerpts in the cache. Has to be at least the output length of the network!
                    'min_replacement_rate' : 16, # roughly: how many cache entries to replace at least per batch on average. Can be fractional
                    'num_layers' : 12, # How many U-Net layers
                    'filter_size' : 15, # For Wave-U-Net: Filter size of conv in downsampling block
                    'merge_filter_size' : 5, # For Wave-U-Net: Filter size of conv in upsampling block
                    'num_initial_filters' : 24, # Number of filters for convolution in first layer of network
                    "num_frames": 16384, # DESIRED number of time frames in the output waveform per samples (could be changed when using valid padding)
                    'expected_sr': 22050, # Downsample all audio input to this sampling rate
                    'mono_downmix': True, # Whether to downsample the audio input
                    'output_type' : 'direct', # Type of output layer, either "direct" or "difference". Direct output: Each source is result of tanh activation and independent. DIfference: Last source output is equal to mixture input - sum(all other sources)
                    'context' : False, # Type of padding for convolutions in separator. If False, feature maps double or half in dimensions after each convolution, and convolutions are padded with zeros ("same" padding). If True, convolution is only performed on the available mixture input, thus the output is smaller than the input
                    'network' : 'unet', # Type of network architecture, either unet (our model) or unet_spectrogram (Jansson et al 2017 model)
                    'upsampling' : 'linear', # Type of technique used for upsampling the feature maps in a unet architecture, either 'linear' interpolation or 'learned' filling in of extra samples
                    'task' : 'voice', # Type of separation task. 'voice' : Separate music into voice and accompaniment. 'multi_instrument': Separate music into guitar, bass, vocals, drums and other (Sisec)
                    'augmentation' : True, # Random attenuation of source signals to improve generalisation performance (data augmentation)
                    'raw_audio_loss' : True, # Only active for unet_spectrogram network. True: L2 loss on audio. False: L1 loss on spectrogram magnitudes for training and validation and test loss
                    'worse_epochs' : 20, # Patience for early stoppping on validation set
                    }
    seed=1337
    experiment_id = np.random.randint(0,1000000)
    # Derived entries: source count depends on the task, channel count on downmixing.
    model_config["num_sources"] = 4 if model_config["task"] == "multi_instrument" else 2
    model_config["num_channels"] = 1 if model_config["mono_downmix"] else 2
@config_ingredient.named_config
def baseline():
    # Vanilla model: keeps every default from the base config.
    print("Training baseline model")
@config_ingredient.named_config
def baseline_diff():
    # Difference output: last source = mixture - sum(other sources).
    print("Training baseline model with difference output")
    model_config = {
        "output_type" : "difference"
    }
@config_ingredient.named_config
def baseline_context():
    # Difference output plus valid (un-padded) convolutions using input context.
    print("Training baseline model with difference output and input context (valid convolutions)")
    model_config = {
        "output_type" : "difference",
        "context" : True
    }
@config_ingredient.named_config
def baseline_stereo():
    # Stereo variant of baseline_context (mono_downmix disabled).
    # NOTE(review): the print message below does not mention stereo — likely
    # copied from baseline_context.
    print("Training baseline model with difference output and input context (valid convolutions)")
    model_config = {
        "output_type" : "difference",
        "context" : True,
        "mono_downmix" : False
    }
@config_ingredient.named_config
def full():
    # Best singing-voice model: difference output, context, stereo, learned upsampling.
    print("Training full singing voice separation model, with difference output and input context (valid convolutions) and stereo input/output, and learned upsampling layer")
    model_config = {
        "output_type" : "difference",
        "context" : True,
        "upsampling": "learned",
        "mono_downmix" : False
    }
@config_ingredient.named_config
def full_44KHz():
    # Same as `full` but at a 44.1 kHz sampling rate.
    print("Training full singing voice separation model, with difference output and input context (valid convolutions) and stereo input/output, and learned upsampling layer, and 44.1 KHz sampling rate")
    model_config = {
        "output_type" : "difference",
        "context" : True,
        "upsampling": "learned",
        "mono_downmix" : False,
        "expected_sr" : 44100
    }
@config_ingredient.named_config
def baseline_context_smallfilter_deep():
    # Deeper (14-layer) network with smaller filters and longer excerpts.
    model_config = {
        "output_type": "difference",
        "context": True,
        "num_layers" : 14,
        "duration" : 7,
        "filter_size" : 5,
        "merge_filter_size" : 1
    }
@config_ingredient.named_config
def full_multi_instrument():
    # Multi-instrument (4-source) separation with the best model settings.
    print("Training multi-instrument separation with best model")
    model_config = {
        "output_type": "difference",
        "context": True,
        "upsampling": "linear",
        "mono_downmix": False,
        "task" : "multi_instrument"
    }
@config_ingredient.named_config
def baseline_comparison():
    # Comparison setup at 8192 Hz with long excerpts and a small batch.
    model_config = {
        "batch_size": 4, # Less output since model is so big. Doesn't matter since the model's output is not dependent on its output or input size (only convolutions)
        "cache_size": 4,
        "min_replacement_rate" : 4,

        "output_type": "difference",
        "context": True,
        "num_frames" : 768*127 + 1024,
        "duration" : 13,
        "expected_sr" : 8192,
        "num_initial_filters" : 34
    }
@config_ingredient.named_config
def unet_spectrogram():
    # Spectrogram U-Net (Jansson et al. 2017) baseline at 8192 Hz.
    model_config = {
        "batch_size": 4, # Less output since model is so big.
        "cache_size": 4,
        "min_replacement_rate" : 4,
        "network" : "unet_spectrogram",
        "num_layers" : 6,
        "expected_sr" : 8192,
        "num_frames" : 768 * 127 + 1024, # hop_size * (time_frames_of_spectrogram_input - 1) + fft_length
        "duration" : 13,
        "num_initial_filters" : 16
    }
@config_ingredient.named_config
def unet_spectrogram_l1():
    # Spectrogram U-Net trained with a magnitude (L1) loss.
    # NOTE(review): the base config exposes 'raw_audio_loss', not 'loss' —
    # confirm the training code reads a 'loss' key; otherwise
    # 'raw_audio_loss': False may be the intended switch.
    model_config = {
        "batch_size": 4, # Less output since model is so big.
        "cache_size": 4,
        "min_replacement_rate" : 4,
        "network" : "unet_spectrogram",
        "num_layers" : 6,
        "expected_sr" : 8192,
        "num_frames" : 768 * 127 + 1024, # hop_size * (time_frames_of_spectrogram_input - 1) + fft_length
        "duration" : 13,
        "num_initial_filters" : 16,
        "loss" : "magnitudes"
    }
"numpy.random.randint",
"sacred.Ingredient"
] | [((70, 87), 'sacred.Ingredient', 'Ingredient', (['"""cfg"""'], {}), "('cfg')\n", (80, 87), False, 'from sacred import Ingredient\n'), ((3676, 3705), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000000)'], {}), '(0, 1000000)\n', (3693, 3705), True, 'import numpy as np\n')] |
# Reverse photography
##h3D-II sensor size
# 36 * 48 mm, 0.036 x 0.048m
## focal length
# 28mm, 0.028m
## multiplier
# 1.0
from skimage import io
import matplotlib.pyplot as plt
import numpy as np
import cv2
from scipy.spatial import distance
import shapefile as shp
def buildshape(corners, filename):
    """Placeholder: write a polygon shapefile for the projected image
    footprint (vertices in world coordinates), named after the image.

    Currently a no-op -- the intended pyshp calls are kept below for
    reference. Sub-critical feature.
    """
    # shape = shp.writer(shape.POLYGON)
    # shape.poly(parts=[[proj_coords[:, 0], proj_coords[:, 1]],
    #                   [proj_coords[:, 1], proj_coords[:, 2]],
    #                   [proj_coords[:, 3], proj_coords[:, 2]],
    #                   [proj_coords[:, 0], proj_coords[:, 3]]])
    # shape.save("./", filename)
def worldfile(corners, im_pix, filename, filepath):
    """Write an ESRI world file (.jpgw) for a warped image.

    Parameters
    ----------
    corners : 2 x N array of projected corner coordinates in world units
        (*not* a bounding box)
    im_pix : (pix_x, pix_y) pixel resolution in metres
    filename : image file name; the last four characters (the '.jpg'
        extension) are stripped before writing
    filepath : directory path the world file is written into

    The six lines are: X pixel size, two rotation terms (zero), negative
    Y pixel size, then the world X/Y of the top-left corner.
    Reference:
    http://support.esri.com/en/knowledgebase/techarticles/detail/17489
    """
    stem = filename[:-4]  # drop the 4-character extension
    params = np.zeros([6, 1])
    params[0] = im_pix[0]               # X pixel size (m)
    params[3] = -im_pix[1]              # Y pixel size (m, negative: north-up)
    params[4] = np.min(corners[0, :])   # top-left corner X
    params[5] = np.max(corners[1, :])   # top-left corner Y
    np.savetxt(filepath + stem + '.jpgw', params, "%.3f")
#------
# 2D homogeneous vectors and transformations
def hom2(x, y):
    """Return (x, y) as a 2D homogeneous column vector [x, y, 1]^T."""
    return np.matrix([[x], [y], [1]])
def scale2d(s_x, s_y):
    """3x3 matrix that scales 2D homogeneous coordinates by (s_x, s_y)."""
    return np.matrix(np.diag([s_x, s_y, 1]))
def trans2d(t_x, t_y):
    """3x3 matrix moving a 2D hom. vector [v_x, v_y, 1] to
    [v_x + t_x, v_y + t_y, 1]."""
    rows = [[1, 0, t_x],
            [0, 1, t_y],
            [0, 0, 1]]
    return np.matrix(rows)
#-----
# 3D homogeneous vectors and transformations
def hom3(x, y, z):
    """Return (x, y, z) as a 3D homogeneous column vector [x, y, z, 1]^T."""
    return np.matrix([[x], [y], [z], [1]])
def unhom(v):
    """Dehomogenize: map (v_x, v_y, v_z, v_w) to (v_x, v_y, v_z) / v_w,
    returned as a 3x1 column."""
    w = v[-1]
    return v[:-1] / w
def trans3d(t):
    """4x4 translation for 3D hom. vectors: maps [v_x, v_y, v_z, 1] to
    [v_x + t_x, v_y + t_y, v_z + t_z, 1], where t is a 4x1 homogeneous
    translation column."""
    # [I3; 0] (4 x 3) with t appended as the final column gives the
    # standard homogeneous translation matrix.
    base = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])
    return np.hstack([base, t])
def persp3d(v_x, v_y, v_z):
    """Homogeneous perspective transform for a viewer at (v_x, v_y)
    relative to the display surface centre, with focal distance v_z."""
    cx = -v_x / v_z
    cy = -v_y / v_z
    rows = [[1, 0, cx, 0],
            [0, 1, cy, 0],
            [0, 0, 1, 0],
            [0, 0, 1 / v_z, 0]]
    return np.matrix(rows)
def cross(a, b):
    """3D cross product of two homogeneous points, returned as a 3x1
    column matrix."""
    u = unhom(a).T
    w = unhom(b).T
    return np.matrix(np.cross(u, w)).T
# Origin of the world frame as a homogeneous 3D point: [0, 0, 0, 1]^T.
ZERO = hom3(0,0,0)
#-------
# Homogeneous lines, planes, and their intersections
# Based on the discussion here:
# http://math.stackexchange.com/questions/400268/equation-for-a-line-through-a-plane-in-homogeneous-coordinates
def line(v, x):
    """Homogeneous (Pluecker) line with direction v through point x,
    as the 6x1 stack (v3, x `cross` v)."""
    direction = unhom(v)
    moment = cross(x, v)
    return np.vstack([direction, moment])
def plane(n, x):
    """Homogeneous plane with normal n passing through point x,
    represented as the 4x1 stack (n3, -x3.n3)."""
    normal = unhom(n)
    point = unhom(x)
    offset = -(point.T * normal)
    return np.vstack([normal, offset])
def meet(P):
    """Compute the meet operator for the given plane P.

    P is a 4x1 homogeneous plane (n, d); the returned 4x6 matrix maps a
    homogeneous (Pluecker) line to the line/plane intersection point.
    """
    # n is the 3x1 normal sub-matrix; d the scalar plane offset.
    n, d = P[:3], P[3].item(0,0)
    # NOTE(review): because P is an np.matrix, n[0]/n[1]/n[2] are 1x1
    # sub-matrices, so nx is assembled from nested matrices -- this relies
    # on legacy np.matrix construction behaviour; verify under current
    # NumPy. Also the sign pattern differs from the usual skew-symmetric
    # cross matrix (-n[1] where +n[1] is conventional) -- presumably per
    # the derivation referenced above; confirm.
    nx = np.matrix([[0, -n[2], -n[1]],
               [n[2], 0, -n[0]],
               [-n[1], n[0], 0]])
    left = np.vstack([np.diag([-d, -d, -d]), n.T])
    right = np.vstack([nx, np.matrix([0, 0, 0])])
    return np.hstack([left, right])
def intersect(L, P):
    """Point of intersection between line L and plane P, in homogeneous
    coordinates (apply unhom() to dehomogenize)."""
    M = meet(P)
    return M * L
#-------
# Camera
class Attitude:
    """Aircraft attitude (heading, pitch, roll). Input in degrees,
    stored in radians.

    ADS: adjusted heading by 180 to keep corner procession intact.
    TL_im -> TL_grnd, TR_im -> TR_gnd, BR_im -> BR_gnd, BL_im -> BR_gnd
    """

    def __init__(self, heading, pitch, roll):
        """Store the three angles, converting degrees to radians."""
        self.heading = heading * np.pi / 180.0
        self.pitch = pitch * np.pi / 180.0
        self.roll = roll * np.pi / 180.0

    def rotation(self):
        """4 x 4 rotation matrix for 3D hom. vectors: RZ(heading) *
        RY(roll) * RX(pitch)."""
        ch, sh = np.cos(self.heading), np.sin(self.heading)
        cp, sp = np.cos(self.pitch), np.sin(self.pitch)
        cr, sr = np.cos(self.roll), np.sin(self.roll)
        pitch_mat = np.matrix([[1, 0, 0, 0],
                               [0, cp, -sp, 0],
                               [0, sp, cp, 0],
                               [0, 0, 0, 1]])
        roll_mat = np.matrix([[cr, 0, sr, 0],
                              [0, 1, 0, 0],
                              [-sr, 0, cr, 0],
                              [0, 0, 0, 1]])
        head_mat = np.matrix([[ch, -sh, 0, 0],
                              [sh, ch, 0, 0],
                              [0, 0, 1, 0],
                              [0, 0, 0, 1]])
        return head_mat * roll_mat * pitch_mat
    # Historical note: the original rotations applied roll in RX and
    # pitch in RY; the axes were later swapped to the form above.
class Sensor:
    """Physical camera sensor: pixel grid, sensor ("screen") size in
    metres, and focal length in metres."""

    def __init__(self, pixel_dim, sensor_dim, focal_length):
        """New sensor of given focal length, sensor dimensions (width, height)
        in metres, and pixel dimensions (width, height)."""
        self.pixels = pixel_dim
        self.screen = sensor_dim
        self.f = focal_length

    def pixel_to_screen(self):
        """Returns a 3x3 matrix transformation that takes 2D hom. pixel
        coordinates to 2D hom. sensor coordinates."""
        px, sc = self.pixels, self.screen
        # Move the origin to the sensor centre, then rescale pixels to metres.
        T_centre = trans2d(-px[0]/2, -px[1]/2)
        T_scale = scale2d(sc[0]/px[0], sc[1]/px[1])
        return T_scale * T_centre

    def fov_angle(self):
        """Horizontal FOV angle (radians) for this sensor.

        BUG FIX: the original read ``self.w``, an attribute that is never
        assigned (AttributeError when called); the sensor width is
        ``self.screen[0]``.
        """
        return 2 * np.arctan(self.screen[0] / (2 * self.f))
class Camera:
    """Pinhole camera: a world position (3D hom. point from hom3), an
    Attitude, and a Sensor; projects pixel coordinates onto a world plane."""

    def __init__(self, position, attitude, sensor):
        self.position = position  # 3D homogeneous column vector
        self.attitude = attitude  # Attitude instance
        self.sensor = sensor      # Sensor instance

    def pix_to_world(self):
        """Compute the matrix transform from image to world coordinates.
        Returns a 4 x 3 matrix that converts 2D hom. coords to 3D hom. coords."""
        T_px_to_sc = self.sensor.pixel_to_screen()
        # Embed 2D hom. (x, y, 1) as 3D hom. (x, y, 0, 1): the sensor plane
        # sits at z = 0 before the focal-length offset is applied.
        T_2d3d = np.array([[1, 0, 0],
                           [0, 1, 0],
                           [0, 0, 0],
                           [0, 0, 1]],
                          dtype = 'float64' )
        # Offset by focal length, rotate by attitude, translate to position.
        T_trans_s = trans3d(hom3(0, 0, self.sensor.f))
        T_att = self.attitude.rotation()
        T_trans_w = trans3d(self.position)
        return T_trans_w * T_att * T_trans_s * T_2d3d * T_px_to_sc

    def project(self, r, P):
        """Calculate (unhom.) 3D point on plane P that corresponds to pixel
        coordinate given in hom. 2D coords by r."""
        # Contruct a line through pixel position in world and camera in world
        r_w = self.pix_to_world() * r
        # Direction from the pixel-in-world toward the camera; adding ZERO
        # ([0,0,0,1]^T) keeps the w component at 1 after the subtraction.
        v = ZERO + self.position - r_w
        L = line(v, r_w)
        # Get location of project on surface of plane
        return unhom(intersect(L, P))
#===========
# Set up example camera and ground plane
# Camera sensor is 640 x 480 pixel sensor that is 48mm x 36mm with focal length
# of 28mm and is located at (1000, 1000, 100) with a pitch of -15 degrees.
#
#test data for image 20121023_f13_0044.jpg
# local XYZ
#74.81761600 -55.15724800 303.97706400
#HPR
#306.977 -3.119 1.668
#need to know LiDAR mean range for the flight - let's say it is -30m
# relative to the ellipsoid... so we add that to aircraft Z.
# Sensor constants -- per the header notes: 8176 x 6132 px, 48 x 36 mm
# sensor (H3D-II style), 28 mm focal length.
xpix = 8176
ypix = 6132
sensor_x = 0.048
sensor_y = 0.036
focal_len = 0.028
# Set up corners of the image in pixel coordinates
botleft = hom2(0, 6132)
topleft = hom2(0, 0)
botright = hom2(8176, 6132)
topright =hom2(8176, 0)
# Corner order TL, TR, BR, BL -- matches the projection order used below.
raw_coords = np.hstack([topleft, topright, botright, botleft])
print("Pixel Coordinates:\n{}".format(raw_coords))
# Ground plane is z=0
ground = plane(hom3(0,0,1), hom3(0,0,0))
im_dir = '../SIPEX2_f9_test/'
trajectory_file = '../SIPEX2_f9_test/20121003_f9_survey_testims_utm.txt'
imlist = '../SIPEX2_f9_test/imnames.txt'
# Mean LiDAR surface height (m); used to turn aircraft Z into height
# above ground -- see the header notes.
lidar_z = -2.5
# this will be replaced by an estimate for each camera centre...
cameracentres = np.genfromtxt(trajectory_file)
# Apply boresight misalignment corrections (heading/pitch/roll offsets
# between the camera and the navigation frame), determined on the 2012
# Cambridge calibration flight. Can be re-derived empirically if needed.
h_adj = -1.93
p_adj = 1.8292
r_adj = 1.2262
# Trajectory columns 6/5/4 hold heading/pitch/roll respectively (see the
# per-image reads in the main loop) -- TODO confirm against the file writer.
cameracentres[:,6] = cameracentres[:,6] + h_adj
cameracentres[:,5] = cameracentres[:,5] + p_adj
cameracentres[:,4] = cameracentres[:,4] + r_adj
with open(imlist) as f:
    image_list = f.read().splitlines()
# Row index into cameracentres, advanced once per image in the loop below.
i = 0
# Per-image processing: project each image footprint onto the ground
# plane, warp the image into a north-up world frame, and write the
# warped image plus a georeferencing world file.
for image in image_list:
    # Trajectory row layout (cols 1..6): X, Y, Z, roll, pitch, heading --
    # presumably; verify against the trajectory file writer.
    flight_x = cameracentres[i, 1]
    flight_y = cameracentres[i, 2]
    flight_z = cameracentres[i, 3]
    flight_h = cameracentres[i, 6]
    flight_p = cameracentres[i, 5]
    flight_r = cameracentres[i, 4]
    # Camera height above the (LiDAR-estimated) ground surface.
    range_to_ground = flight_z - lidar_z
    print("camera E:\n{}".format(flight_x))
    print("camera N:\n{}".format(flight_y))
    print("camera U:\n{}".format(range_to_ground))
    print("camera H:\n{}".format(flight_h))
    print("camera P:\n{}".format(flight_p))
    print("camera R:\n{}".format(flight_r))
    # Heading rotated 180 deg so the image->ground corner ordering is
    # preserved (see the Attitude docstring).
    camera = Camera(
        hom3(flight_x, flight_y, range_to_ground),
        Attitude(flight_h - 180, flight_p, flight_r),
        Sensor((xpix, ypix), (sensor_x, sensor_y), focal_len))
    # Project the four pixel corners onto the ground plane (world coords).
    proj_coords = np.hstack([
        camera.project(topleft, ground), camera.project(topright, ground),
        camera.project(botright, ground), camera.project(botleft, ground)
    ])
    print("Ground Coordinates:\n{}".format(proj_coords))
    # Ground-sample distance: footprint side lengths (m) over pixel counts.
    toplen = distance.euclidean(proj_coords[:, 0], proj_coords[:, 1])
    rightlen = distance.euclidean(proj_coords[:, 1], proj_coords[:, 2])
    botlen = distance.euclidean(proj_coords[:, 3], proj_coords[:, 2])
    leftlen = distance.euclidean(proj_coords[:, 0], proj_coords[:, 3])
    worldres_top = toplen / xpix
    worldres_bot = botlen / xpix
    worldres_x = np.mean([worldres_top, worldres_bot])
    print("mean X pixel resolution:\n{}".format(worldres_x))
    worldres_left = leftlen / ypix
    worldres_right = rightlen / ypix
    worldres_y = np.mean([worldres_left, worldres_right])
    print("mean Y pixel resolution:\n{}".format(worldres_y))
    # World-space extent of the projected footprint.
    # NOTE(review): uses max - abs(min); equivalent to max - min only while
    # coordinates are positive (true for UTM) -- confirm if reused elsewhere.
    world_bbox_x = np.ceil(np.max(proj_coords[0, :]) - np.abs(np.min(proj_coords[0, :])))
    world_bbox_y = np.ceil(np.max(proj_coords[1, :]) - np.abs(np.min(proj_coords[1, :])))
    print("world bbox X:\n{}".format(world_bbox_x))
    print("world bbox Y:\n{}".format(world_bbox_y))
    pix_bbox_x = np.ceil(world_bbox_x / worldres_x)
    pix_bbox_y = np.ceil(world_bbox_y / worldres_y)
    print("pixel bbox X:\n{}".format(pix_bbox_x))
    print("pixel bbox Y:\n{}".format(pix_bbox_y))
    # Problem here - we're not projecting from the centroid of a bounding
    # box, so start the display camera at an arbitrary x/y instead.
    camera2 = Camera(
        hom3(10000, 10000, flight_z / worldres_x),
        Attitude(flight_h - 180, flight_p, flight_r),
        Sensor((xpix, ypix), (sensor_x, sensor_y), focal_len))
    im_plot_coords = np.hstack([
        camera2.project(topleft, ground), camera2.project(topright, ground),
        camera2.project(botright, ground), camera2.project(botleft, ground)
    ])
    # Recompute the bounding box from the pixel dims in im_plot_coords.
    pix_bbox_x = np.max(np.ceil(im_plot_coords[0, :])) - np.min(np.floor(im_plot_coords[0, :]))
    pix_bbox_y = np.max(np.ceil(im_plot_coords[1, :])) - np.min(np.floor(im_plot_coords[1, :]))
    print("pixel bbox X:\n{}".format(pix_bbox_x))
    print("pixel bbox Y:\n{}".format(pix_bbox_y))
    from_coords = np.float32(raw_coords[0:2, :])
    # Shift the projected corners so the minimum X/Y lands at pixel (0, 0).
    trans_coords = np.zeros_like(im_plot_coords)
    trans_coords[0, :] = im_plot_coords[0, :] - np.min(im_plot_coords[0, :])
    trans_coords[1, :] = im_plot_coords[1, :] - np.min(im_plot_coords[1, :])
    to_coords = np.float32(trans_coords[0:2, :])
    img = io.imread(im_dir + image)
    p_tform = cv2.getPerspectiveTransform(from_coords.T, to_coords.T)
    # BUG FIX: np.int was removed in NumPy 1.24 -- it was always just an
    # alias of the builtin int, so use that directly.
    img_rot = cv2.warpPerspective(img, p_tform, (int(pix_bbox_x), int(pix_bbox_y)), cv2.INTER_LANCZOS4)
    im_pix = img_rot.shape
    # Punch out a little world file so the warped image is georeferenced.
    worldfile(proj_coords, (worldres_x, worldres_y), image, '../warped/')
    cv2.imwrite('../warped/' + image, img_rot)
    i = i + 1
| [
"numpy.hstack",
"numpy.array",
"numpy.sin",
"numpy.genfromtxt",
"numpy.mean",
"numpy.cross",
"numpy.max",
"numpy.vstack",
"numpy.min",
"numpy.arctan",
"numpy.ceil",
"cv2.getPerspectiveTransform",
"numpy.floor",
"skimage.io.imread",
"numpy.cos",
"numpy.savetxt",
"numpy.int",
"cv2.im... | [((9338, 9387), 'numpy.hstack', 'np.hstack', (['[topleft, topright, botright, botleft]'], {}), '([topleft, topright, botright, botleft])\n', (9347, 9387), True, 'import numpy as np\n'), ((9747, 9777), 'numpy.genfromtxt', 'np.genfromtxt', (['trajectory_file'], {}), '(trajectory_file)\n', (9760, 9777), True, 'import numpy as np\n'), ((1370, 1386), 'numpy.zeros', 'np.zeros', (['[6, 1]'], {}), '([6, 1])\n', (1378, 1386), True, 'import numpy as np\n'), ((1594, 1615), 'numpy.min', 'np.min', (['corners[0, :]'], {}), '(corners[0, :])\n', (1600, 1615), True, 'import numpy as np\n'), ((1683, 1704), 'numpy.max', 'np.max', (['corners[1, :]'], {}), '(corners[1, :])\n', (1689, 1704), True, 'import numpy as np\n'), ((1805, 1865), 'numpy.savetxt', 'np.savetxt', (["(filepath + filename + '.jpgw')", 'world_arr', '"""%.3f"""'], {}), "(filepath + filename + '.jpgw', world_arr, '%.3f')\n", (1815, 1865), True, 'import numpy as np\n'), ((2122, 2170), 'numpy.matrix', 'np.matrix', (['[[s_x, 0, 0], [0, s_y, 0], [0, 0, 1]]'], {}), '([[s_x, 0, 0], [0, s_y, 0], [0, 0, 1]])\n', (2131, 2170), True, 'import numpy as np\n'), ((2336, 2384), 'numpy.matrix', 'np.matrix', (['[[1, 0, t_x], [0, 1, t_y], [0, 0, 1]]'], {}), '([[1, 0, t_x], [0, 1, t_y], [0, 0, 1]])\n', (2345, 2384), True, 'import numpy as np\n'), ((2838, 2893), 'numpy.matrix', 'np.matrix', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]])\n', (2847, 2893), True, 'import numpy as np\n'), ((2905, 2922), 'numpy.hstack', 'np.hstack', (['[I, t]'], {}), '([I, t])\n', (2914, 2922), True, 'import numpy as np\n'), ((3289, 3385), 'numpy.matrix', 'np.matrix', (['[[1, 0, -v_x / v_z, 0], [0, 1, -v_y / v_z, 0], [0, 0, 1, 0], [0, 0, 1 / v_z, 0]\n ]'], {}), '([[1, 0, -v_x / v_z, 0], [0, 1, -v_y / v_z, 0], [0, 0, 1, 0], [0, \n 0, 1 / v_z, 0]])\n', (3298, 3385), True, 'import numpy as np\n'), ((4210, 4239), 'numpy.vstack', 'np.vstack', (['[n3, -(x3.T * n3)]'], {}), '([n3, -(x3.T * n3)])\n', 
(4219, 4239), True, 'import numpy as np\n'), ((4355, 4421), 'numpy.matrix', 'np.matrix', (['[[0, -n[2], -n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]]'], {}), '([[0, -n[2], -n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]])\n', (4364, 4421), True, 'import numpy as np\n'), ((4588, 4612), 'numpy.hstack', 'np.hstack', (['[left, right]'], {}), '([left, right])\n', (4597, 4612), True, 'import numpy as np\n'), ((11420, 11476), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['proj_coords[:, 0]', 'proj_coords[:, 1]'], {}), '(proj_coords[:, 0], proj_coords[:, 1])\n', (11438, 11476), False, 'from scipy.spatial import distance\n'), ((11490, 11546), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['proj_coords[:, 1]', 'proj_coords[:, 2]'], {}), '(proj_coords[:, 1], proj_coords[:, 2])\n', (11508, 11546), False, 'from scipy.spatial import distance\n'), ((11559, 11615), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['proj_coords[:, 3]', 'proj_coords[:, 2]'], {}), '(proj_coords[:, 3], proj_coords[:, 2])\n', (11577, 11615), False, 'from scipy.spatial import distance\n'), ((11628, 11684), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['proj_coords[:, 0]', 'proj_coords[:, 3]'], {}), '(proj_coords[:, 0], proj_coords[:, 3])\n', (11646, 11684), False, 'from scipy.spatial import distance\n'), ((11767, 11804), 'numpy.mean', 'np.mean', (['[worldres_top, worldres_bot]'], {}), '([worldres_top, worldres_bot])\n', (11774, 11804), True, 'import numpy as np\n'), ((11961, 12001), 'numpy.mean', 'np.mean', (['[worldres_left, worldres_right]'], {}), '([worldres_left, worldres_right])\n', (11968, 12001), True, 'import numpy as np\n'), ((12405, 12439), 'numpy.ceil', 'np.ceil', (['(world_bbox_x / worldres_x)'], {}), '(world_bbox_x / worldres_x)\n', (12412, 12439), True, 'import numpy as np\n'), ((12457, 12491), 'numpy.ceil', 'np.ceil', (['(world_bbox_y / worldres_y)'], {}), '(world_bbox_y / worldres_y)\n', (12464, 12491), True, 'import numpy as np\n'), ((13616, 
13646), 'numpy.float32', 'np.float32', (['raw_coords[0:2, :]'], {}), '(raw_coords[0:2, :])\n', (13626, 13646), True, 'import numpy as np\n'), ((13811, 13840), 'numpy.zeros_like', 'np.zeros_like', (['im_plot_coords'], {}), '(im_plot_coords)\n', (13824, 13840), True, 'import numpy as np\n'), ((14013, 14045), 'numpy.float32', 'np.float32', (['trans_coords[0:2, :]'], {}), '(trans_coords[0:2, :])\n', (14023, 14045), True, 'import numpy as np\n'), ((14113, 14138), 'skimage.io.imread', 'io.imread', (['(im_dir + image)'], {}), '(im_dir + image)\n', (14122, 14138), False, 'from skimage import io\n'), ((14158, 14213), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['from_coords.T', 'to_coords.T'], {}), '(from_coords.T, to_coords.T)\n', (14185, 14213), False, 'import cv2\n'), ((14786, 14828), 'cv2.imwrite', 'cv2.imwrite', (["('../warped/' + image)", 'img_rot'], {}), "('../warped/' + image, img_rot)\n", (14797, 14828), False, 'import cv2\n'), ((2002, 2022), 'numpy.matrix', 'np.matrix', (['[x, y, 1]'], {}), '([x, y, 1])\n', (2011, 2022), True, 'import numpy as np\n'), ((2524, 2547), 'numpy.matrix', 'np.matrix', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (2533, 2547), True, 'import numpy as np\n'), ((7798, 7869), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]]'], {'dtype': '"""float64"""'}), "([[1, 0, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]], dtype='float64')\n", (7806, 7869), True, 'import numpy as np\n'), ((13887, 13915), 'numpy.min', 'np.min', (['im_plot_coords[0, :]'], {}), '(im_plot_coords[0, :])\n', (13893, 13915), True, 'import numpy as np\n'), ((13961, 13989), 'numpy.min', 'np.min', (['im_plot_coords[1, :]'], {}), '(im_plot_coords[1, :])\n', (13967, 13989), True, 'import numpy as np\n'), ((3590, 3610), 'numpy.cross', 'np.cross', (['a3.T', 'b3.T'], {}), '(a3.T, b3.T)\n', (3598, 3610), True, 'import numpy as np\n'), ((4498, 4519), 'numpy.diag', 'np.diag', (['[-d, -d, -d]'], {}), '([-d, -d, -d])\n', (4505, 4519), True, 'import numpy 
as np\n'), ((4554, 4574), 'numpy.matrix', 'np.matrix', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4563, 4574), True, 'import numpy as np\n'), ((7348, 7380), 'numpy.arctan', 'np.arctan', (['(self.w / (2 * self.f))'], {}), '(self.w / (2 * self.f))\n', (7357, 7380), True, 'import numpy as np\n'), ((12125, 12150), 'numpy.max', 'np.max', (['proj_coords[0, :]'], {}), '(proj_coords[0, :])\n', (12131, 12150), True, 'import numpy as np\n'), ((12213, 12238), 'numpy.max', 'np.max', (['proj_coords[1, :]'], {}), '(proj_coords[1, :])\n', (12219, 12238), True, 'import numpy as np\n'), ((13290, 13319), 'numpy.ceil', 'np.ceil', (['im_plot_coords[0, :]'], {}), '(im_plot_coords[0, :])\n', (13297, 13319), True, 'import numpy as np\n'), ((13329, 13359), 'numpy.floor', 'np.floor', (['im_plot_coords[0, :]'], {}), '(im_plot_coords[0, :])\n', (13337, 13359), True, 'import numpy as np\n'), ((13384, 13413), 'numpy.ceil', 'np.ceil', (['im_plot_coords[1, :]'], {}), '(im_plot_coords[1, :])\n', (13391, 13413), True, 'import numpy as np\n'), ((13423, 13453), 'numpy.floor', 'np.floor', (['im_plot_coords[1, :]'], {}), '(im_plot_coords[1, :])\n', (13431, 13453), True, 'import numpy as np\n'), ((14264, 14282), 'numpy.int', 'np.int', (['pix_bbox_x'], {}), '(pix_bbox_x)\n', (14270, 14282), True, 'import numpy as np\n'), ((14284, 14302), 'numpy.int', 'np.int', (['pix_bbox_y'], {}), '(pix_bbox_y)\n', (14290, 14302), True, 'import numpy as np\n'), ((12159, 12184), 'numpy.min', 'np.min', (['proj_coords[0, :]'], {}), '(proj_coords[0, :])\n', (12165, 12184), True, 'import numpy as np\n'), ((12247, 12272), 'numpy.min', 'np.min', (['proj_coords[1, :]'], {}), '(proj_coords[1, :])\n', (12253, 12272), True, 'import numpy as np\n'), ((5489, 5502), 'numpy.cos', 'np.cos', (['pitch'], {}), '(pitch)\n', (5495, 5502), True, 'import numpy as np\n'), ((5548, 5561), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (5554, 5561), True, 'import numpy as np\n'), ((5565, 5578), 'numpy.cos', 'np.cos', (['pitch'], {}), 
'(pitch)\n', (5571, 5578), True, 'import numpy as np\n'), ((5679, 5691), 'numpy.cos', 'np.cos', (['roll'], {}), '(roll)\n', (5685, 5691), True, 'import numpy as np\n'), ((5703, 5715), 'numpy.sin', 'np.sin', (['roll'], {}), '(roll)\n', (5709, 5715), True, 'import numpy as np\n'), ((5802, 5814), 'numpy.cos', 'np.cos', (['roll'], {}), '(roll)\n', (5808, 5814), True, 'import numpy as np\n'), ((5918, 5933), 'numpy.cos', 'np.cos', (['heading'], {}), '(heading)\n', (5924, 5933), True, 'import numpy as np\n'), ((5975, 5990), 'numpy.sin', 'np.sin', (['heading'], {}), '(heading)\n', (5981, 5990), True, 'import numpy as np\n'), ((5993, 6008), 'numpy.cos', 'np.cos', (['heading'], {}), '(heading)\n', (5999, 6008), True, 'import numpy as np\n'), ((5507, 5520), 'numpy.sin', 'np.sin', (['pitch'], {}), '(pitch)\n', (5513, 5520), True, 'import numpy as np\n'), ((5779, 5791), 'numpy.sin', 'np.sin', (['roll'], {}), '(roll)\n', (5785, 5791), True, 'import numpy as np\n'), ((5937, 5952), 'numpy.sin', 'np.sin', (['heading'], {}), '(heading)\n', (5943, 5952), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import streamlit as st
@st.cache
def infer_dtypes(df):
    """Classify each DataFrame column as 'categorical' or 'numerical'.

    A column is 'numerical' when it has more than 10 unique values and a
    non-boolean numeric dtype; everything else is 'categorical'.
    """
    dtypes = {}
    for name in df.columns:
        col = df[name]
        is_numerical = (
            col.nunique() > 10
            and not pd.api.types.is_bool_dtype(col)
            and pd.api.types.is_numeric_dtype(col)
        )
        dtypes[name] = 'numerical' if is_numerical else 'categorical'
    return dtypes
@st.cache
def numerical_cols_summary_stats(df, numerical_col_names):
    """Summary statistics for the named numeric columns: describe() output
    (with '50%' renamed to 'median') plus percentage missing and a count
    of zero values. Returns an empty DataFrame when no columns are given."""
    if not numerical_col_names:
        return pd.DataFrame()
    stats = (
        df.loc[:, numerical_col_names]
        .describe(percentiles=[0.5])
        .transpose()
        .rename({'50%': 'median'}, axis='columns')
    )
    total = df.shape[0]
    # Percentage of missing (NaN) values per column.
    stats['missing'] = [df[c].isna().sum() / total * 100 for c in numerical_col_names]
    # Number of zero entries per column.
    stats['zeros'] = [total - np.count_nonzero(df[c]) for c in numerical_col_names]
    return stats
@st.cache
def categorical_cols_summary_stats(df, categorical_col_names):
    """describe() summary for the named categorical columns, computed on a
    copy with the columns cast to 'category' dtype. Returns an empty
    DataFrame when no columns are given."""
    if not categorical_col_names:
        return pd.DataFrame()
    work = df.copy()
    for name in categorical_col_names:
        work[name] = work[name].astype('category')
    return work.loc[:, categorical_col_names].describe(include='all').transpose()
@st.cache
def get_spec(numerical_summary_df,
             categorical_summary_df,
             numerical_cols_width_prop=0.6,
             chart_col_width_scale=3):
    """Column-width spec for st.columns: one unit per summary column plus
    a chart column scaled by chart_col_width_scale, on each side, with the
    numerical side normalised to numerical_cols_width_prop of the total."""
    num_spec = np.append(np.ones(numerical_summary_df.shape[1]),
                         [chart_col_width_scale])
    spec = numerical_cols_width_prop / num_spec.sum() * num_spec
    cat_spec = np.append(np.ones(categorical_summary_df.shape[1]),
                         [chart_col_width_scale])
    spec = np.append(spec, (1 - numerical_cols_width_prop) / cat_spec.sum() * cat_spec)
    return spec
def extract_row_data(row_idx, numerical_summary_df, categorical_summary_df, data_df):
    """Collect one display row: the feature name(s) plus summary values and
    a histogram figure for the numerical and categorical sides.

    When the numerical side has fewer rows than the categorical side, its
    cells are padded with empty strings so columns stay aligned.
    """
    feature_names = []
    data = []
    if row_idx < numerical_summary_df.shape[0]:
        row = numerical_summary_df.iloc[row_idx]
        feature_names.append(row.name)
        data.extend(row.values)
        data.append(plot_histogram(data_df[feature_names[-1]]))
    else:
        # Pad (there are more categorical columns); +1 for the chart column.
        data.extend([''] * (numerical_summary_df.shape[1] + 1))
    if row_idx < categorical_summary_df.shape[0]:
        row = categorical_summary_df.iloc[row_idx]
        feature_names.append(row.name)
        data.extend(row.values)
        data.append(plot_histogram(data_df[feature_names[-1]]))
    return (feature_names, data)
@st.cache
def plot_histogram(series):
    """Compact Plotly histogram (zero margins, 100px tall) for a series."""
    hist = go.Histogram(x=series)
    fig = go.Figure(data=hist)
    fig.update_layout(margin={'l': 0, 'r': 0, 't': 0, 'b': 0}, height=100)
    return fig
def render(sidebar_handler):
    """Render the Feature Explorer page: a data sample, inferred dtypes,
    per-side feature multiselects, and a summary table with histograms.

    sidebar_handler: callable supplied by the host app; given a title, a
    list of allowed file extensions and example datasets, it returns
    (df_dict, select_key) -- TODO confirm against the caller.
    """
    # Sidebar
    eg_dict = {
        'Baseline Dataset': 'data/baseline.csv'
    }
    df_dict, select_key = sidebar_handler('Dataset(s) for Feature Explorer', ['csv'], eg_dict)
    df = df_dict[select_key]
    # Main
    col1, col2 = st.columns([0.6, 0.4])
    with col1:
        st.subheader('Data Sample (100 rows)')
        st.dataframe(df.sample(100))
    with col2:
        st.subheader('Inferred Data Types')
        with st.expander('Show All Inferred Data Types'):
            dtype_dict = infer_dtypes(df)
            st.json(dtype_dict)
    numerical_cols_width_prop = 0.5
    numerical_col, categorical_col = st.columns([
        numerical_cols_width_prop,
        1 - numerical_cols_width_prop
    ])
    with numerical_col:
        numerical_col_names = [col for col, dtype in dtype_dict.items() if dtype == 'numerical']
        # Default selection shows at most the first 5 features.
        num_num = len(numerical_col_names)
        subset_numerical_col = numerical_col_names
        if num_num > 4:
            subset_numerical_col = numerical_col_names[0:5]
        # Containers reserve layout slots so the subheader (which depends
        # on the selection below it) can be rendered above the widgets.
        num_h_ctnr = st.container()
        num_sl_ctnr = st.container()
        # Select All
        num_all = st.checkbox("Select all %s Numerical Features" % num_num)
        if num_all:
            select_numerical_col = num_sl_ctnr.multiselect('Select Numerical Features',
                                                           options=numerical_col_names,
                                                           default=numerical_col_names)
        else:
            select_numerical_col = num_sl_ctnr.multiselect('Select Numerical Features',
                                                           options=numerical_col_names,
                                                           default=subset_numerical_col)
        # Add Subheader to Container
        num_h_ctnr.subheader('Numerical features (' + str(len(select_numerical_col)) + ')')
    with categorical_col:
        categorical_col_names = [col for col, dtype in dtype_dict.items() if dtype == 'categorical']
        # Default selection shows at most the first 5 features.
        num_cat = len(categorical_col_names)
        subset_categorical_cols = categorical_col_names[0:num_cat+1]
        if num_cat > 4:
            subset_categorical_cols = categorical_col_names[0:5]
        # Define Containers
        cat_h_ctnr = st.container()
        cat_sl_ctnr = st.container()
        # Select All
        cat_all = st.checkbox("Select all %s Categorical Features" % num_cat)
        if cat_all:
            select_categorical_col = cat_sl_ctnr.multiselect('Select Categorical Features',
                                                             options=categorical_col_names,
                                                             default=categorical_col_names)
        else:
            select_categorical_col = cat_sl_ctnr.multiselect('Select Categorical Features',
                                                             options=categorical_col_names,
                                                             default=subset_categorical_cols)
        # Add Subheader to Container
        cat_h_ctnr.subheader('Categorical features (' + str(len(select_categorical_col)) + ')')
    # Calculate summaries
    numerical_summary_df = numerical_cols_summary_stats(df, select_numerical_col)
    categorical_summary_df = categorical_cols_summary_stats(df, select_categorical_col)
    # Get column specifications for rendering
    spec = get_spec(numerical_summary_df,
                    categorical_summary_df,
                    numerical_cols_width_prop=numerical_cols_width_prop)
    # Render column names
    cols = st.columns(spec=spec)
    col_names = list(numerical_summary_df.columns) + \
                ['chart'] + \
                list(categorical_summary_df.columns) + \
                ['chart']
    for col, col_name in zip(cols, col_names):
        with col:
            st.markdown('**' + col_name + '**')
    # Render values
    num_rows = max(numerical_summary_df.shape[0], categorical_summary_df.shape[0])
    for idx in range(num_rows):
        feature_names, row_data = extract_row_data(idx,
                                                   numerical_summary_df,
                                                   categorical_summary_df,
                                                   df)
        # Render feature names as separate row
        cols = st.columns(spec=[numerical_cols_width_prop, (1 - numerical_cols_width_prop)])
        for col, feature_name in zip(cols, feature_names):
            with col:
                col.text(feature_name)
        # Render feature summary and chart
        cols = st.columns(spec=spec)
        for col, (col_idx, col_name), data in zip(cols, enumerate(col_names), row_data):
            with col:
                if col_name.lower() == 'chart':
                    st.plotly_chart(data,
                                    use_container_width=True,
                                    config={'responsive': False, 'displayModeBar': False})
                elif isinstance(data, str):
                    # Padding cell (side with fewer features) -- render as-is.
                    st.text(data)
                elif col_name.lower() == 'count':
                    st.text(round(data))
                elif col_name.lower() in ['missing', 'zeros']:
                    st.text(str(round(data, 2)) + '%')
                else:
                    st.text(round(data, 2))
"streamlit.checkbox",
"plotly.graph_objects.Histogram",
"streamlit.markdown",
"numpy.ones",
"streamlit.container",
"pandas.api.types.is_numeric_dtype",
"streamlit.expander",
"pandas.api.types.is_bool_dtype",
"numpy.append",
"numpy.count_nonzero",
"streamlit.json",
"streamlit.plotly_chart",
"... | [((1833, 1860), 'numpy.ones', 'np.ones', (['num_numerical_cols'], {}), '(num_numerical_cols)\n', (1840, 1860), True, 'import numpy as np\n'), ((1887, 1942), 'numpy.append', 'np.append', (['numerical_cols_spec', '[chart_col_width_scale]'], {}), '(numerical_cols_spec, [chart_col_width_scale])\n', (1896, 1942), True, 'import numpy as np\n'), ((2134, 2163), 'numpy.ones', 'np.ones', (['num_categorical_cols'], {}), '(num_categorical_cols)\n', (2141, 2163), True, 'import numpy as np\n'), ((2192, 2249), 'numpy.append', 'np.append', (['categorical_cols_spec', '[chart_col_width_scale]'], {}), '(categorical_cols_spec, [chart_col_width_scale])\n', (2201, 2249), True, 'import numpy as np\n'), ((3694, 3716), 'streamlit.columns', 'st.columns', (['[0.6, 0.4]'], {}), '([0.6, 0.4])\n', (3704, 3716), True, 'import streamlit as st\n'), ((4091, 4161), 'streamlit.columns', 'st.columns', (['[numerical_cols_width_prop, 1 - numerical_cols_width_prop]'], {}), '([numerical_cols_width_prop, 1 - numerical_cols_width_prop])\n', (4101, 4161), True, 'import streamlit as st\n'), ((6975, 6996), 'streamlit.columns', 'st.columns', ([], {'spec': 'spec'}), '(spec=spec)\n', (6985, 6996), True, 'import streamlit as st\n'), ((638, 652), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (650, 652), True, 'import pandas as pd\n'), ((1302, 1316), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1314, 1316), True, 'import pandas as pd\n'), ((3741, 3779), 'streamlit.subheader', 'st.subheader', (['"""Data Sample (100 rows)"""'], {}), "('Data Sample (100 rows)')\n", (3753, 3779), True, 'import streamlit as st\n'), ((3841, 3876), 'streamlit.subheader', 'st.subheader', (['"""Inferred Data Types"""'], {}), "('Inferred Data Types')\n", (3853, 3876), True, 'import streamlit as st\n'), ((4578, 4592), 'streamlit.container', 'st.container', ([], {}), '()\n', (4590, 4592), True, 'import streamlit as st\n'), ((4615, 4629), 'streamlit.container', 'st.container', ([], {}), '()\n', (4627, 4629), True, 
'import streamlit as st\n'), ((4670, 4727), 'streamlit.checkbox', 'st.checkbox', (["('Select all %s Numerical Features' % num_num)"], {}), "('Select all %s Numerical Features' % num_num)\n", (4681, 4727), True, 'import streamlit as st\n'), ((5754, 5768), 'streamlit.container', 'st.container', ([], {}), '()\n', (5766, 5768), True, 'import streamlit as st\n'), ((5791, 5805), 'streamlit.container', 'st.container', ([], {}), '()\n', (5803, 5805), True, 'import streamlit as st\n'), ((5846, 5905), 'streamlit.checkbox', 'st.checkbox', (["('Select all %s Categorical Features' % num_cat)"], {}), "('Select all %s Categorical Features' % num_cat)\n", (5857, 5905), True, 'import streamlit as st\n'), ((7721, 7796), 'streamlit.columns', 'st.columns', ([], {'spec': '[numerical_cols_width_prop, 1 - numerical_cols_width_prop]'}), '(spec=[numerical_cols_width_prop, 1 - numerical_cols_width_prop])\n', (7731, 7796), True, 'import streamlit as st\n'), ((7980, 8001), 'streamlit.columns', 'st.columns', ([], {'spec': 'spec'}), '(spec=spec)\n', (7990, 8001), True, 'import streamlit as st\n'), ((402, 439), 'pandas.api.types.is_numeric_dtype', 'pd.api.types.is_numeric_dtype', (['series'], {}), '(series)\n', (431, 439), True, 'import pandas as pd\n'), ((1050, 1080), 'numpy.count_nonzero', 'np.count_nonzero', (['df[col_name]'], {}), '(df[col_name])\n', (1066, 1080), True, 'import numpy as np\n'), ((3319, 3341), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'series'}), '(x=series)\n', (3331, 3341), True, 'import plotly.graph_objects as go\n'), ((3890, 3933), 'streamlit.expander', 'st.expander', (['"""Show All Inferred Data Types"""'], {}), "('Show All Inferred Data Types')\n", (3901, 3933), True, 'import streamlit as st\n'), ((3989, 4008), 'streamlit.json', 'st.json', (['dtype_dict'], {}), '(dtype_dict)\n', (3996, 4008), True, 'import streamlit as st\n'), ((7219, 7254), 'streamlit.markdown', 'st.markdown', (["('**' + col_name + '**')"], {}), "('**' + col_name + '**')\n", (7230, 
7254), True, 'import streamlit as st\n'), ((349, 383), 'pandas.api.types.is_bool_dtype', 'pd.api.types.is_bool_dtype', (['series'], {}), '(series)\n', (375, 383), True, 'import pandas as pd\n'), ((8181, 8287), 'streamlit.plotly_chart', 'st.plotly_chart', (['data'], {'use_container_width': '(True)', 'config': "{'responsive': False, 'displayModeBar': False}"}), "(data, use_container_width=True, config={'responsive': False,\n 'displayModeBar': False})\n", (8196, 8287), True, 'import streamlit as st\n'), ((8420, 8433), 'streamlit.text', 'st.text', (['data'], {}), '(data)\n', (8427, 8433), True, 'import streamlit as st\n')] |
"""Pareto tail indices plot."""
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgba_array
import matplotlib.cm as cm
import numpy as np
from xarray import DataArray
from .plot_utils import (
_scale_fig_size,
get_coords,
color_from_dim,
format_coords_as_labels,
get_plotting_function,
)
from ..stats import ELPDData
def plot_khat(
    khats,
    color=None,
    xlabels=False,
    show_bins=False,
    bin_format="{1:.1f}%",
    annotate=False,
    hover_label=False,
    hover_format="{1}",
    figsize=None,
    textsize=None,
    coords=None,
    legend=False,
    markersize=None,
    ax=None,
    hlines_kwargs=None,
    backend=None,
    backend_kwargs=None,
    show=None,
    **kwargs
):
    """
    Plot Pareto tail indices.
    Parameters
    ----------
    khats : ELPDData containing pareto shapes information or array
        Pareto tail indices.
    color : str or array_like, optional
        Colors of the scatter plot, if color is a str all dots will have the same color,
        if it is the size of the observations, each dot will have the specified color,
        otherwise, it will be interpreted as a list of the dims to be used for the color code
    xlabels : bool, optional
        Use coords as xticklabels
    show_bins : bool, optional
        Show the number of khats which fall in each bin.
    bin_format : str, optional
        The string is used as formatting guide calling ``bin_format.format(count, pct)``.
    annotate : bool, optional
        Show the labels of k values larger than 1.
    hover_label : bool, optional
        Show the datapoint label when hovering over it with the mouse. Requires an interactive
        backend.
    hover_format : str, optional
        String used to format the hover label via ``hover_format.format(idx, coord_label)``
    figsize : tuple, optional
        Figure size. If None it will be defined automatically.
    textsize: float, optional
        Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
        on figsize.
    coords : mapping, optional
        Coordinates of points to plot. **All** values are used for computation, but only a
        subset can be plotted for convenience.
    legend : bool, optional
        Include a legend to the plot. Only taken into account when color argument is a dim name.
    markersize: int, optional
        markersize for scatter plot. Defaults to `None` in which case it will
        be chosen based on autoscaling for figsize.
    ax: axes, optional
        Matplotlib axes or bokeh figures.
    hlines_kwargs: dictionary, optional
        Additional keywords passed to ax.hlines.
    backend: str, optional
        Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
    backend_kwargs: bool, optional
        These are kwargs specific to the backend being used. For additional documentation
        check the plotting method of the backend.
    show : bool, optional
        Call backend show function.
    kwargs :
        Additional keywords passed to ax.scatter.
    Returns
    -------
    axes : matplotlib axes or bokeh figures
    Examples
    --------
    Plot estimated pareto shape parameters showing how many fall in each category.
    .. plot::
        :context: close-figs
        >>> import arviz as az
        >>> radon = az.load_arviz_data("radon")
        >>> loo_radon = az.loo(radon, pointwise=True)
        >>> az.plot_khat(loo_radon, show_bins=True)
    Show xlabels
    .. plot::
        :context: close-figs
        >>> centered_eight = az.load_arviz_data("centered_eight")
        >>> khats = az.loo(centered_eight, pointwise=True).pareto_k
        >>> az.plot_khat(khats, xlabels=True, annotate=True)
    Use coord values to create color mapping
    .. plot::
        :context: close-figs
        >>> az.plot_khat(loo_radon, color="observed_county", cmap="tab20")
    Use custom color scheme
    .. plot::
        :context: close-figs
        >>> counties = radon.posterior.observed_county.values
        >>> colors = [
        ...     "blue" if county[-1] in ("A", "N") else "green" for county in counties
        ... ]
        >>> az.plot_khat(loo_radon, color=colors)
    """
    # Default reference-line styling; one style per khat threshold band.
    if hlines_kwargs is None:
        hlines_kwargs = {}
    hlines_kwargs.setdefault("linestyle", [":", "-.", "--", "-"])
    hlines_kwargs.setdefault("alpha", 0.7)
    hlines_kwargs.setdefault("zorder", -1)
    hlines_kwargs.setdefault("color", "C1")
    if coords is None:
        coords = {}
    if color is None:
        color = "C0"
    # Normalize khats to either a flat ndarray or a (coord-subset) DataArray.
    if isinstance(khats, np.ndarray):
        khats = khats.flatten()
        xlabels = False
        legend = False
        dims = []
    else:
        if isinstance(khats, ELPDData):
            khats = khats.pareto_k
        if not isinstance(khats, DataArray):
            raise ValueError("Incorrect khat data input. Check the documentation")
        khats = get_coords(khats, coords)
        dims = khats.dims
    n_data_points = khats.size
    xdata = np.arange(n_data_points)
    if isinstance(khats, DataArray):
        coord_labels = format_coords_as_labels(khats)
    else:
        coord_labels = xdata.astype(str)
    (figsize, ax_labelsize, _, xt_labelsize, linewidth, scaled_markersize) = _scale_fig_size(
        figsize, textsize
    )
    if markersize is None:
        # `s` in scatter is area, so square the autoscaled markersize
        # for dots to have the same visual size.
        markersize = scaled_markersize ** 2
    kwargs.setdefault("s", markersize)
    kwargs.setdefault("marker", "+")
    # Resolve the `color` argument into per-point RGBA values.
    color_mapping = None
    cmap = None
    if isinstance(color, str):
        if color in dims:
            # Color-code by a dimension of the khats DataArray.
            colors, color_mapping = color_from_dim(khats, color)
            cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
            cmap = getattr(cm, cmap_name)
            rgba_c = cmap(colors)
        else:
            # A single named color applied to every point.
            legend = False
            rgba_c = to_rgba_array(np.full(n_data_points, color))
    else:
        legend = False
        try:
            # Array of explicit colors, one per point.
            rgba_c = to_rgba_array(color)
        except ValueError:
            # Fall back to treating `color` as scalar values mapped through a colormap.
            cmap_name = kwargs.get("cmap", plt.rcParams["image.cmap"])
            cmap = getattr(cm, cmap_name)
            rgba_c = cmap(color)
    khats = khats if isinstance(khats, np.ndarray) else khats.values.flatten()
    # Encode severity in the alpha channel: worse khat values are more opaque.
    alphas = 0.5 + 0.2 * (khats > 0.5) + 0.3 * (khats > 1)
    rgba_c[:, 3] = alphas
    plot_khat_kwargs = dict(
        hover_label=hover_label,
        hover_format=hover_format,
        ax=ax,
        figsize=figsize,
        xdata=xdata,
        khats=khats,
        rgba_c=rgba_c,
        kwargs=kwargs,
        annotate=annotate,
        coord_labels=coord_labels,
        ax_labelsize=ax_labelsize,
        xt_labelsize=xt_labelsize,
        show_bins=show_bins,
        linewidth=linewidth,
        hlines_kwargs=hlines_kwargs,
        xlabels=xlabels,
        legend=legend,
        color_mapping=color_mapping,
        cmap=cmap,
        color=color,
        n_data_points=n_data_points,
        bin_format=bin_format,
        backend_kwargs=backend_kwargs,
        show=show,
    )
    if backend == "bokeh":
        # The bokeh backend does not support these matplotlib-specific options.
        plot_khat_kwargs.pop("hover_label")
        plot_khat_kwargs.pop("hover_format")
        plot_khat_kwargs.pop("kwargs")
        plot_khat_kwargs.pop("ax_labelsize")
        plot_khat_kwargs.pop("xt_labelsize")
        plot_khat_kwargs.pop("hlines_kwargs")
        plot_khat_kwargs.pop("xlabels")
        plot_khat_kwargs.pop("legend")
        plot_khat_kwargs.pop("color_mapping")
        plot_khat_kwargs.pop("cmap")
        plot_khat_kwargs.pop("color")
        # TODO: Add backend kwargs
    plot = get_plotting_function("plot_khat", "khatplot", backend)
    axes = plot(**plot_khat_kwargs)
    return axes
| [
"matplotlib.colors.to_rgba_array",
"numpy.full",
"numpy.arange"
] | [((5047, 5071), 'numpy.arange', 'np.arange', (['n_data_points'], {}), '(n_data_points)\n', (5056, 5071), True, 'import numpy as np\n'), ((6060, 6080), 'matplotlib.colors.to_rgba_array', 'to_rgba_array', (['color'], {}), '(color)\n', (6073, 6080), False, 'from matplotlib.colors import to_rgba_array\n'), ((5962, 5991), 'numpy.full', 'np.full', (['n_data_points', 'color'], {}), '(n_data_points, color)\n', (5969, 5991), True, 'import numpy as np\n')] |
''' Agents: stop/random/shortest/seq2seq '''
import json
import sys
import numpy as np
import random
from collections import namedtuple
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torch.distributions as D
from utils import vocab_pad_idx, vocab_eos_idx, flatten, structured_map, try_cuda
#from env import FOLLOWER_MODEL_ACTIONS, FOLLOWER_ENV_ACTIONS, IGNORE_ACTION_INDEX, LEFT_ACTION_INDEX, RIGHT_ACTION_INDEX, START_ACTION_INDEX, END_ACTION_INDEX, FORWARD_ACTION_INDEX, index_action_tuple
# One node of the beam-search tree. `prev_inference_state` links to the parent
# node (None at the root), so complete trajectories are recovered by walking
# backwards; `score` is the cumulative log-probability accumulated so far.
InferenceState = namedtuple("InferenceState", "prev_inference_state, world_state, observation, flat_index, last_action, last_action_embedding, action_count, score, h_t, c_t, last_alpha")
# Minimal cons cell (first/rest linked-list node) used to share path suffixes
# between inference states without copying whole lists.
Cons = namedtuple("Cons", "first, rest")
def cons_to_list(cons):
    """Flatten a non-empty Cons chain (first/rest linked list) into a Python list."""
    items = [cons.first]
    node = cons.rest
    while node is not None:
        items.append(node.first)
        node = node.rest
    return items
def backchain_inference_states(last_inference_state):
    """Follow parent links from a final InferenceState back to the root and
    return the trajectory in forward (root-to-leaf) order.

    Returns (states, observations, actions, step_scores, attentions); the
    dummy start action and its placeholder score/attention entries are dropped.
    """
    world_states = []
    obs_seq = []
    action_seq = []
    alpha_seq = []
    step_scores = []
    prev_score = None
    node = last_inference_state
    while node is not None:
        world_states.append(node.world_state)
        obs_seq.append(node.observation)
        action_seq.append(node.last_action)
        alpha_seq.append(node.last_alpha)
        if prev_score is not None:
            # per-step score = child's cumulative score minus this node's
            step_scores.append(prev_score - node.score)
        prev_score = node.score
        node = node.prev_inference_state
    step_scores.append(prev_score)
    # Reverse into forward order; slice off the start-action slot.
    return (
        list(reversed(world_states)),
        list(reversed(obs_seq)),
        list(reversed(action_seq))[1:],
        list(reversed(step_scores))[1:],
        list(reversed(alpha_seq))[1:],
    )
def least_common_viewpoint_path(inf_state_a, inf_state_b):
    """Return the inference states traversing from A to X, then from Y to B,
    where X and Y are the least common ancestors of A and B respectively that
    share a viewpointId (the shared viewpoint appears only once in the result).
    Raises AssertionError if no ancestor of A shares a viewpoint with B's chain.
    """
    # Map each viewpointId on B's ancestor chain to the sub-path (as a Cons
    # list) from that ancestor down to B.
    path_to_b_by_viewpoint = {
    }
    b = inf_state_b
    b_stack = Cons(b, None)
    while b is not None:
        path_to_b_by_viewpoint[b.world_state.viewpointId] = b_stack
        b = b.prev_inference_state
        b_stack = Cons(b, b_stack)
    # Walk up from A until we hit a viewpoint also on B's chain.
    a = inf_state_a
    path_from_a = [a]
    while a is not None:
        vp = a.world_state.viewpointId
        if vp in path_to_b_by_viewpoint:
            path_to_b = cons_to_list(path_to_b_by_viewpoint[vp])
            # The meeting point must agree on viewpoint; drop its duplicate entry.
            assert path_from_a[-1].world_state.viewpointId == path_to_b[0].world_state.viewpointId
            return path_from_a + path_to_b[1:]
        a = a.prev_inference_state
        path_from_a.append(a)
    raise AssertionError("no common ancestor found")
def batch_instructions_from_encoded(encoded_instructions, max_length, reverse=False, sort=False):
    """Pad, EOS-terminate, and batch a list of encoded instructions.

    encoded_instructions: list of lists of token indices (should not be padded,
    or contain BOS or EOS tokens).
    Returns (seq_tensor, pad_mask, seq_lengths) and, when ``sort`` is True,
    additionally the permutation indices of the length-descending sort.
    """
    #seq_tensor = np.array(encoded_instructions)
    # make sure pad does not start any sentence
    num_instructions = len(encoded_instructions)
    seq_tensor = np.full((num_instructions, max_length), vocab_pad_idx)
    seq_lengths = []
    for i, inst in enumerate(encoded_instructions):
        if len(inst) > 0:
            assert inst[-1] != vocab_eos_idx
        if reverse:
            inst = inst[::-1]
        inst = np.concatenate((inst, [vocab_eos_idx]))
        # NOTE(review): truncating after appending EOS means an instruction of
        # exactly max_length tokens loses its EOS marker — confirm intended.
        inst = inst[:max_length]
        seq_tensor[i,:len(inst)] = inst
        seq_lengths.append(len(inst))
    seq_tensor = torch.from_numpy(seq_tensor)
    if sort:
        # Sort by length descending (required by packed-sequence RNN encoders);
        # remember the permutation so callers can map back to input order.
        seq_lengths, perm_idx = torch.from_numpy(np.array(seq_lengths)).sort(0, True)
        seq_lengths = list(seq_lengths)
        seq_tensor = seq_tensor[perm_idx]
    # Mask is True at padding positions, trimmed to the longest real sequence.
    mask = (seq_tensor == vocab_pad_idx)[:, :max(seq_lengths)]
    ret_tp = try_cuda(Variable(seq_tensor, requires_grad=False).long()), \
             try_cuda(mask.byte()), \
             seq_lengths
    if sort:
        ret_tp = ret_tp + (list(perm_idx),)
    return ret_tp
class BaseAgent(object):
    ''' Base class for an R2R agent to generate and save trajectories. '''

    def __init__(self, env, results_path):
        self.env = env
        self.results_path = results_path
        random.seed(1)  # fixed seed so rollouts are reproducible across runs
        self.results = {}
        self.losses = []  # For learning agents

    def write_results(self):
        """Dump {instr_id, trajectory} for every stored result to results_path as JSON."""
        slimmed = {
            key: {'instr_id': entry['instr_id'], 'trajectory': entry['trajectory']}
            for key, entry in self.results.items()
        }
        with open(self.results_path, 'w') as out_file:
            json.dump(slimmed, out_file)

    def rollout(self):
        ''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''
        raise NotImplementedError

    @staticmethod
    def get_agent(name):
        """Look up an agent class by name prefix, e.g. 'Stop' -> StopAgent."""
        return globals()[name + "Agent"]

    def test(self):
        """Roll out over one full epoch and collect results keyed by instr_id."""
        self.env.reset_epoch()
        self.losses = []
        self.results = {}
        # We rely on env showing the entire batch before repeating anything.
        looped = False
        rollout_scores = []
        beam_10_scores = []
        while True:
            for result in self.rollout():
                instr_id = result['instr_id']
                # A repeated instruction id means the env wrapped around: stop.
                if instr_id in self.results:
                    looped = True
                else:
                    self.results[instr_id] = result
            if looped:
                break
        return self.results
def path_element_from_observation(ob):
    """Convert an observation dict into a (viewpointId, heading, elevation) pose tuple."""
    pose_keys = ('viewpoint', 'heading', 'elevation')
    return tuple(ob[key] for key in pose_keys)
class StopAgent(BaseAgent):
    ''' An agent that doesn't move! '''

    def rollout(self):
        states = self.env.reset()
        observations = self.env.observe(states)
        # Each trajectory consists solely of the starting pose.
        return [
            {'instr_id': ob['instr_id'],
             'trajectory': [path_element_from_observation(ob)]}
            for ob in observations
        ]
class RandomAgent(BaseAgent):
    ''' An agent that picks a random direction then tries to go straight for
        five viewpoint steps and then stops. '''

    def rollout(self):
        world_states = self.env.reset()
        obs = self.env.observe(world_states)
        traj = [
            {'instr_id': ob['instr_id'],
             'trajectory': [path_element_from_observation(ob)]}
            for ob in obs
        ]
        batch = len(obs)
        ended = [False] * batch
        self.steps = [0] * batch
        for _ in range(6):
            actions = []
            for idx, ob in enumerate(obs):
                if self.steps[idx] >= 5:
                    # Budget exhausted: emit the stop action.
                    actions.append(0)
                    ended[idx] = True
                elif self.steps[idx] == 0:
                    # First move: pick a random non-stop adjacent location.
                    choice = np.random.randint(len(ob['adj_loc_list']) - 1) + 1
                    actions.append(choice)
                    self.steps[idx] += 1
                else:
                    assert len(ob['adj_loc_list']) > 1
                    actions.append(1)  # keep going forward
                    self.steps[idx] += 1
            world_states = self.env.step(world_states, actions, obs)
            obs = self.env.observe(world_states)
            for idx, ob in enumerate(obs):
                if not ended[idx]:
                    traj[idx]['trajectory'].append(path_element_from_observation(ob))
        return traj
class ShortestAgent(BaseAgent):
    ''' An agent that always takes the shortest path to goal. '''

    def rollout(self):
        world_states = self.env.reset()
        all_obs, all_actions = self.env.shortest_paths_to_goals(world_states, 20)
        trajs = []
        for obs in all_obs:
            # The end state appears twice because the stop action is a no-op,
            # so the final observation is excluded.
            trajs.append({
                'instr_id': obs[0]['instr_id'],
                'trajectory': [path_element_from_observation(ob) for ob in obs[:-1]],
            })
        return trajs
class Seq2SeqAgent(BaseAgent):
''' An agent based on an LSTM seq2seq model with attention. '''
# For now, the agent can't pick which forward move to make - just the one in the middle
# env_actions = FOLLOWER_ENV_ACTIONS
# start_index = START_ACTION_INDEX
# ignore_index = IGNORE_ACTION_INDEX
# forward_index = FORWARD_ACTION_INDEX
# end_index = END_ACTION_INDEX
feedback_options = ['teacher', 'argmax', 'sample']
def __init__(self, env, results_path, encoder, decoder, episode_len=10, beam_size=1, reverse_instruction=True, max_instruction_length=80):
super(Seq2SeqAgent, self).__init__(env, results_path)
self.encoder = encoder
self.decoder = decoder
self.episode_len = episode_len
self.losses = []
self.criterion = nn.CrossEntropyLoss(ignore_index=-1)
self.beam_size = beam_size
self.reverse_instruction = reverse_instruction
self.max_instruction_length = max_instruction_length
# @staticmethod
# def n_inputs():
# return len(FOLLOWER_MODEL_ACTIONS)
#
# @staticmethod
# def n_outputs():
# return len(FOLLOWER_MODEL_ACTIONS)-2 # Model doesn't output start or ignore
def _feature_variables(self, obs, beamed=False):
''' Extract precomputed features into variable. '''
feature_lists = list(zip(*[ob['feature'] for ob in (flatten(obs) if beamed else obs)]))
assert len(feature_lists) == len(self.env.image_features_list)
batched = []
for featurizer, feature_list in zip(self.env.image_features_list, feature_lists):
batched.append(featurizer.batch_features(feature_list))
return batched
def _action_variable(self, obs):
# get the maximum number of actions of all sample in this batch
max_num_a = -1
for i, ob in enumerate(obs):
max_num_a = max(max_num_a, len(ob['adj_loc_list']))
is_valid = np.zeros((len(obs), max_num_a), np.float32)
action_embedding_dim = obs[0]['action_embedding'].shape[-1]
action_embeddings = np.zeros(
(len(obs), max_num_a, action_embedding_dim),
dtype=np.float32)
for i, ob in enumerate(obs):
adj_loc_list = ob['adj_loc_list']
num_a = len(adj_loc_list)
is_valid[i, 0:num_a] = 1.
for n_a, adj_dict in enumerate(adj_loc_list):
action_embeddings[i, :num_a, :] = ob['action_embedding']
return (
Variable(torch.from_numpy(action_embeddings), requires_grad=False).cuda(),
Variable(torch.from_numpy(is_valid), requires_grad=False).cuda(),
is_valid)
def _teacher_action(self, obs, ended):
''' Extract teacher actions into variable. '''
a = torch.LongTensor(len(obs))
for i,ob in enumerate(obs):
# Supervised teacher only moves one axis at a time
a[i] = ob['teacher'] if not ended[i] else -1
return try_cuda(Variable(a, requires_grad=False))
def _proc_batch(self, obs, beamed=False):
encoded_instructions = [ob['instr_encoding'] for ob in (flatten(obs) if beamed else obs)]
return batch_instructions_from_encoded(encoded_instructions, self.max_instruction_length, reverse=self.reverse_instruction)
def rollout(self):
if self.beam_size == 1:
return self._rollout_with_loss()
else:
assert self.beam_size >= 1
beams, _, _ = self.beam_search(self.beam_size)
return [beam[0] for beam in beams]
    def _score_obs_actions_and_instructions(self, path_obs, path_actions, encoded_instructions):
        """Teacher-force the decoder along given (observation, action) paths and
        return (trajectories, total cross-entropy loss).

        Each path_obs[i] must be one observation longer than path_actions[i]
        (the final state has no outgoing action). Instructions are sorted by
        length for the encoder; perm_indices maps sorted position -> source index.
        """
        batch_size = len(path_obs)
        assert len(path_actions) == batch_size
        assert len(encoded_instructions) == batch_size
        for path_o, path_a in zip(path_obs, path_actions):
            assert len(path_o) == len(path_a) + 1
        seq, seq_mask, seq_lengths, perm_indices = \
            batch_instructions_from_encoded(
                encoded_instructions, self.max_instruction_length,
                reverse=self.reverse_instruction, sort=True)
        loss = 0
        ctx, h_t, c_t = self.encoder(seq, seq_lengths)
        u_t_prev = self.decoder.u_begin.expand(batch_size, -1)  # init action
        ended = np.array([False] * batch_size)
        sequence_scores = try_cuda(torch.zeros(batch_size))
        traj = [{
            'instr_id': path_o[0]['instr_id'],
            'trajectory': [path_element_from_observation(path_o[0])],
            'actions': [],
            'scores': [],
            'observations': [path_o[0]],
            'instr_encoding': path_o[0]['instr_encoding']
        } for path_o in path_obs]
        obs = None
        for t in range(self.episode_len):
            # Build this step's observations/targets in the encoder's sorted
            # order; paths that have already ended repeat their last obs with
            # target -1 (ignored by the criterion).
            next_obs = []
            next_target_list = []
            for perm_index, src_index in enumerate(perm_indices):
                path_o = path_obs[src_index]
                path_a = path_actions[src_index]
                if t < len(path_a):
                    next_target_list.append(path_a[t])
                    next_obs.append(path_o[t])
                else:
                    next_target_list.append(-1)
                    next_obs.append(obs[perm_index])
            obs = next_obs
            target = try_cuda(Variable(torch.LongTensor(next_target_list), requires_grad=False))
            f_t_list = self._feature_variables(obs) # Image features from obs
            all_u_t, is_valid, _ = self._action_variable(obs)
            assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
            h_t, c_t, alpha, logit, alpha_v = self.decoder(
                u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)
            # Mask outputs of invalid actions
            logit[is_valid == 0] = -float('inf')
            # Supervised training
            loss += self.criterion(logit, target)
            # Determine next model inputs
            a_t = torch.clamp(target, min=0) # teacher forcing
            # update the previous action
            u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()
            # Per-step log-probabilities of the taken actions (negated CE).
            action_scores = -F.cross_entropy(logit, target, ignore_index=-1, reduce=False).data
            sequence_scores += action_scores
            # Save trajectory output
            for perm_index, src_index in enumerate(perm_indices):
                ob = obs[perm_index]
                if not ended[perm_index]:
                    traj[src_index]['trajectory'].append(path_element_from_observation(ob))
                    traj[src_index]['score'] = float(sequence_scores[perm_index])
                    traj[src_index]['scores'].append(action_scores[perm_index])
                    traj[src_index]['actions'].append(a_t.data[perm_index])
                    # traj[src_index]['observations'].append(ob)
            # Update ended list
            for i in range(batch_size):
                action_idx = a_t[i].data[0]
                if action_idx == 0:
                    ended[i] = True
            # Early exit if all ended
            if ended.all():
                break
        return traj, loss
    def _rollout_with_loss(self):
        """Run one batched episode, accumulating cross-entropy loss in self.loss
        (appended to self.losses) and returning per-instance trajectory dicts.

        The action at each step is chosen per self.feedback: 'teacher'
        (ground-truth), 'argmax' (greedy), or 'sample' (from the softmax).
        """
        initial_world_states = self.env.reset(sort=True)
        initial_obs = self.env.observe(initial_world_states)
        initial_obs = np.array(initial_obs)
        batch_size = len(initial_obs)
        # get mask and lengths
        seq, seq_mask, seq_lengths = self._proc_batch(initial_obs)
        # Forward through encoder, giving initial hidden state and memory cell for decoder
        # TODO consider not feeding this into the decoder, and just using attention
        self.loss = 0
        feedback = self.feedback
        ctx,h_t,c_t = self.encoder(seq, seq_lengths)
        # Record starting point
        traj = [{
            'instr_id': ob['instr_id'],
            'trajectory': [path_element_from_observation(ob)],
            'actions': [],
            'scores': [],
            'observations': [ob],
            'instr_encoding': ob['instr_encoding']
        } for ob in initial_obs]
        obs = initial_obs
        world_states = initial_world_states
        # Initial action
        u_t_prev = self.decoder.u_begin.expand(batch_size, -1)  # init action
        ended = np.array([False] * batch_size) # Indices match permuation of the model, not env
        # Do a sequence rollout and calculate the loss
        env_action = [None] * batch_size
        sequence_scores = try_cuda(torch.zeros(batch_size))
        for t in range(self.episode_len):
            f_t_list = self._feature_variables(obs) # Image features from obs
            all_u_t, is_valid, _ = self._action_variable(obs)
            assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
            h_t, c_t, alpha, logit, alpha_v = self.decoder(
                u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx, seq_mask)
            # Mask outputs of invalid actions
            logit[is_valid == 0] = -float('inf')
            # Supervised training
            target = self._teacher_action(obs, ended)
            self.loss += self.criterion(logit, target)
            # Determine next model inputs
            if feedback == 'teacher':
                # turn -1 (ignore) to 0 (stop) so that the action is executable
                a_t = torch.clamp(target, min=0)
            elif feedback == 'argmax':
                _,a_t = logit.max(1)        # student forcing - argmax
                a_t = a_t.detach()
            elif feedback == 'sample':
                probs = F.softmax(logit, dim=1)  # sampling an action from model
                # Further mask probs where agent can't move forward
                # Note input to `D.Categorical` does not have to sum up to 1
                # http://pytorch.org/docs/stable/torch.html#torch.multinomial
                probs[is_valid == 0] = 0.
                m = D.Categorical(probs)
                a_t = m.sample()
            else:
                sys.exit('Invalid feedback option')
            # update the previous action
            u_t_prev = all_u_t[np.arange(batch_size), a_t, :].detach()
            # Per-step log-probabilities of the chosen actions (negated CE).
            action_scores = -F.cross_entropy(logit, a_t, ignore_index=-1, reduce=False).data
            sequence_scores += action_scores
            # dfried: I changed this so that the ended list is updated afterward; this causes <end> to be added as the last action, along with its score, and the final world state will be duplicated (to more closely match beam search)
            # Make environment action
            for i in range(batch_size):
                action_idx = a_t[i].data[0]
                env_action[i] = action_idx
            world_states = self.env.step(world_states, env_action, obs)
            obs = self.env.observe(world_states)
            # print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, world_states[0], a_t.data[0], sequence_scores[0]))
            # Save trajectory output
            for i,ob in enumerate(obs):
                if not ended[i]:
                    traj[i]['trajectory'].append(path_element_from_observation(ob))
                    traj[i]['score'] = sequence_scores[i]
                    traj[i]['scores'].append(action_scores[i])
                    traj[i]['actions'].append(a_t.data[i])
                    traj[i]['observations'].append(ob)
            # Update ended list
            for i in range(batch_size):
                action_idx = a_t[i].data[0]
                if action_idx == 0:
                    ended[i] = True
            # Early exit if all ended
            if ended.all():
                break
        #self.losses.append(self.loss.data[0] / self.episode_len)
        # shouldn't divide by the episode length because of masking
        self.losses.append(self.loss.data[0])
        return traj
    def beam_search(self, beam_size, load_next_minibatch=True, mask_undo=False):
        """Beam-search decoding over the environment.

        Maintains up to beam_size InferenceStates per instance; a state whose
        last action is stop (0), or that survives to the final timestep, moves
        to `completed`. Returns (trajs, completed, traversed_lists) where trajs
        holds up to beam_size decoded trajectories per instance, best-first.
        """
        assert self.env.beam_size >= beam_size
        world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
        obs = self.env.observe(world_states, beamed=True)
        batch_size = len(world_states)
        # get mask and lengths
        seq, seq_mask, seq_lengths = self._proc_batch(obs, beamed=True)
        # Forward through encoder, giving initial hidden state and memory cell for decoder
        ctx,h_t,c_t = self.encoder(seq, seq_lengths)
        completed = []
        for _ in range(batch_size):
            completed.append([])
        # One root InferenceState per instance (dummy start action, score 0).
        beams = [
            [InferenceState(prev_inference_state=None,
                            world_state=ws[0],
                            observation=o[0],
                            flat_index=i,
                            last_action=-1,
                            last_action_embedding=self.decoder.u_begin.view(-1),
                            action_count=0,
                            score=0.0, h_t=None, c_t=None, last_alpha=None)]
            for i, (ws, o) in enumerate(zip(world_states, obs))
        ]
        # Do a sequence rollout and calculate the loss
        for t in range(self.episode_len):
            # Flatten all live states across beams into one decoder batch;
            # beam_indices/flat_indices map flat rows back to their beams.
            flat_indices = []
            beam_indices = []
            u_t_list = []
            for beam_index, beam in enumerate(beams):
                for inf_state in beam:
                    beam_indices.append(beam_index)
                    flat_indices.append(inf_state.flat_index)
                    u_t_list.append(inf_state.last_action_embedding)
            u_t_prev = torch.stack(u_t_list, dim=0)
            assert len(u_t_prev.shape) == 2
            flat_obs = flatten(obs)
            f_t_list = self._feature_variables(flat_obs) # Image features from obs
            all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)
            assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
            h_t, c_t, alpha, logit, alpha_v = self.decoder(
                u_t_prev, all_u_t, f_t_list[0], h_t[flat_indices], c_t[flat_indices], ctx[beam_indices], seq_mask[beam_indices])
            # Mask outputs of invalid actions
            logit[is_valid == 0] = -float('inf')
            # # Mask outputs where agent can't move forward
            # no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
            if mask_undo:
                masked_logit = logit.clone()
            else:
                masked_logit = logit
            log_probs = F.log_softmax(logit, dim=1).data
            # force ending if we've reached the max time steps
            # if t == self.episode_len - 1:
            #     action_scores = log_probs[:,self.end_index].unsqueeze(-1)
            #     action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
            # else:
            #action_scores, action_indices = log_probs.topk(min(beam_size, logit.size()[1]), dim=1)
            _, action_indices = masked_logit.data.topk(min(beam_size, logit.size()[1]), dim=1)
            action_scores = log_probs.gather(1, action_indices)
            assert action_scores.size() == action_indices.size()
            start_index = 0
            new_beams = []
            assert len(beams) == len(world_states)
            all_successors = []
            # Expand each live state by its top-k valid actions, then keep the
            # best beam_size successors per instance.
            for beam_index, (beam, beam_world_states, beam_obs) in enumerate(zip(beams, world_states, obs)):
                successors = []
                end_index = start_index + len(beam)
                assert len(beam_world_states) == len(beam)
                assert len(beam_obs) == len(beam)
                if beam:
                    for inf_index, (inf_state, world_state, ob, action_score_row, action_index_row) in \
                        enumerate(zip(beam, beam_world_states, beam_obs, action_scores[start_index:end_index], action_indices[start_index:end_index])):
                        flat_index = start_index + inf_index
                        for action_score, action_index in zip(action_score_row, action_index_row):
                            if is_valid_numpy[flat_index, action_index] == 0:
                                continue
                            successors.append(
                                InferenceState(prev_inference_state=inf_state,
                                               world_state=world_state, # will be updated later after successors are pruned
                                               observation=ob, # will be updated later after successors are pruned
                                               flat_index=flat_index,
                                               last_action=action_index,
                                               last_action_embedding=all_u_t[flat_index, action_index].detach(),
                                               action_count=inf_state.action_count + 1,
                                               score=float(inf_state.score + action_score), h_t=None, c_t=None,
                                               last_alpha=alpha[flat_index].data)
                                )
                start_index = end_index
                successors = sorted(successors, key=lambda t: t.score, reverse=True)[:beam_size]
                all_successors.append(successors)
            # Step the environment for every surviving successor, then patch
            # the true post-action world state/observation into each one.
            successor_world_states = [
                [inf_state.world_state for inf_state in successors]
                for successors in all_successors
            ]
            successor_env_actions = [
                [inf_state.last_action for inf_state in successors]
                for successors in all_successors
            ]
            successor_last_obs = [
                [inf_state.observation for inf_state in successors]
                for successors in all_successors
            ]
            successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
            successor_obs = self.env.observe(successor_world_states, beamed=True)
            all_successors = structured_map(lambda inf_state, world_state, obs: inf_state._replace(world_state=world_state, observation=obs),
                                            all_successors, successor_world_states, successor_obs, nested=True)
            # if all_successors[0]:
            #     print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))
            for beam_index, successors in enumerate(all_successors):
                new_beam = []
                for successor in successors:
                    if successor.last_action == 0 or t == self.episode_len - 1:
                        completed[beam_index].append(successor)
                    else:
                        new_beam.append(successor)
                if len(completed[beam_index]) >= beam_size:
                    new_beam = []
                new_beams.append(new_beam)
            beams = new_beams
            world_states = [
                [inf_state.world_state for inf_state in beam]
                for beam in beams
            ]
            obs = [
                [inf_state.observation for inf_state in beam]
                for beam in beams
            ]
            # Early exit if all ended
            if not any(beam for beam in beams):
                break
        trajs = []
        for this_completed in completed:
            assert this_completed
            this_trajs = []
            for inf_state in sorted(this_completed, key=lambda t: t.score, reverse=True)[:beam_size]:
                path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
                # this will have messed-up headings for (at least some) starting locations because of
                # discretization, so read from the observations instead
                ## path = [(obs.viewpointId, state.heading, state.elevation)
                ##         for state in path_states]
                trajectory = [path_element_from_observation(ob) for ob in path_observations]
                this_trajs.append({
                    'instr_id': path_observations[0]['instr_id'],
                    'instr_encoding': path_observations[0]['instr_encoding'],
                    'trajectory': trajectory,
                    'observations': path_observations,
                    'actions': path_actions,
                    'score': inf_state.score,
                    'scores': path_scores,
                    'attentions': path_attentions
                })
            trajs.append(this_trajs)
        traversed_lists = None # todo
        return trajs, completed, traversed_lists
def state_factored_search(self, completion_size, successor_size, load_next_minibatch=True, mask_undo=False, first_n_ws_key=4):
    """Beam search factored over (truncated) world states.

    Instead of a fixed-width beam of action sequences, keeps at most one
    best-scoring inference state per distinct world-state key (the first
    ``first_n_ws_key`` fields of a world state) in ``state_cache``, and
    expands up to ``successor_size`` states per step until every instance
    in the batch has collected ``completion_size`` completed candidates.

    Args:
        completion_size: number of completed candidates to gather per instance.
        successor_size: max number of states expanded per instance per step.
        load_next_minibatch: forwarded to ``self.env.reset``.
        mask_undo: if True, scores are taken from a clone of the logits
            (the original logits stay usable for the log-prob computation).
        first_n_ws_key: how many leading world-state fields form the cache key.

    Returns:
        (trajs, completed_list, traversed_lists):
        ``trajs`` — one list of result dicts per instance;
        ``completed_list`` — the matching final inference states;
        ``traversed_lists`` — the sequence of "physical states" the agent
        explored while searching, one list per instance.
    """
    assert self.env.beam_size >= successor_size
    world_states = self.env.reset(sort=True, beamed=True, load_next_minibatch=load_next_minibatch)
    initial_obs = self.env.observe(world_states, beamed=True)
    batch_size = len(world_states)

    # get mask and lengths
    seq, seq_mask, seq_lengths = self._proc_batch(initial_obs, beamed=True)

    # Forward through encoder, giving initial hidden state and memory cell for decoder
    ctx, h_t, c_t = self.encoder(seq, seq_lengths)

    # completed[i]: ws_key -> best completed InferenceState for instance i.
    # completed_holding[i]: ws_key -> (state, expanded) staging area for
    # completed states that have not yet been promoted into completed[i].
    completed = []
    completed_holding = []
    for _ in range(batch_size):
        completed.append({})
        completed_holding.append({})

    # One singleton cache per instance: ws_key -> (InferenceState, expanded).
    # The root state is marked expanded=True since it is the current beam.
    state_cache = [
        {ws[0][0:first_n_ws_key]: (InferenceState(prev_inference_state=None,
                                                  world_state=ws[0],
                                                  observation=o[0],
                                                  flat_index=None,
                                                  last_action=-1,
                                                  last_action_embedding=self.decoder.u_begin.view(-1),
                                                  action_count=0,
                                                  score=0.0, h_t=h_t[i], c_t=c_t[i], last_alpha=None), True)}
        for i, (ws, o) in enumerate(zip(world_states, initial_obs))
    ]

    beams = [[inf_state for world_state, (inf_state, expanded) in sorted(instance_cache.items())]
             for instance_cache in state_cache]  # sorting is a noop here since each instance_cache should only contain one

    # traversed_lists = None
    # list of inference states containing states in order of the states being expanded
    last_expanded_list = []
    traversed_lists = []
    for beam in beams:
        assert len(beam) == 1
        first_state = beam[0]
        last_expanded_list.append(first_state)
        traversed_lists.append([first_state])

    def update_traversed_lists(new_visited_inf_states):
        # Extend each instance's physical trajectory with the walkable path
        # from the previously expanded state to each newly visited state.
        assert len(new_visited_inf_states) == len(last_expanded_list)
        assert len(new_visited_inf_states) == len(traversed_lists)
        for instance_index, instance_states in enumerate(new_visited_inf_states):
            last_expanded = last_expanded_list[instance_index]
            # todo: if this passes, shouldn't need traversed_lists
            assert last_expanded.world_state.viewpointId == traversed_lists[instance_index][-1].world_state.viewpointId
            for inf_state in instance_states:
                path_from_last_to_next = least_common_viewpoint_path(last_expanded, inf_state)
                # path_from_last should include last_expanded's world state as the first element, so check and drop that
                assert path_from_last_to_next[0].world_state.viewpointId == last_expanded.world_state.viewpointId
                assert path_from_last_to_next[-1].world_state.viewpointId == inf_state.world_state.viewpointId
                traversed_lists[instance_index].extend(path_from_last_to_next[1:])
                last_expanded = inf_state
            last_expanded_list[instance_index] = last_expanded

    # Do a sequence rollout and calculate the loss
    while any(len(comp) < completion_size for comp in completed):
        # Flatten all beams into one decoder batch, remembering which
        # instance each row belongs to via beam_indices.
        beam_indices = []
        u_t_list = []
        h_t_list = []
        c_t_list = []
        flat_obs = []
        for beam_index, beam in enumerate(beams):
            for inf_state in beam:
                beam_indices.append(beam_index)
                u_t_list.append(inf_state.last_action_embedding)
                h_t_list.append(inf_state.h_t.unsqueeze(0))
                c_t_list.append(inf_state.c_t.unsqueeze(0))
                flat_obs.append(inf_state.observation)

        u_t_prev = torch.stack(u_t_list, dim=0)
        assert len(u_t_prev.shape) == 2
        f_t_list = self._feature_variables(flat_obs)  # Image features from obs
        all_u_t, is_valid, is_valid_numpy = self._action_variable(flat_obs)
        h_t = torch.cat(h_t_list, dim=0)
        c_t = torch.cat(c_t_list, dim=0)

        assert len(f_t_list) == 1, 'for now, only work with MeanPooled feature'
        h_t, c_t, alpha, logit, alpha_v = self.decoder(
            u_t_prev, all_u_t, f_t_list[0], h_t, c_t, ctx[beam_indices], seq_mask[beam_indices])

        # Mask outputs of invalid actions
        logit[is_valid == 0] = -float('inf')
        # # Mask outputs where agent can't move forward
        # no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]

        if mask_undo:
            masked_logit = logit.clone()
        else:
            masked_logit = logit

        log_probs = F.log_softmax(logit, dim=1).data

        # force ending if we've reached the max time steps
        # if t == self.episode_len - 1:
        #     action_scores = log_probs[:,self.end_index].unsqueeze(-1)
        #     action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
        # else:
        #_, action_indices = masked_logit.data.topk(min(successor_size, logit.size()[1]), dim=1)
        _, action_indices = masked_logit.data.topk(logit.size()[1], dim=1)  # todo: fix this
        action_scores = log_probs.gather(1, action_indices)
        assert action_scores.size() == action_indices.size()

        start_index = 0
        assert len(beams) == len(world_states)
        all_successors = []
        for beam_index, (beam, beam_world_states) in enumerate(zip(beams, world_states)):
            successors = []
            end_index = start_index + len(beam)
            assert len(beam_world_states) == len(beam)
            if beam:
                for inf_index, (inf_state, world_state, action_score_row) in \
                        enumerate(zip(beam, beam_world_states, log_probs[start_index:end_index])):
                    flat_index = start_index + inf_index
                    for action_index, action_score in enumerate(action_score_row):
                        # Skip actions that are invalid in this world state.
                        if is_valid_numpy[flat_index, action_index] == 0:
                            continue
                        successors.append(
                            InferenceState(prev_inference_state=inf_state,
                                           world_state=world_state,  # will be updated later after successors are pruned
                                           observation=flat_obs[flat_index],  # will be updated later after successors are pruned
                                           flat_index=None,
                                           last_action=action_index,
                                           last_action_embedding=all_u_t[flat_index, action_index].detach(),
                                           action_count=inf_state.action_count + 1,
                                           score=inf_state.score + action_score,
                                           h_t=h_t[flat_index], c_t=c_t[flat_index],
                                           last_alpha=alpha[flat_index].data)
                        )
            start_index = end_index
            successors = sorted(successors, key=lambda t: t.score, reverse=True)
            all_successors.append(successors)

        successor_world_states = [
            [inf_state.world_state for inf_state in successors]
            for successors in all_successors
        ]
        successor_env_actions = [
            [inf_state.last_action for inf_state in successors]
            for successors in all_successors
        ]
        successor_last_obs = [
            [inf_state.observation for inf_state in successors]
            for successors in all_successors
        ]
        successor_world_states = self.env.step(successor_world_states, successor_env_actions, successor_last_obs, beamed=True)
        all_successors = structured_map(lambda inf_state, world_state: inf_state._replace(world_state=world_state),
                                        all_successors, successor_world_states, nested=True)

        # if all_successors[0]:
        #     print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))

        assert len(all_successors) == len(state_cache)

        new_beams = []

        for beam_index, (successors, instance_cache) in enumerate(zip(all_successors, state_cache)):
            # early stop if we've already built a sizable completion list
            instance_completed = completed[beam_index]
            instance_completed_holding = completed_holding[beam_index]
            if len(instance_completed) >= completion_size:
                new_beams.append([])
                continue
            for successor in successors:
                ws_keys = successor.world_state[0:first_n_ws_key]
                # Stop action (0) or hitting the episode limit completes a path.
                if successor.last_action == 0 or successor.action_count == self.episode_len:
                    if ws_keys not in instance_completed_holding or instance_completed_holding[ws_keys][0].score < successor.score:
                        instance_completed_holding[ws_keys] = (successor, False)
                else:
                    if ws_keys not in instance_cache or instance_cache[ws_keys][0].score < successor.score:
                        instance_cache[ws_keys] = (successor, False)

            # third value: did this come from completed_holding?
            uncompleted_to_consider = ((ws_keys, inf_state, False) for (ws_keys, (inf_state, expanded)) in instance_cache.items() if not expanded)
            completed_to_consider = ((ws_keys, inf_state, True) for (ws_keys, (inf_state, expanded)) in instance_completed_holding.items() if not expanded)
            # NOTE(review): these imports could be hoisted to module level.
            import itertools
            import heapq
            to_consider = itertools.chain(uncompleted_to_consider, completed_to_consider)
            ws_keys_and_inf_states = heapq.nlargest(successor_size, to_consider, key=lambda pair: pair[1].score)

            new_beam = []
            for ws_keys, inf_state, is_completed in ws_keys_and_inf_states:
                if is_completed:
                    assert instance_completed_holding[ws_keys] == (inf_state, False)
                    instance_completed_holding[ws_keys] = (inf_state, True)
                    if ws_keys not in instance_completed or instance_completed[ws_keys].score < inf_state.score:
                        instance_completed[ws_keys] = inf_state
                else:
                    instance_cache[ws_keys] = (inf_state, True)
                    new_beam.append(inf_state)
            if len(instance_completed) >= completion_size:
                new_beams.append([])
            else:
                new_beams.append(new_beam)

        beams = new_beams

        # Early exit if all ended
        if not any(beam for beam in beams):
            break

        world_states = [
            [inf_state.world_state for inf_state in beam]
            for beam in beams
        ]
        successor_obs = self.env.observe(world_states, beamed=True)
        beams = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs),
                               beams, successor_obs, nested=True)
        update_traversed_lists(beams)

    completed_list = []
    for this_completed in completed:
        completed_list.append(sorted(this_completed.values(), key=lambda t: t.score, reverse=True)[:completion_size])
    completed_ws = [
        [inf_state.world_state for inf_state in comp_l]
        for comp_l in completed_list
    ]
    completed_obs = self.env.observe(completed_ws, beamed=True)
    completed_list = structured_map(lambda inf_state, obs: inf_state._replace(observation=obs),
                                   completed_list, completed_obs, nested=True)
    # TODO: consider moving observations and this update earlier so that we don't have to traverse as far back
    update_traversed_lists(completed_list)

    # TODO: sanity check the traversed lists here

    trajs = []
    for this_completed in completed_list:
        assert this_completed
        this_trajs = []
        for inf_state in this_completed:
            path_states, path_observations, path_actions, path_scores, path_attentions = backchain_inference_states(inf_state)
            # this will have messed-up headings for (at least some) starting locations because of
            # discretization, so read from the observations instead
            ## path = [(obs.viewpointId, state.heading, state.elevation)
            ##        for state in path_states]
            trajectory = [path_element_from_observation(ob) for ob in path_observations]
            this_trajs.append({
                'instr_id': path_observations[0]['instr_id'],
                'instr_encoding': path_observations[0]['instr_encoding'],
                'trajectory': trajectory,
                'observations': path_observations,
                'actions': path_actions,
                'score': inf_state.score,
                'scores': path_scores,
                'attentions': path_attentions
            })
        trajs.append(this_trajs)
    # completed_list: list of lists of final inference states corresponding to the candidates, one list per instance
    # traversed_lists: list of "physical states" that the robot has explored, one per instance
    return trajs, completed_list, traversed_lists
def set_beam_size(self, beam_size):
    """Record the desired beam size, growing the environment's capacity if needed."""
    # The environment's beam size only ever grows; a smaller request just
    # updates our own attribute without shrinking the env.
    if self.env.beam_size < beam_size:
        self.env.set_beam_size(beam_size)
    self.beam_size = beam_size
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, beam_size=1):
    """Evaluate once on each instruction in the current environment.

    Sets the feedback mode and train/eval mode of both models, fixes the
    beam size, then delegates the actual evaluation loop to the superclass.
    """
    if not allow_cheat:
        # Teacher forcing at test time would be cheating; it is permitted
        # only when explicitly requested (e.g. to compute validation loss).
        assert feedback in ['argmax', 'sample']
    self.feedback = feedback
    # Dropout is normally off for evaluation, but can be left on
    # (use_dropout=True) when stochastic forward passes are wanted.
    mode = 'train' if use_dropout else 'eval'
    getattr(self.encoder, mode)()
    getattr(self.decoder, mode)()
    self.set_beam_size(beam_size)
    return super(Seq2SeqAgent, self).test()
def train(self, encoder_optimizer, decoder_optimizer, n_iters, feedback='teacher'):
    """Train for a given number of iterations.

    Args:
        encoder_optimizer: optimizer stepping the encoder's parameters.
        decoder_optimizer: optimizer stepping the decoder's parameters.
        n_iters: number of rollout/backprop iterations to run.
        feedback: '+'-separated feedback modes; every component must be one
            of ``self.feedback_options``.
    """
    assert all(f in self.feedback_options for f in feedback.split("+"))
    self.feedback = feedback
    self.encoder.train()
    self.decoder.train()
    self.losses = []
    it = range(1, n_iters + 1)
    try:
        import tqdm
        it = tqdm.tqdm(it)
    except ImportError:
        # tqdm is an optional dependency; fall back to a plain range with no
        # progress bar. (A bare ``except:`` here previously swallowed every
        # exception, including KeyboardInterrupt.)
        pass
    for _ in it:
        encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()
        self._rollout_with_loss()
        self.loss.backward()
        encoder_optimizer.step()
        decoder_optimizer.step()
def _encoder_and_decoder_paths(self, base_path):
return base_path + "_enc", base_path + "_dec"
def save(self, path):
    """Snapshot the encoder and decoder state dicts under *path*-derived names."""
    encoder_path, decoder_path = self._encoder_and_decoder_paths(path)
    # Encoder is written first, then the decoder, mirroring load().
    for module, target in ((self.encoder, encoder_path), (self.decoder, decoder_path)):
        torch.save(module.state_dict(), target)
def load(self, path, **kwargs):
    """Load encoder/decoder parameters (but not training state) from *path* snapshots.

    Extra keyword arguments are forwarded to ``torch.load`` (e.g. map_location).
    """
    snapshots = self._encoder_and_decoder_paths(path)
    for module, snapshot in zip((self.encoder, self.decoder), snapshots):
        module.load_state_dict(torch.load(snapshot, **kwargs))
| [
"itertools.chain",
"torch.nn.CrossEntropyLoss",
"torch.distributions.Categorical",
"torch.LongTensor",
"utils.flatten",
"torch.from_numpy",
"numpy.array",
"sys.exit",
"torch.nn.functional.softmax",
"numpy.arange",
"numpy.concatenate",
"torch.autograd.Variable",
"collections.namedtuple",
"h... | [((578, 756), 'collections.namedtuple', 'namedtuple', (['"""InferenceState"""', '"""prev_inference_state, world_state, observation, flat_index, last_action, last_action_embedding, action_count, score, h_t, c_t, last_alpha"""'], {}), "('InferenceState',\n 'prev_inference_state, world_state, observation, flat_index, last_action, last_action_embedding, action_count, score, h_t, c_t, last_alpha'\n )\n", (588, 756), False, 'from collections import namedtuple\n'), ((756, 789), 'collections.namedtuple', 'namedtuple', (['"""Cons"""', '"""first, rest"""'], {}), "('Cons', 'first, rest')\n", (766, 789), False, 'from collections import namedtuple\n'), ((3096, 3150), 'numpy.full', 'np.full', (['(num_instructions, max_length)', 'vocab_pad_idx'], {}), '((num_instructions, max_length), vocab_pad_idx)\n', (3103, 3150), True, 'import numpy as np\n'), ((3529, 3557), 'torch.from_numpy', 'torch.from_numpy', (['seq_tensor'], {}), '(seq_tensor)\n', (3545, 3557), False, 'import torch\n'), ((3360, 3399), 'numpy.concatenate', 'np.concatenate', (['(inst, [vocab_eos_idx])'], {}), '((inst, [vocab_eos_idx]))\n', (3374, 3399), True, 'import numpy as np\n'), ((4234, 4248), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (4245, 4248), False, 'import random\n'), ((11382, 11418), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(-1)'}), '(ignore_index=-1)\n', (11401, 11418), True, 'import torch.nn as nn\n'), ((14888, 14918), 'numpy.array', 'np.array', (['([False] * batch_size)'], {}), '([False] * batch_size)\n', (14896, 14918), True, 'import numpy as np\n'), ((17929, 17950), 'numpy.array', 'np.array', (['initial_obs'], {}), '(initial_obs)\n', (17937, 17950), True, 'import numpy as np\n'), ((18891, 18921), 'numpy.array', 'np.array', (['([False] * batch_size)'], {}), '([False] * batch_size)\n', (18899, 18921), True, 'import numpy as np\n'), ((4619, 4640), 'json.dump', 'json.dump', (['results', 'f'], {}), '(results, f)\n', (4628, 4640), False, 'import json\n'), 
((13579, 13611), 'torch.autograd.Variable', 'Variable', (['a'], {'requires_grad': '(False)'}), '(a, requires_grad=False)\n', (13587, 13611), False, 'from torch.autograd import Variable\n'), ((14954, 14977), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (14965, 14977), False, 'import torch\n'), ((16571, 16597), 'torch.clamp', 'torch.clamp', (['target'], {'min': '(0)'}), '(target, min=0)\n', (16582, 16597), False, 'import torch\n'), ((19103, 19126), 'torch.zeros', 'torch.zeros', (['batch_size'], {}), '(batch_size)\n', (19114, 19126), False, 'import torch\n'), ((24141, 24169), 'torch.stack', 'torch.stack', (['u_t_list'], {'dim': '(0)'}), '(u_t_list, dim=0)\n', (24152, 24169), False, 'import torch\n'), ((24237, 24249), 'utils.flatten', 'flatten', (['obs'], {}), '(obs)\n', (24244, 24249), False, 'from utils import vocab_pad_idx, vocab_eos_idx, flatten, structured_map, try_cuda\n'), ((35462, 35490), 'torch.stack', 'torch.stack', (['u_t_list'], {'dim': '(0)'}), '(u_t_list, dim=0)\n', (35473, 35490), False, 'import torch\n'), ((35716, 35742), 'torch.cat', 'torch.cat', (['h_t_list'], {'dim': '(0)'}), '(h_t_list, dim=0)\n', (35725, 35742), False, 'import torch\n'), ((35761, 35787), 'torch.cat', 'torch.cat', (['c_t_list'], {'dim': '(0)'}), '(c_t_list, dim=0)\n', (35770, 35787), False, 'import torch\n'), ((46909, 46922), 'tqdm.tqdm', 'tqdm.tqdm', (['it'], {}), '(it)\n', (46918, 46922), False, 'import tqdm\n'), ((47775, 47809), 'torch.load', 'torch.load', (['encoder_path'], {}), '(encoder_path, **kwargs)\n', (47785, 47809), False, 'import torch\n'), ((47848, 47882), 'torch.load', 'torch.load', (['decoder_path'], {}), '(decoder_path, **kwargs)\n', (47858, 47882), False, 'import torch\n'), ((19951, 19977), 'torch.clamp', 'torch.clamp', (['target'], {'min': '(0)'}), '(target, min=0)\n', (19962, 19977), False, 'import torch\n'), ((25083, 25110), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (25096, 
25110), True, 'import torch.nn.functional as F\n'), ((36430, 36457), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (36443, 36457), True, 'import torch.nn.functional as F\n'), ((41825, 41888), 'itertools.chain', 'itertools.chain', (['uncompleted_to_consider', 'completed_to_consider'], {}), '(uncompleted_to_consider, completed_to_consider)\n', (41840, 41888), False, 'import itertools\n'), ((41930, 42005), 'heapq.nlargest', 'heapq.nlargest', (['successor_size', 'to_consider'], {'key': '(lambda pair: pair[1].score)'}), '(successor_size, to_consider, key=lambda pair: pair[1].score)\n', (41944, 42005), False, 'import heapq\n'), ((3620, 3641), 'numpy.array', 'np.array', (['seq_lengths'], {}), '(seq_lengths)\n', (3628, 3641), True, 'import numpy as np\n'), ((3826, 3867), 'torch.autograd.Variable', 'Variable', (['seq_tensor'], {'requires_grad': '(False)'}), '(seq_tensor, requires_grad=False)\n', (3834, 3867), False, 'from torch.autograd import Variable\n'), ((13724, 13736), 'utils.flatten', 'flatten', (['obs'], {}), '(obs)\n', (13731, 13736), False, 'from utils import vocab_pad_idx, vocab_eos_idx, flatten, structured_map, try_cuda\n'), ((15912, 15946), 'torch.LongTensor', 'torch.LongTensor', (['next_target_list'], {}), '(next_target_list)\n', (15928, 15946), False, 'import torch\n'), ((16759, 16820), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logit', 'target'], {'ignore_index': '(-1)', 'reduce': '(False)'}), '(logit, target, ignore_index=-1, reduce=False)\n', (16774, 16820), True, 'import torch.nn.functional as F\n'), ((20797, 20855), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logit', 'a_t'], {'ignore_index': '(-1)', 'reduce': '(False)'}), '(logit, a_t, ignore_index=-1, reduce=False)\n', (20812, 20855), True, 'import torch.nn.functional as F\n'), ((13095, 13130), 'torch.from_numpy', 'torch.from_numpy', (['action_embeddings'], {}), '(action_embeddings)\n', (13111, 13130), False, 'import 
torch\n'), ((13182, 13208), 'torch.from_numpy', 'torch.from_numpy', (['is_valid'], {}), '(is_valid)\n', (13198, 13208), False, 'import torch\n'), ((20186, 20209), 'torch.nn.functional.softmax', 'F.softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (20195, 20209), True, 'import torch.nn.functional as F\n'), ((20530, 20550), 'torch.distributions.Categorical', 'D.Categorical', (['probs'], {}), '(probs)\n', (20543, 20550), True, 'import torch.distributions as D\n'), ((20618, 20653), 'sys.exit', 'sys.exit', (['"""Invalid feedback option"""'], {}), "('Invalid feedback option')\n", (20626, 20653), False, 'import sys\n'), ((16689, 16710), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (16698, 16710), True, 'import numpy as np\n'), ((20727, 20748), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (20736, 20748), True, 'import numpy as np\n'), ((11967, 11979), 'utils.flatten', 'flatten', (['obs'], {}), '(obs)\n', (11974, 11979), False, 'from utils import vocab_pad_idx, vocab_eos_idx, flatten, structured_map, try_cuda\n')] |
import os
import math
import ffmpeg
import numpy as np
from vispy import scene, app, io, geometry
from vispy.color import Color
from vispy.visuals import transforms
from vispy.scene.cameras import TurntableCamera
from .. import util as util
# Gzipped Crazyflie quadrotor mesh shipped alongside this module.
CF_MESH_PATH = os.path.join(os.path.dirname(__file__), "crazyflie2.obj.gz")

# Convert millimeters to meters, but make twice as big so easier to see.
MESHFILE_SCALE = 2.0 * 0.001

# The matrix that rotates the coordinates of the .obj file to agree with the
# Crazyflie's standard coordinate system. VisPy uses [row vector] * [matrix]
# (like DirectX), so this is the transpose of what we would expect.
UNROT_MESHFILE_TRANSPOSE = MESHFILE_SCALE * np.array([
    [-1,  0, 0],
    [ 0,  0, 1],
    [ 0, -1, 0],
])

# Collision-ellipsoid colors: translucent green when clear, red on collision.
ELLIPSOID_COLOR_OK = Color("#11FF22", alpha=0.1)
ELLIPSOID_COLOR_COLLISION = Color("#FF0000", alpha=0.1)
class VisVispy:
    """VisPy-based 3D visualizer for a swarm of Crazyflie quadrotors.

    Draws a ground plane, one mesh per vehicle, and (lazily) an optional
    connectivity graph and per-vehicle collision ellipsoids.
    """

    def __init__(self, show=True, resizable=True):
        self.canvas = scene.SceneCanvas(
            keys='interactive', size=(1024, 768), show=show, config=dict(samples=4), resizable=resizable
        )
        self.plane_color = 0.25 * np.ones((1, 3))
        self.bg_color = 0.9 * np.ones((1, 3))
        self.line_color = 0.7 * np.ones((1, 3))

        # Set up a viewbox to display the cube with interactive arcball
        self.view = self.canvas.central_widget.add_view()
        self.view.bgcolor = self.bg_color
        self.view.camera = TurntableCamera(
            fov=30.0, elevation=30.0, azimuth=90.0, center=(0.0, 0.0, 1.25)
        )
        self.cam_state = self.view.camera.get_state()

        # add a colored 3D axis for orientation
        axis = scene.visuals.XYZAxis(parent=self.view.scene)
        self.cfs = []
        # Last LED color pushed to each mesh, so we only touch the mesh
        # (and set its dirty flag) when the color actually changes.
        self.led_color_cache = []

        ground = scene.visuals.Plane(
            32.0, 32.0, direction="+z", color=self.plane_color, parent=self.view.scene
        )

        # Lazy-constructed vispy objects and data for connectivity graph gfx.
        self.graph_edges = None
        self.graph_lines = None
        self.graph = None

        # Lazy-constructed vispy objects for collision ellipsoids.
        self.ellipsoids = None
        self.ellipsoid_radii = None

    def setGraph(self, edges):
        """Set edges of graph visualization - sequence of (i,j) tuples."""
        # Only allocate new memory if we need to.
        n_edges = len(edges)
        if self.graph_edges is None or n_edges != len(self.graph_edges):
            # Two endpoints (xyz each) per edge.
            self.graph_lines = np.zeros((2 * n_edges, 3))
        self.graph_edges = edges

        # Lazily construct VisPy object for graph.
        if self.graph is None:
            self.graph = scene.visuals.Line(
                parent=self.view.scene,
                color=self.line_color,
                pos=self.graph_lines,
                connect="segments",
                method="gl",
                antialias=True,
            )

    def showEllipsoids(self, radii):
        # Enable collision-ellipsoid rendering with the given per-axis radii;
        # the meshes themselves are created lazily in update().
        self.ellipsoid_radii = np.array(radii)

    def update(self, t, crazyflies):
        """Sync meshes, graph and ellipsoids to the crazyflies' current state.

        ``t`` is accepted for interface compatibility but not used here.
        """
        # First call: build one mesh per Crazyflie from the .obj file.
        if len(self.cfs) == 0:
            verts, faces, normals, nothin = io.read_mesh(CF_MESH_PATH)
            for i, cf in enumerate(crazyflies):
                color = cf.ledRGB
                mesh = scene.visuals.Mesh(
                    parent=self.view.scene,
                    vertices=verts,
                    faces=faces,
                    color=color,
                    shading="smooth",
                )
                mesh.unfreeze()
                mesh.light_dir = (0.1, 0.1, 1.0)
                mesh.shininess = 0.01
                mesh.ambient_light_color = [0.5] * 3
                mesh.transform = transforms.MatrixTransform()
                self.cfs.append(mesh)
                self.led_color_cache.append(color)

        # First call after showEllipsoids(): build one unit sphere per vehicle;
        # it is scaled to the requested radii each frame below.
        if self.ellipsoid_radii is not None and self.ellipsoids is None:
            sphere_mesh = geometry.create_sphere(radius=1.0)
            self.ellipsoids = [
                scene.visuals.Mesh(
                    parent=self.view.scene,
                    meshdata=sphere_mesh,
                    color=ELLIPSOID_COLOR_OK,
                    shading="smooth",
                )
                for _ in self.cfs
            ]
            for ell in self.ellipsoids:
                ell.light_dir = (0.1, 0.1, 1.0)
                ell.shininess = 0.0
                ell.ambient_light_color = [0.5] * 3
                ell.transform = transforms.MatrixTransform()

        positions = np.stack([cf.position() for cf in crazyflies])

        for i in range(0, len(self.cfs)):
            R_state = crazyflies[i].rotBodyToWorld()
            # Recall VisPy uses [row vector] * [matrix]!!
            T = np.eye(4)
            T[:3, :3] = np.dot(UNROT_MESHFILE_TRANSPOSE, R_state.T)
            T[3, :3] = positions[i]
            self.cfs[i].transform = transforms.MatrixTransform(T)
            # vispy does not do this check
            color = crazyflies[i].ledRGB
            if color != self.led_color_cache[i]:
                self.led_color_cache[i] = color
                self.cfs[i].color = color  # sets dirty flag

        # Update graph line segments to match new Crazyflie positions.
        if self.graph is not None:
            for k, (i, j) in enumerate(self.graph_edges):
                self.graph_lines[2 * k, :] = positions[i]
                self.graph_lines[2 * k + 1, :] = positions[j]
            self.graph.set_data(self.graph_lines)

        # Update collision ellipsoids.
        if self.ellipsoids is not None:
            colliding = util.check_ellipsoid_collisions(positions, self.ellipsoid_radii)
            for i, pos in enumerate(positions):
                ell = self.ellipsoids[i]
                tf = ell.transform
                tf.reset()
                tf.scale(self.ellipsoid_radii)
                tf.translate(pos)
                new_color = ELLIPSOID_COLOR_COLLISION if colliding[i] else ELLIPSOID_COLOR_OK
                if not (new_color == ell.color):  # vispy Color lacks != override.
                    ell.color = new_color

        self.canvas.app.process_events()

    def render(self):
        """Render the canvas and return an (H, W, 3) uint8 RGB frame."""
        frame = self.canvas.render()
        # Do not render alpha channel - we always use rgb24 format.
        if frame.shape[2] == 4:
            frame = frame[:, :, :3]
        return frame
| [
"vispy.scene.visuals.Line",
"numpy.eye",
"numpy.ones",
"vispy.scene.cameras.TurntableCamera",
"vispy.visuals.transforms.MatrixTransform",
"numpy.array",
"os.path.dirname",
"vispy.scene.visuals.XYZAxis",
"numpy.zeros",
"vispy.io.read_mesh",
"vispy.geometry.create_sphere",
"numpy.dot",
"vispy.... | [((778, 805), 'vispy.color.Color', 'Color', (['"""#11FF22"""'], {'alpha': '(0.1)'}), "('#11FF22', alpha=0.1)\n", (783, 805), False, 'from vispy.color import Color\n'), ((835, 862), 'vispy.color.Color', 'Color', (['"""#FF0000"""'], {'alpha': '(0.1)'}), "('#FF0000', alpha=0.1)\n", (840, 862), False, 'from vispy.color import Color\n'), ((273, 298), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (288, 298), False, 'import os\n'), ((689, 734), 'numpy.array', 'np.array', (['[[-1, 0, 0], [0, 0, 1], [0, -1, 0]]'], {}), '([[-1, 0, 0], [0, 0, 1], [0, -1, 0]])\n', (697, 734), True, 'import numpy as np\n'), ((1433, 1518), 'vispy.scene.cameras.TurntableCamera', 'TurntableCamera', ([], {'fov': '(30.0)', 'elevation': '(30.0)', 'azimuth': '(90.0)', 'center': '(0.0, 0.0, 1.25)'}), '(fov=30.0, elevation=30.0, azimuth=90.0, center=(0.0, 0.0, 1.25)\n )\n', (1448, 1518), False, 'from vispy.scene.cameras import TurntableCamera\n'), ((1654, 1699), 'vispy.scene.visuals.XYZAxis', 'scene.visuals.XYZAxis', ([], {'parent': 'self.view.scene'}), '(parent=self.view.scene)\n', (1675, 1699), False, 'from vispy import scene, app, io, geometry\n'), ((1774, 1873), 'vispy.scene.visuals.Plane', 'scene.visuals.Plane', (['(32.0)', '(32.0)'], {'direction': '"""+z"""', 'color': 'self.plane_color', 'parent': 'self.view.scene'}), "(32.0, 32.0, direction='+z', color=self.plane_color,\n parent=self.view.scene)\n", (1793, 1873), False, 'from vispy import scene, app, io, geometry\n'), ((2972, 2987), 'numpy.array', 'np.array', (['radii'], {}), '(radii)\n', (2980, 2987), True, 'import numpy as np\n'), ((1123, 1138), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (1130, 1138), True, 'import numpy as np\n'), ((1169, 1184), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (1176, 1184), True, 'import numpy as np\n'), ((1217, 1232), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (1224, 1232), True, 'import numpy as np\n'), ((2487, 2513), 'numpy.zeros', 
'np.zeros', (['(2 * n_edges, 3)'], {}), '((2 * n_edges, 3))\n', (2495, 2513), True, 'import numpy as np\n'), ((2655, 2796), 'vispy.scene.visuals.Line', 'scene.visuals.Line', ([], {'parent': 'self.view.scene', 'color': 'self.line_color', 'pos': 'self.graph_lines', 'connect': '"""segments"""', 'method': '"""gl"""', 'antialias': '(True)'}), "(parent=self.view.scene, color=self.line_color, pos=self.\n graph_lines, connect='segments', method='gl', antialias=True)\n", (2673, 2796), False, 'from vispy import scene, app, io, geometry\n'), ((3101, 3127), 'vispy.io.read_mesh', 'io.read_mesh', (['CF_MESH_PATH'], {}), '(CF_MESH_PATH)\n', (3113, 3127), False, 'from vispy import scene, app, io, geometry\n'), ((3878, 3912), 'vispy.geometry.create_sphere', 'geometry.create_sphere', ([], {'radius': '(1.0)'}), '(radius=1.0)\n', (3900, 3912), False, 'from vispy import scene, app, io, geometry\n'), ((4692, 4701), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (4698, 4701), True, 'import numpy as np\n'), ((4726, 4769), 'numpy.dot', 'np.dot', (['UNROT_MESHFILE_TRANSPOSE', 'R_state.T'], {}), '(UNROT_MESHFILE_TRANSPOSE, R_state.T)\n', (4732, 4769), True, 'import numpy as np\n'), ((4842, 4871), 'vispy.visuals.transforms.MatrixTransform', 'transforms.MatrixTransform', (['T'], {}), '(T)\n', (4868, 4871), False, 'from vispy.visuals import transforms\n'), ((3233, 3339), 'vispy.scene.visuals.Mesh', 'scene.visuals.Mesh', ([], {'parent': 'self.view.scene', 'vertices': 'verts', 'faces': 'faces', 'color': 'color', 'shading': '"""smooth"""'}), "(parent=self.view.scene, vertices=verts, faces=faces,\n color=color, shading='smooth')\n", (3251, 3339), False, 'from vispy import scene, app, io, geometry\n'), ((3660, 3688), 'vispy.visuals.transforms.MatrixTransform', 'transforms.MatrixTransform', ([], {}), '()\n', (3686, 3688), False, 'from vispy.visuals import transforms\n'), ((3961, 4074), 'vispy.scene.visuals.Mesh', 'scene.visuals.Mesh', ([], {'parent': 'self.view.scene', 'meshdata': 'sphere_mesh', 
'color': 'ELLIPSOID_COLOR_OK', 'shading': '"""smooth"""'}), "(parent=self.view.scene, meshdata=sphere_mesh, color=\n ELLIPSOID_COLOR_OK, shading='smooth')\n", (3979, 4074), False, 'from vispy import scene, app, io, geometry\n'), ((4425, 4453), 'vispy.visuals.transforms.MatrixTransform', 'transforms.MatrixTransform', ([], {}), '()\n', (4451, 4453), False, 'from vispy.visuals import transforms\n')] |
import scipy.integrate as intg
import numpy as np
import matplotlib.pyplot as plt
# Physical constants
# Everything is in MKS units
h = 6.6261e-34      # Planck constant [J*s]
kB = 1.3806e-23     # Boltzmann constant [J/K]
c = 299792458.0     # Speed of light [m/s]
PI = np.pi          # Pi
eps0 = 8.85e-12     # Vacuum permittivity [F/m]
rho = 2.417e-8      # Electrical resistivity of the mirror [ohm*m]
Tcmb = 2.725        # CMB temperature [K]

# Conversions
GHz = 10 ** 9       # hertz per gigahertz
# Note: this returns spectral radiance, not integrated power; integrate the
# spectrum (e.g. with powFromSpec) to get total power.
def bbSpec(freq, temp, emis):
    """Black-body spectral radiance at frequency *freq* [Hz].

    *emis* may be a constant emissivity or a callable of frequency.
    A temperature of zero short-circuits to zero.
    """
    if temp == 0:
        return 0
    emissivity = emis(freq) if callable(emis) else emis
    occupation = 1.0 / (np.exp(h * freq / (temp * kB)) - 1)
    return 2 * emissivity * h * freq**3 / (c**2) * occupation
def weightedSpec(freq, temp, emis):
    """Black-body spectrum weighted by single-mode throughput A*Omega = (c/freq)**2."""
    throughput = (c / freq) ** 2
    return throughput * bbSpec(freq, temp, emis)
def powFromSpec(freqs, spec):
    """Integrate *spec* over *freqs* with the trapezoidal rule.

    Args:
        freqs: 1-D array-like of sample frequencies [Hz].
        spec: spectral values evaluated at ``freqs``.

    Returns:
        The trapezoidal integral of ``spec`` with respect to ``freqs``.
    """
    # np.trapz was deprecated and removed in NumPy 2.0 in favor of
    # np.trapezoid; support both old and new NumPy versions.
    trapezoid = getattr(np, "trapezoid", None)
    if trapezoid is None:
        trapezoid = np.trapz  # NumPy < 2.0
    return trapezoid(spec, freqs)
def spillEff(det):
    """Aperture (spill) efficiency of a Gaussian beam on the detector pixel.

    Reads pixSize, f_num, waistFact and band_center from *det*.
    """
    wavelength = c / det.band_center
    beam_ratio = det.pixSize / (det.waistFact * det.f_num * wavelength)
    return 1. - np.exp((-np.power(np.pi, 2) / 2.) * np.power(beam_ratio, 2))
def getLambdaOpt(nu, chi):
    """Mirror loss term lambda_opt at frequency *nu* [Hz] and incidence angle *chi* [rad]."""
    cos_chi = np.cos(chi)
    geom = 1 / cos_chi - cos_chi
    return -2 * geom * np.sqrt(4 * PI * eps0 * rho * nu)
def aniPowSpec(emis, freq, temp=Tcmb):
    """Temperature derivative of the black-body spectrum, weighted by emissivity."""
    emissivity = emis(freq) if callable(emis) else emis
    boltz = np.exp(h * freq / (temp * kB))
    occ = 1.0 / (boltz - 1)
    return (2 * (h**2) / kB) * emissivity * (occ**2) * ((freq**2) / (temp**2)) * boltz
if __name__=="__main__":
    # Smoke test: spectral radiance of a 100 K unit-emissivity body at 145 GHz.
    print(bbSpec(145 * GHz, 100, 1))
| [
"numpy.trapz",
"numpy.sqrt",
"numpy.power",
"numpy.exp",
"numpy.cos"
] | [((1028, 1049), 'numpy.trapz', 'np.trapz', (['spec', 'freqs'], {}), '(spec, freqs)\n', (1036, 1049), True, 'import numpy as np\n'), ((1401, 1412), 'numpy.cos', 'np.cos', (['chi'], {}), '(chi)\n', (1407, 1412), True, 'import numpy as np\n'), ((1438, 1471), 'numpy.sqrt', 'np.sqrt', (['(4 * PI * eps0 * rho * nu)'], {}), '(4 * PI * eps0 * rho * nu)\n', (1445, 1471), True, 'import numpy as np\n'), ((1700, 1730), 'numpy.exp', 'np.exp', (['(h * freq / (kB * temp))'], {}), '(h * freq / (kB * temp))\n', (1706, 1730), True, 'import numpy as np\n'), ((633, 663), 'numpy.exp', 'np.exp', (['(h * freq / (temp * kB))'], {}), '(h * freq / (temp * kB))\n', (639, 663), True, 'import numpy as np\n'), ((1387, 1398), 'numpy.cos', 'np.cos', (['chi'], {}), '(chi)\n', (1393, 1398), True, 'import numpy as np\n'), ((1610, 1640), 'numpy.exp', 'np.exp', (['(h * freq / (temp * kB))'], {}), '(h * freq / (temp * kB))\n', (1616, 1640), True, 'import numpy as np\n'), ((1263, 1308), 'numpy.power', 'np.power', (['(D / (waistFact * F * (c / freq)))', '(2)'], {}), '(D / (waistFact * F * (c / freq)), 2)\n', (1271, 1308), True, 'import numpy as np\n'), ((1241, 1259), 'numpy.power', 'np.power', (['np.pi', '(2)'], {}), '(np.pi, 2)\n', (1249, 1259), True, 'import numpy as np\n')] |
import pickle
import pathlib
import numpy as np
from bci3wads.utils import constants
class Epoch:
    """One P300-speller recording epoch: multichannel signals plus stimulus metadata."""

    def __init__(self, signals, flashing, stimulus_codes, stimulus_types,
                 target_char):
        self.n_channels = signals.shape[1]
        self.signals = signals
        self.flashing = flashing
        self.stimulus_codes = stimulus_codes
        self.stimulus_types = stimulus_types
        self.target_char = target_char

    def flash_start_indices(self):
        """Indices where a stimulus flash begins (rising edges of the flashing signal)."""
        flags = self.flashing
        starts = []
        for i in range(len(flags)):
            # The first sample is always treated as a flash onset.
            if i == 0 or (flags[i] == 1 and flags[i - 1] == 0):
                starts.append(i)
        return starts

    def sample_channel(self, indices, channel_id=constants.CHANNEL_ID,
                       window_size=constants.WINDOW_SIZE):
        """Windows of *window_size* samples from one channel, starting at *indices*."""
        channel = self.signals[:, channel_id]
        return np.array([channel[start:start + window_size] for start in indices])

    def samples_codes(self, indices):
        """Stimulus codes observed at the given sample indices."""
        return self.stimulus_codes[indices]

    def process_channel(self, channel_id=constants.CHANNEL_ID,
                        window_size=constants.WINDOW_SIZE):
        """Group one channel's flash-locked windows by stimulus code."""
        starts = self.flash_start_indices()
        samples = self.sample_channel(starts, window_size=window_size,
                                      channel_id=channel_id)
        codes = self.samples_codes(starts)
        n_codes = len(np.unique(codes))  # Should be 12
        return np.array([
            samples[np.nonzero(codes == code)[0]]
            for code in range(n_codes)
        ])

    def process_channels(self, channel_ids=constants.CHANNEL_IDS,
                         window_size=constants.WINDOW_SIZE):
        """Concatenate processed windows from several channels along the last axis."""
        per_channel = [
            self.process_channel(channel_id, window_size)
            for channel_id in channel_ids
        ]
        return np.concatenate(per_channel, axis=-1)

    def target_char_codes(self):
        """Stimulus codes (row index + 6, column index) that flash the target character."""
        hits = np.nonzero(constants.CHARACTERS == self.target_char)
        return [hits[0][0] + 6, hits[1][0]]

    def target_char_coords(self):
        """(row, column) position of the target character in the speller matrix."""
        hits = np.nonzero(constants.CHARACTERS == self.target_char)
        return [hits[0][0], hits[1][0]]
class Subject:
    """A recording session loaded from a pickled intermediate-data file.

    Exposes the raw arrays from the pickle plus an `epochs` view that wraps
    each trial in an `Epoch` object.
    """

    def __init__(self, filename, is_train=True):
        """Load pickled data for `filename` from `constants.INTER_DATA_PATH`.

        `is_train` selects whether stimulus types and target characters are
        expected to be present (training data) or absent (test data).
        """
        with open(constants.INTER_DATA_PATH.joinpath(filename), 'rb') as f:
            data = pickle.load(f)
        self.is_train = is_train
        self.name = pathlib.Path(filename).stem
        self.signals = data['signals']
        # .get() is used for fields that only exist in training data.
        self.target_chars = data.get('target_chars')
        self.flashings = data['flashings']
        self.stimulus_codes = data['stimulus_codes']
        self.stimulus_types = data.get('stimulus_types')
        # self.epochs = [
        #     Epoch(signal, flashing, codes, types, target_char)
        #     for signal, flashing, codes, types, target_char in zip(
        #         self.signals, self.flashings, self.stimulus_codes,
        #         self.stimulus_types, self.target_chars
        #     )
        # ]

    @property
    def epochs(self):
        """Build one Epoch per trial; recomputed on every access."""
        if self.is_train:
            return [
                Epoch(signal, flashing, codes, types, target_char)
                for signal, flashing, codes, types, target_char in zip(
                    self.signals, self.flashings, self.stimulus_codes,
                    self.stimulus_types, self.target_chars
                )
            ]
        else:
            # Test data carries no labels, so types/target are None.
            return [
                Epoch(signal, flashing, codes, stimulus_types=None,
                      target_char=None)
                for signal, flashing, codes in zip(
                    self.signals, self.flashings, self.stimulus_codes
                )
            ]

    def process_epoch_channels(self, epoch_id=constants.EPOCH_ID,
                               channel_ids=constants.CHANNEL_IDS,
                               window_size=constants.WINDOW_SIZE):
        """Run channel processing for a single epoch and return the result."""
        processed_channels = self.epochs[epoch_id].process_channels(
            channel_ids, window_size)
        return processed_channels

    def process_epoch(self, processed_channels, target_char, target_char_codes,
                      target_char_coords,
                      epoch_id=constants.EPOCH_ID,
                      channel_ids=constants.CHANNEL_IDS):
        """Bundle processed channels and labels into a serializable dict."""
        data = {}
        data['target_char'] = target_char
        data['target_char_codes'] = target_char_codes
        data['target_char_coords'] = target_char_coords
        data['epoch_id'] = epoch_id
        data['channel_ids'] = channel_ids
        data['processed_channels'] = processed_channels
        return data

    def save_epoch(self, data):
        """Pickle an epoch dict under PROC_DATA_PATH/<subject>/channels_<ids>/."""
        dir_path = constants.PROC_DATA_PATH / self.name / \
            f"channels_{'_'.join([str(ind) for ind in data['channel_ids']])}"
        dir_path.mkdir(parents=True, exist_ok=True)
        filename = f"epoch_{data['epoch_id']}.pickle"
        file_path = dir_path.joinpath(filename)
        with open(file_path, 'wb') as f:
            pickle.dump(data, f)
| [
"pickle.dump",
"numpy.unique",
"pathlib.Path",
"pickle.load",
"numpy.array",
"numpy.nonzero",
"bci3wads.utils.constants.INTER_DATA_PATH.joinpath"
] | [((920, 983), 'numpy.array', 'np.array', (['[channel_signals[i:i + window_size] for i in indices]'], {}), '([channel_signals[i:i + window_size] for i in indices])\n', (928, 983), True, 'import numpy as np\n'), ((1664, 1719), 'numpy.array', 'np.array', (['[samples[position] for position in positions]'], {}), '([samples[position] for position in positions])\n', (1672, 1719), True, 'import numpy as np\n'), ((2125, 2177), 'numpy.nonzero', 'np.nonzero', (['(constants.CHARACTERS == self.target_char)'], {}), '(constants.CHARACTERS == self.target_char)\n', (2135, 2177), True, 'import numpy as np\n'), ((2277, 2329), 'numpy.nonzero', 'np.nonzero', (['(constants.CHARACTERS == self.target_char)'], {}), '(constants.CHARACTERS == self.target_char)\n', (2287, 2329), True, 'import numpy as np\n'), ((1492, 1508), 'numpy.unique', 'np.unique', (['codes'], {}), '(codes)\n', (1501, 1508), True, 'import numpy as np\n'), ((2536, 2550), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2547, 2550), False, 'import pickle\n'), ((2605, 2627), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (2617, 2627), False, 'import pathlib\n'), ((5155, 5175), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (5166, 5175), False, 'import pickle\n'), ((2459, 2503), 'bci3wads.utils.constants.INTER_DATA_PATH.joinpath', 'constants.INTER_DATA_PATH.joinpath', (['filename'], {}), '(filename)\n', (2493, 2503), False, 'from bci3wads.utils import constants\n'), ((1570, 1592), 'numpy.nonzero', 'np.nonzero', (['(codes == i)'], {}), '(codes == i)\n', (1580, 1592), True, 'import numpy as np\n')] |
'''
Unit test of the wrapper for the connected ls mask creation c++ code
Created on May 25, 2016
@author: thomasriddick
'''
import unittest
import numpy as np
from Dynamic_HD_Scripts.interface.cpp_interface.libs \
import create_connected_lsmask_wrapper as cc_lsmask_wrapper #@UnresolvedImport
class Test(unittest.TestCase):
    """Unit test object"""

    def setUp(self):
        """Unit test setup function. Prepare test data"""
        # NOTE(review): 1 appears to mark sea and 0 land — confirm against
        # the create_connected_lsmask_wrapper documentation.
        self.landsea_in = np.asarray([[0,0,1,0,1,1,0,1,0,1],
                                     [1,0,0,0,0,1,0,1,1,1],
                                     [0,0,0,1,1,1,1,1,1,0],
                                     [0,0,1,0,1,0,0,1,1,1],
                                     [0,0,0,1,0,0,0,0,0,1],
                                     [0,1,0,1,0,0,1,0,1,0],
                                     [0,1,0,0,0,0,0,1,0,0],
                                     [0,0,0,0,0,1,0,0,0,0],
                                     [1,1,0,0,1,1,1,0,1,0],
                                     [1,1,1,0,0,0,0,0,0,0]],
                                    dtype=np.int32, order='C')
        # Seed points mark ocean cells from which connectivity is grown.
        self.ocean_seed_points = np.asarray([[0,0,1,0,0,0,0,0,0,0],
                                            [0,0,0,0,0,0,0,0,1,0],
                                            [0,0,0,0,0,0,0,0,0,0],
                                            [0,0,0,0,0,0,0,0,0,0],
                                            [0,0,0,0,0,0,0,0,0,0],
                                            [0,1,0,0,0,0,0,0,0,0],
                                            [0,1,0,0,0,0,0,0,0,0],
                                            [0,0,0,0,0,0,0,0,0,0],
                                            [0,0,0,0,0,0,0,0,0,0],
                                            [1,0,0,0,0,0,0,0,0,0]],
                                           dtype=np.int32, order='C')
        self.expected_output_using_diagonals = np.asarray([[0,0,1,0,1,1,0,1,0,1],
                                                          [1,0,0,0,0,1,0,1,1,1],
                                                          [0,0,0,1,1,1,1,1,1,0],
                                                          [0,0,1,0,1,0,0,1,1,1],
                                                          [0,0,0,1,0,0,0,0,0,1],
                                                          [0,1,0,1,0,0,1,0,1,0],
                                                          [0,1,0,0,0,0,0,1,0,0],
                                                          [0,0,0,0,0,0,0,0,0,0],
                                                          [1,1,0,0,0,0,0,0,0,0],
                                                          [1,1,1,0,0,0,0,0,0,0]],
                                                         dtype=np.int32, order='C')
        self.expected_output_not_using_diagonals = np.asarray([[0,0,1,0,1,1,0,1,0,1],
                                                              [1,0,0,0,0,1,0,1,1,1],
                                                              [0,0,0,1,1,1,1,1,1,0],
                                                              [0,0,0,0,1,0,0,1,1,1],
                                                              [0,0,0,0,0,0,0,0,0,1],
                                                              [0,1,0,0,0,0,0,0,0,0],
                                                              [0,1,0,0,0,0,0,0,0,0],
                                                              [0,0,0,0,0,0,0,0,0,0],
                                                              [1,1,0,0,0,0,0,0,0,0],
                                                              [1,1,1,0,0,0,0,0,0,0]],
                                                             dtype=np.int32, order='C')

    def testCreateConnectedLsmaskWrapperUsingDiagonals(self):
        """Test creating a connected ls mask including diagonal connections"""
        # The wrapper modifies self.landsea_in in place.
        cc_lsmask_wrapper.create_connected_ls_mask(self.landsea_in, #@UndefinedVariable
                                                   self.ocean_seed_points,
                                                   True)
        np.testing.assert_array_equal(self.landsea_in,
                                      self.expected_output_using_diagonals,
                                      "Creating a connected ls mask using diagonal doesn't produce"
                                      " expected results")

    def testCreateConnectedLsmaskWrapperWithoutUsingDiagonals(self):
        """Test creating a connected ls mask not including diagonal connections"""
        # The wrapper modifies self.landsea_in in place.
        cc_lsmask_wrapper.create_connected_ls_mask(self.landsea_in, #@UndefinedVariable
                                                   self.ocean_seed_points,
                                                   False)
        np.testing.assert_array_equal(self.landsea_in,
                                      self.expected_output_not_using_diagonals,
                                      "Creating a connected ls mask not using diagonals doesn't produce"
                                      " expected results")
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Run the full test suite when executed directly.
    unittest.main()
| [
"unittest.main",
"numpy.asarray",
"Dynamic_HD_Scripts.interface.cpp_interface.libs.create_connected_lsmask_wrapper.create_connected_ls_mask",
"numpy.testing.assert_array_equal"
] | [((5148, 5163), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5161, 5163), False, 'import unittest\n'), ((466, 842), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1, 0, 1, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1], [0, 0, 0, \n 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 1, 0, 0, 1, 1, 1], [0, 0, 0, 1, 0, 0,\n 0, 0, 0, 1], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [1, 1, 0, 0, 1, 1, 1, 0, 1, 0], [1,\n 1, 1, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.int32', 'order': '"""C"""'}), "([[0, 0, 1, 0, 1, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 1, 0, 0, 1, 1, 1], [0, 0, \n 0, 1, 0, 0, 0, 0, 0, 1], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0], [0, 1, 0, 0, 0,\n 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [1, 1, 0, 0, 1, 1, 1, 0,\n 1, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32, order='C')\n", (476, 842), True, 'import numpy as np\n'), ((1149, 1525), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.int32', 'order': '"""C"""'}), "([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32, order='C')\n", (1159, 1525), True, 'import numpy as np\n'), ((1910, 2286), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1, 0, 1, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1], [0, 0, 0, \n 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 1, 0, 0, 1, 1, 1], [0, 0, 0, 1, 0, 0,\n 0, 0, 0, 1], [0, 
1, 0, 1, 0, 0, 1, 0, 1, 0], [0, 1, 0, 0, 0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1,\n 1, 1, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.int32', 'order': '"""C"""'}), "([[0, 0, 1, 0, 1, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 1, 0, 1, 0, 0, 1, 1, 1], [0, 0, \n 0, 1, 0, 0, 0, 0, 0, 1], [0, 1, 0, 1, 0, 0, 1, 0, 1, 0], [0, 1, 0, 0, 0,\n 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0,\n 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32, order='C')\n", (1920, 2286), True, 'import numpy as np\n'), ((2832, 3208), 'numpy.asarray', 'np.asarray', (['[[0, 0, 1, 0, 1, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1], [0, 0, 0, \n 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 1, 1], [0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1,\n 1, 1, 0, 0, 0, 0, 0, 0, 0]]'], {'dtype': 'np.int32', 'order': '"""C"""'}), "([[0, 0, 1, 0, 1, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 1, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 1, 0, 0, 1, 1, 1], [0, 0, \n 0, 0, 0, 0, 0, 0, 0, 1], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0,\n 0, 0], [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32, order='C')\n", (2842, 3208), True, 'import numpy as np\n'), ((3892, 3986), 'Dynamic_HD_Scripts.interface.cpp_interface.libs.create_connected_lsmask_wrapper.create_connected_ls_mask', 'cc_lsmask_wrapper.create_connected_ls_mask', (['self.landsea_in', 'self.ocean_seed_points', '(True)'], {}), '(self.landsea_in, self.\n ocean_seed_points, True)\n', (3934, 3986), True, 'from Dynamic_HD_Scripts.interface.cpp_interface.libs import create_connected_lsmask_wrapper as cc_lsmask_wrapper\n'), ((4112, 4290), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.landsea_in', 
'self.expected_output_using_diagonals', '"""Creating a connected ls mask using diagonal doesn\'t produce expected results"""'], {}), '(self.landsea_in, self.\n expected_output_using_diagonals,\n "Creating a connected ls mask using diagonal doesn\'t produce expected results"\n )\n', (4141, 4290), True, 'import numpy as np\n'), ((4555, 4650), 'Dynamic_HD_Scripts.interface.cpp_interface.libs.create_connected_lsmask_wrapper.create_connected_ls_mask', 'cc_lsmask_wrapper.create_connected_ls_mask', (['self.landsea_in', 'self.ocean_seed_points', '(False)'], {}), '(self.landsea_in, self.\n ocean_seed_points, False)\n', (4597, 4650), True, 'from Dynamic_HD_Scripts.interface.cpp_interface.libs import create_connected_lsmask_wrapper as cc_lsmask_wrapper\n'), ((4776, 4963), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['self.landsea_in', 'self.expected_output_not_using_diagonals', '"""Creating a connected ls mask not using diagonals doesn\'t produce expected results"""'], {}), '(self.landsea_in, self.\n expected_output_not_using_diagonals,\n "Creating a connected ls mask not using diagonals doesn\'t produce expected results"\n )\n', (4805, 4963), True, 'import numpy as np\n')] |
import sys
from typing import Collection, Tuple, Optional
import numba
import pandas as pd
import numpy as np
from numpy import linalg as la
from scipy.sparse import issparse
from anndata import AnnData
from .. import logging as logg
from ..utils import sanitize_anndata
def _design_matrix(
    model: pd.DataFrame,
    batch_key: str,
    batch_levels: Collection[str],
) -> pd.DataFrame:
    """
    Computes a simple design matrix.

    Parameters
    --------
    model
        Contains the batch annotation
    batch_key
        Name of the batch column
    batch_levels
        Levels of the batch annotation

    Returns
    --------
    The design matrix for the regression problem
    """
    import patsy

    design = patsy.dmatrix(
        "~ 0 + C(Q('{}'), levels=batch_levels)".format(batch_key),
        model,
        return_type="dataframe",
    )
    model = model.drop([batch_key], axis=1)
    numerical_covariates = model.select_dtypes('number').columns.values

    logg.info("Found {} batches\n".format(design.shape[1]))
    other_cols = [c for c in model.columns.values if c not in numerical_covariates]

    if other_cols:
        col_repr = " + ".join("Q('{}')".format(x) for x in other_cols)
        factor_matrix = patsy.dmatrix("~ 0 + {}".format(col_repr),
                                     model[other_cols],
                                     return_type="dataframe")

        design = pd.concat((design, factor_matrix), axis=1)
        logg.info("Found {} categorical variables:".format(len(other_cols)))
        logg.info("\t" + ", ".join(other_cols) + '\n')

    # `numerical_covariates` is an ndarray of column names, so it is never
    # None; check its length instead so that we do not log an empty set.
    if len(numerical_covariates) > 0:
        logg.info("Found {} numerical variables:".format(len(numerical_covariates)))
        logg.info("\t" + ", ".join(numerical_covariates) + '\n')

    for nC in numerical_covariates:
        design[nC] = model[nC]

    return design
def _standardize_data(
    model: pd.DataFrame,
    data: pd.DataFrame,
    batch_key: str,
) -> Tuple[pd.DataFrame, pd.DataFrame, np.ndarray, np.ndarray]:
    """
    Standardizes the data per gene.

    The aim here is to make mean and variance be comparable across batches.

    Parameters
    --------
    model
        Contains the batch annotation
    data
        Contains the Data
    batch_key
        Name of the batch column in the model matrix

    Returns
    --------
    s_data : pandas.DataFrame
        Standardized Data
    design : pandas.DataFrame
        Batch assignment as one-hot encodings
    var_pooled : numpy.ndarray
        Pooled variance per gene
    stand_mean : numpy.ndarray
        Gene-wise mean
    """

    # compute the design matrix
    batch_items = model.groupby(batch_key).groups.items()
    batch_levels, batch_info = zip(*batch_items)
    n_batch = len(batch_info)
    n_batches = np.array([len(v) for v in batch_info])
    n_array = float(sum(n_batches))

    design = _design_matrix(model, batch_key, batch_levels)

    # compute pooled variance estimator from an OLS fit of the design matrix
    B_hat = np.dot(np.dot(la.inv(np.dot(design.T, design)), design.T), data.T)
    grand_mean = np.dot((n_batches / n_array).T, B_hat[:n_batch, :])
    var_pooled = (data - np.dot(design, B_hat).T)**2
    var_pooled = np.dot(var_pooled, np.ones((int(n_array), 1)) / int(n_array))

    # Compute the means
    # count once instead of twice; report through the module logger for
    # consistency with the rest of this module (was a bare print before)
    n_zero_variance = np.sum(var_pooled == 0)
    if n_zero_variance > 0:
        logg.info(
            'Found {} genes with zero variance.\n'
            .format(n_zero_variance)
        )
    stand_mean = np.dot(grand_mean.T.reshape((len(grand_mean), 1)), np.ones((1, int(n_array))))
    tmp = np.array(design.copy())
    tmp[:, :n_batch] = 0
    stand_mean += np.dot(tmp, B_hat).T

    # need to be a bit careful with the zero variance genes
    # just set the zero variance genes to zero in the standardized data
    s_data = np.where(var_pooled == 0, 0, (
        (data - stand_mean) /
        np.dot(np.sqrt(var_pooled), np.ones((1, int(n_array))))
    ))
    s_data = pd.DataFrame(s_data, index=data.index, columns=data.columns)

    return s_data, design, var_pooled, stand_mean
def combat(adata: AnnData, key: str = 'batch', covariates: Optional[Collection[str]] = None, inplace: bool = True):
    """ComBat function for batch effect correction [Johnson07]_ [Leek12]_ [Pedersen12]_.

    Corrects for batch effects by fitting linear models, gains statistical power
    via an EB framework where information is borrowed across genes. This uses the
    implementation of `ComBat <https://github.com/brentp/combat.py>`__ [Pedersen12]_.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix
    key: `str`, optional (default: `"batch"`)
        Key to a categorical annotation from adata.obs that will be used for batch effect removal
    covariates
        Additional covariates such as adjustment variables or biological condition. Note that
        not including covariates may introduce bias or lead to the removal of biological signal
        in unbalanced designs.
    inplace: bool, optional (default: `True`)
        Wether to replace adata.X or to return the corrected data

    Returns
    -------
    Depending on the value of inplace, either returns an updated AnnData object
    or modifies the passed one.
    """

    # check the input
    if key not in adata.obs_keys():
        raise ValueError('Could not find the key {!r} in adata.obs'.format(key))

    if covariates is not None:
        cov_exist = np.isin(covariates, adata.obs_keys())
        if np.any(~cov_exist):
            missing_cov = np.array(covariates)[~cov_exist].tolist()
            raise ValueError('Could not find the covariate(s) {!r} in adata.obs'.format(missing_cov))

        if key in covariates:
            raise ValueError('Batch key and covariates cannot overlap')

        if len(covariates) != len(set(covariates)):
            raise ValueError('Covariates must be unique')

    # only works on dense matrices so far
    if issparse(adata.X):
        X = adata.X.A.T
    else:
        X = adata.X.T
    # genes as rows, cells as columns
    data = pd.DataFrame(
        data=X,
        index=adata.var_names,
        columns=adata.obs_names,
    )

    sanitize_anndata(adata)

    # construct a pandas series of the batch annotation
    model = adata.obs[[key] + (covariates if covariates else [])]
    batch_info = model.groupby(key).groups.values()
    n_batch = len(batch_info)
    n_batches = np.array([len(v) for v in batch_info])
    n_array = float(sum(n_batches))

    # standardize across genes using a pooled variance estimator
    logg.info("Standardizing Data across genes.\n")
    s_data, design, var_pooled, stand_mean = _standardize_data(model, data, key)

    # fitting the parameters on the standardized data
    logg.info("Fitting L/S model and finding priors\n")
    batch_design = design[design.columns[:n_batch]]
    # first estimate of the additive batch effect
    gamma_hat = np.dot(np.dot(la.inv(np.dot(batch_design.T, batch_design)), batch_design.T), s_data.T)
    delta_hat = []

    # first estimate for the multiplicative batch effect
    for i, batch_idxs in enumerate(batch_info):
        delta_hat.append(s_data[batch_idxs].var(axis=1))

    # empirically fix the prior hyperparameters
    gamma_bar = gamma_hat.mean(axis=1)
    t2 = gamma_hat.var(axis=1)
    # a_prior and b_prior are the priors on lambda and theta from Johnson and Li (2006)
    a_prior = list(map(_aprior, delta_hat))
    b_prior = list(map(_bprior, delta_hat))

    logg.info("Finding parametric adjustments\n")
    # gamma star and delta star will be our empirical bayes (EB) estimators
    # for the additive and multiplicative batch effect per batch and cell
    gamma_star, delta_star = [], []
    for i, batch_idxs in enumerate(batch_info):
        # temp stores our estimates for the batch effect parameters.
        # temp[0] is the additive batch effect
        # temp[1] is the multiplicative batch effect
        gamma, delta = _it_sol(
            s_data[batch_idxs].values,
            gamma_hat[i],
            delta_hat[i].values,
            gamma_bar[i],
            t2[i],
            a_prior[i],
            b_prior[i],
        )

        gamma_star.append(gamma)
        delta_star.append(delta)

    logg.info("Adjusting data\n")
    # NOTE: bayesdata aliases s_data (no copy); s_data is mutated below.
    bayesdata = s_data
    gamma_star = np.array(gamma_star)
    delta_star = np.array(delta_star)

    # we now apply the parametric adjustment to the standardized data from above
    # loop over all batches in the data
    for j, batch_idxs in enumerate(batch_info):
        # we basically subtract the additive batch effect, rescale by the ratio
        # of multiplicative batch effect to pooled variance and add the overall gene
        # wise mean
        dsq = np.sqrt(delta_star[j,:])
        dsq = dsq.reshape((len(dsq), 1))
        denom = np.dot(dsq, np.ones((1, n_batches[j])))
        numer = np.array(bayesdata[batch_idxs] - np.dot(batch_design.loc[batch_idxs], gamma_star).T)
        bayesdata[batch_idxs] = numer / denom

    # undo the per-gene standardization: rescale by pooled std and add means
    vpsq = np.sqrt(var_pooled).reshape((len(var_pooled), 1))
    bayesdata = bayesdata * np.dot(vpsq, np.ones((1, int(n_array)))) + stand_mean

    # put back into the adata object or return
    if inplace:
        adata.X = bayesdata.values.transpose()
    else:
        return bayesdata.values.transpose()
@numba.jit
def _it_sol(s_data, g_hat, d_hat, g_bar, t2, a, b, conv=0.0001) -> Tuple[float, float]:
    """
    Iteratively compute the conditional posterior means for gamma and delta.

    gamma is an estimator for the additive batch effect, delta is an estimator
    for the multiplicative batch effect. We use an EB framework to estimate these
    two. Analytical expressions exist for both parameters, which however depend on each other.
    We therefore iteratively evaluate these two expressions until convergence is reached.

    Parameters
    --------
    s_data : pd.DataFrame
        Contains the standardized Data
    g_hat : float
        Initial guess for gamma
    d_hat : float
        Initial guess for delta
    g_bar, t2, a, b : float
        Hyperparameters
    conv: float, optional (default: `0.0001`)
        convergence criterium

    Returns:
    --------
    gamma : float
        estimated value for gamma (per-gene array in practice)
    delta : float
        estimated value for delta (per-gene array in practice)
    """

    # n counts the non-NaN observations per gene
    n = (1 - np.isnan(s_data)).sum(axis=1)
    g_old = g_hat.copy()
    d_old = d_hat.copy()

    change = 1
    count = 0

    # we place a normally distributed prior on gamma and an inverse gamma prior on delta
    # in the loop, gamma and delta are updated together. they depend on each other. we iterate until convergence.
    while change > conv:
        g_new = (t2*n*g_hat + d_old*g_bar) / (t2*n + d_old)
        sum2 = s_data - g_new.reshape((g_new.shape[0], 1)) @ np.ones((1, s_data.shape[1]))
        sum2 = sum2 ** 2
        sum2 = sum2.sum(axis=1)
        d_new = (0.5*sum2 + b) / (n/2.0 + a-1.0)

        # relative change in either parameter drives convergence
        change = max((abs(g_new - g_old) / g_old).max(), (abs(d_new - d_old) / d_old).max())

        g_old = g_new  # .copy()
        d_old = d_new  # .copy()

        count = count + 1
    return g_new, d_new
def _aprior(delta_hat):
m = delta_hat.mean()
s2 = delta_hat.var()
return (2*s2 + m**2) / s2
def _bprior(delta_hat):
m = delta_hat.mean()
s2 = delta_hat.var()
return (m*s2 + m**3) / s2
| [
"numpy.sqrt",
"numpy.ones",
"scipy.sparse.issparse",
"numpy.any",
"numpy.array",
"numpy.dot",
"numpy.sum",
"numpy.isnan",
"pandas.DataFrame",
"pandas.concat"
] | [((3100, 3151), 'numpy.dot', 'np.dot', (['(n_batches / n_array).T', 'B_hat[:n_batch, :]'], {}), '((n_batches / n_array).T, B_hat[:n_batch, :])\n', (3106, 3151), True, 'import numpy as np\n'), ((3949, 4009), 'pandas.DataFrame', 'pd.DataFrame', (['s_data'], {'index': 'data.index', 'columns': 'data.columns'}), '(s_data, index=data.index, columns=data.columns)\n', (3961, 4009), True, 'import pandas as pd\n'), ((5950, 5967), 'scipy.sparse.issparse', 'issparse', (['adata.X'], {}), '(adata.X)\n', (5958, 5967), False, 'from scipy.sparse import issparse\n'), ((6036, 6104), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'X', 'index': 'adata.var_names', 'columns': 'adata.obs_names'}), '(data=X, index=adata.var_names, columns=adata.obs_names)\n', (6048, 6104), True, 'import pandas as pd\n'), ((8281, 8301), 'numpy.array', 'np.array', (['gamma_star'], {}), '(gamma_star)\n', (8289, 8301), True, 'import numpy as np\n'), ((8319, 8339), 'numpy.array', 'np.array', (['delta_star'], {}), '(delta_star)\n', (8327, 8339), True, 'import numpy as np\n'), ((1437, 1479), 'pandas.concat', 'pd.concat', (['(design, factor_matrix)'], {'axis': '(1)'}), '((design, factor_matrix), axis=1)\n', (1446, 1479), True, 'import pandas as pd\n'), ((3316, 3339), 'numpy.sum', 'np.sum', (['(var_pooled == 0)'], {}), '(var_pooled == 0)\n', (3322, 3339), True, 'import numpy as np\n'), ((3637, 3655), 'numpy.dot', 'np.dot', (['tmp', 'B_hat'], {}), '(tmp, B_hat)\n', (3643, 3655), True, 'import numpy as np\n'), ((5496, 5514), 'numpy.any', 'np.any', (['(~cov_exist)'], {}), '(~cov_exist)\n', (5502, 5514), True, 'import numpy as np\n'), ((8711, 8736), 'numpy.sqrt', 'np.sqrt', (['delta_star[j, :]'], {}), '(delta_star[j, :])\n', (8718, 8736), True, 'import numpy as np\n'), ((8806, 8832), 'numpy.ones', 'np.ones', (['(1, n_batches[j])'], {}), '((1, n_batches[j]))\n', (8813, 8832), True, 'import numpy as np\n'), ((8993, 9012), 'numpy.sqrt', 'np.sqrt', (['var_pooled'], {}), '(var_pooled)\n', (9000, 9012), True, 'import 
numpy as np\n'), ((3037, 3061), 'numpy.dot', 'np.dot', (['design.T', 'design'], {}), '(design.T, design)\n', (3043, 3061), True, 'import numpy as np\n'), ((3177, 3198), 'numpy.dot', 'np.dot', (['design', 'B_hat'], {}), '(design, B_hat)\n', (3183, 3198), True, 'import numpy as np\n'), ((3429, 3452), 'numpy.sum', 'np.sum', (['(var_pooled == 0)'], {}), '(var_pooled == 0)\n', (3435, 3452), True, 'import numpy as np\n'), ((3880, 3899), 'numpy.sqrt', 'np.sqrt', (['var_pooled'], {}), '(var_pooled)\n', (3887, 3899), True, 'import numpy as np\n'), ((6910, 6946), 'numpy.dot', 'np.dot', (['batch_design.T', 'batch_design'], {}), '(batch_design.T, batch_design)\n', (6916, 6946), True, 'import numpy as np\n'), ((10301, 10317), 'numpy.isnan', 'np.isnan', (['s_data'], {}), '(s_data)\n', (10309, 10317), True, 'import numpy as np\n'), ((10762, 10791), 'numpy.ones', 'np.ones', (['(1, s_data.shape[1])'], {}), '((1, s_data.shape[1]))\n', (10769, 10791), True, 'import numpy as np\n'), ((8883, 8931), 'numpy.dot', 'np.dot', (['batch_design.loc[batch_idxs]', 'gamma_star'], {}), '(batch_design.loc[batch_idxs], gamma_star)\n', (8889, 8931), True, 'import numpy as np\n'), ((5542, 5562), 'numpy.array', 'np.array', (['covariates'], {}), '(covariates)\n', (5550, 5562), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.validate()."""
import numpy as np
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
# Registers the shared LLVM pytest fixtures (presumably supplies the `env`
# and `reward_space` fixtures used below — verify in the plugin module).
pytest_plugins = ["tests.pytest_plugins.llvm"]

# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, reward_space: str):
    """This test produces a random trajectory, resets the environment, then
    replays the trajectory and checks that it produces the same state.
    """
    env.observation_space = "Autophase"
    env.reward_space = reward_space
    benchmark = env.datasets["generator://csmith-v0"].random_benchmark()
    print(benchmark.uri)  # For debugging in case of failure.
    try:
        env.reset(benchmark=benchmark)
    except BenchmarkInitError:
        # Randomly generated benchmarks can fail to initialize; there is
        # nothing to fuzz in that case, so bail out quietly.
        return
    trajectory = apply_random_trajectory(
        env, random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE
    )
    print(env.state)  # For debugging in case of failure.
    env.reset(benchmark=benchmark)
    # Replay the recorded actions and check that each step reproduces the
    # same (observation, reward, done) triple as the original rollout.
    for i, (action, observation, reward, done) in enumerate(trajectory, start=1):
        print(f"Replaying step {i}: {env.action_space.flags[action]}")
        replay_observation, replay_reward, replay_done, info = env.step(action)
        assert done == replay_done, info
        np.testing.assert_array_almost_equal(observation, replay_observation)
        np.testing.assert_almost_equal(reward, replay_reward)
if __name__ == "__main__":
    # Run the shared test entry point when executed directly.
    main()
| [
"numpy.testing.assert_array_almost_equal",
"tests.pytest_plugins.random_util.apply_random_trajectory",
"numpy.testing.assert_almost_equal",
"tests.test_main.main",
"pytest.mark.timeout"
] | [((580, 604), 'pytest.mark.timeout', 'pytest.mark.timeout', (['(600)'], {}), '(600)\n', (599, 604), False, 'import pytest\n'), ((1132, 1228), 'tests.pytest_plugins.random_util.apply_random_trajectory', 'apply_random_trajectory', (['env'], {'random_trajectory_length_range': 'RANDOM_TRAJECTORY_LENGTH_RANGE'}), '(env, random_trajectory_length_range=\n RANDOM_TRAJECTORY_LENGTH_RANGE)\n', (1155, 1228), False, 'from tests.pytest_plugins.random_util import apply_random_trajectory\n'), ((1780, 1786), 'tests.test_main.main', 'main', ([], {}), '()\n', (1784, 1786), False, 'from tests.test_main import main\n'), ((1615, 1684), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['observation', 'replay_observation'], {}), '(observation, replay_observation)\n', (1651, 1684), True, 'import numpy as np\n'), ((1693, 1746), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['reward', 'replay_reward'], {}), '(reward, replay_reward)\n', (1723, 1746), True, 'import numpy as np\n')] |
########################################################################################################
## pyFAST - Fingerprint and Similarity Thresholding in python
##
## <NAME>
## 11/14/2016
##
## (see Yoon et. al. 2015, Sci. Adv. for algorithm details)
##
########################################################################################################
##
## Feature Extraction (Fingerprinting)
##
########################################################################################################
import numpy as np
import pywt as wt
from sklearn.preprocessing import normalize
from scipy.signal import spectrogram
from scipy.misc import imresize
def init_feature_extractor(params, ntimes):
    """Construct a FeatureExtractor from a fingerprint parameter dictionary."""
    fp_params = params['fingerprint']
    return FeatureExtractor(sampling_rate=fp_params['sampling_rate'],
                            window_length=fp_params['spec_length'],
                            window_lag=fp_params['spec_lag'],
                            fingerprint_length=fp_params['fp_length'],
                            fingerprint_lag=fp_params['fp_lag'],
                            min_freq=fp_params['min_freq'],
                            max_freq=fp_params['max_freq'],
                            nfreq=fp_params['nfreq'],
                            ntimes=ntimes)
class FeatureExtractor(object):
    def __init__(self, sampling_rate, window_length, window_lag, fingerprint_length, fingerprint_lag,
                 min_freq = 0, max_freq = None, nfreq = 32, ntimes = 64):
        """Store the spectrogram / fingerprint configuration.

        Window length and lag are given in seconds; fingerprint length and
        lag are given in spectrogram samples. nfreq/ntimes are the resized
        fingerprint dimensions.
        """
        self.sampling_rate = sampling_rate #/ sampling rate
        self.window_len = window_length #/ length of window (seconds) used in spectrogram
        self.window_lag = window_lag #/ window lag (seconds) used in spectrogram
        self.fp_len = fingerprint_length #/ width of fingerprint (samples)
        self.fp_lag = fingerprint_lag #/ lag between fingerprints (samples)
        self.max_freq = self._initialize_frequencies(max_freq) #/ minimum and maximum frequencies for bandpass filter
        self.min_freq = min_freq
        self.new_d1 = int(nfreq) #/ number of frequency / time bins in fingerprints (must be power of 2) - TODO: error checking
        self.new_d2 = int(ntimes)
        self.d1 = None #/ dimension of spectral images prior to resizing
        self.d2 = None
        # Haar-coefficient statistics; presumably filled in by methods not
        # visible in this chunk — confirm before relying on them.
        self.haar_means = None
        self.haar_stddevs = None
        self.haar_medians = None
        self.haar_absdevs = None
def _initialize_frequencies(self, max_freq): #/ initializes data structure
if max_freq is None:
max_freq = self.sampling_rate/2.0
return max_freq
def update(self, field, value):
if hasattr(self, field):
setattr(self, field, value)
else:
print('WARNING: object has no attribute: ' + field)
print('object has the following attributes:' + self.__dict__.keys())
return
def get_params(self):
mdict = dict()
for k in self.__dict__.keys():
if k not in ['haar_means','haar_stddevs','haar_absdevs','haar_medians']:
mdict[k] = self.__dict__[k]
return mdict
#/ returns indicies for overlapping windows
def get_window_params(self, N, L, dL):
idx0 = np.asarray(range(0, N+1, dL))
idx2 = np.asarray(range(L,N+1,dL))
nWindows = len(idx2)
idx1 = idx0[0:nWindows]
return nWindows, idx1, idx2
########################################################################
## FOR COMPUTING FINGERPRINTS ##
########################################################################
#/ computes spectrogram from continous timeseries data
    def data_to_spectrogram(self, x_data, window_type = 'hanning'):
        """Compute a spectrogram of `x_data` restricted to the passband.

        Window length/overlap come from the configured window_len/window_lag
        (seconds, converted to samples here). Frequency rows outside
        [min_freq, max_freq] are dropped. Also caches `self.frequencies`
        and `self.times` as side effects.
        """
        f, t, Sxx = spectrogram(x_data, fs=self.sampling_rate,
            window=window_type, nperseg=int(self.sampling_rate*self.window_len),
            noverlap = int(self.sampling_rate*(self.window_len - self.window_lag)))
        # Truncate spectrogram, keep only passband frequencies
        if self.min_freq > 0:
            fidx_keep = (f >= self.min_freq)
            Sxx = Sxx[fidx_keep, :]
            f = f[fidx_keep]
        if self.max_freq < f[-1]:
            fidx_keep = (f <= self.max_freq)
            Sxx = Sxx[fidx_keep, :]
            f = f[fidx_keep]
        self.frequencies = f
        self.times = t
        return f, t, Sxx
#/ breaks spectrogram into overlapping spectral images
def spectrogram_to_spectral_images(self, Sxx):
nFreq, nTimes = np.shape(Sxx)
nWindows, idx1, idx2 = self.get_window_params(nTimes, self.fp_len, self.fp_lag)
spectral_images = np.zeros([nWindows, nFreq, self.fp_len])
for i in range(nWindows):
spectral_images[i,:,:] = Sxx[:,idx1[i]:idx2[i]]
self.nwindows = nWindows
nWindows, self.d1, self.d2 = np.shape(spectral_images)
#self.new_d1, self.new_d2 = np.exp2(np.floor(np.log2([self.d1, self.d2])))
return spectral_images, nWindows, idx1, idx2
#/ resizes each spectral image to specified dimensions
def _resize_spectral_images(self, spectral_images, new_d1, new_d2):
new_spectral_images = np.zeros([self.nwindows,new_d1,new_d2])
for i in range(self.nwindows):
new_spectral_images[i,:,:] = imresize(spectral_images[i,:,:], (new_d1, new_d2), interp='bilinear', mode='F')
return new_spectral_images
#/ reshapes output from PyWavelets 2d wavelet transform into image
def _unwrap_wavelet_coeffs(self,coeffs):
L = len(coeffs)
cA = coeffs[0]
for i in range(1,L):
(cH, cV, cD) = coeffs[i]
cA = np.concatenate((np.concatenate((cA, cV),axis= 1),np.concatenate((cH, cD),axis = 1)),axis=0)
return cA
#/ computes wavelet transform for each spectral image
def spectral_images_to_wavelet(self, spectral_images, wavelet = wt.Wavelet('db1')):
if (int(self.new_d1)!=self.d1) or (int(self.new_d2)!=self.d2):
spectral_images = self._resize_spectral_images(spectral_images, self.new_d1, self.new_d2)
haar_images = np.zeros([self.nwindows,self.new_d1,self.new_d2])
for i in range(self.nwindows):
coeffs = wt.wavedec2(spectral_images[i,:,:], wavelet)
haar_images[i,:,:] = self._unwrap_wavelet_coeffs(coeffs)
return haar_images
#/ computes (normalized) haar_images from continous timeseries data
def data_to_haar_images(self, x_data):
f, t, Sxx = self.data_to_spectrogram(x_data)
spectral_images, nWindows, idx1, idx2 = self.spectrogram_to_spectral_images(Sxx)
haar_images = self.spectral_images_to_wavelet(spectral_images)
haar_images = normalize(self._images_to_vectors(haar_images), axis=1)
return haar_images, nWindows, idx1, idx2, Sxx, t
#/ converts set of images to array of vectors
def _images_to_vectors(self,images):
N,d1,d2 = np.shape(images)
vectors = np.zeros([N,d1*d2])
for i in range(N):
vectors[i,:] = np.reshape(images[i,:,:], (1,d1*d2))
return vectors
#/ converts set of vectors into set of images (of dimension d1 x d2)
def _vectors_to_images(self, vectors, d1, d2):
N,D = np.shape(vectors)
if D != d1*d2:
print('warning: invalid dimensions')
return vectors
else:
images = np.zeros([N,d1,d2])
for i in range(N):
images[i,:,:] = np.reshape(vectors[i,:], (d1,d2))
return images
def compute_haar_stats(self, haar_images,type = None):
if type is 'MAD':
shape = haar_images.shape
medians = []
for i in range(shape[1]):
medians.append(np.median(haar_images[:, i]))
self.haar_medians = np.array(medians)
mad = []
for i in range(shape[1]):
tmp = abs(haar_images[:, i] - medians[i])
mad.append(np.median(tmp))
self.haar_absdevs = np.array(mad)
return self.haar_medians, self.haar_absdevs
if type is 'Zscore':
self.haar_means = np.mean(haar_images,axis=0)
self.haar_stddevs = np.std(haar_images,axis=0)
return self.haar_means, self.haar_stddevs
def standardize_haar(self, haar_images, type = 'MAD'):
if type is 'Zscore':
haar_images = (haar_images - self.haar_means)/self.haar_stddevs
return haar_images
elif type is 'MAD':
haar_images = (haar_images - self.haar_medians)/self.haar_absdevs
return haar_images
else:
print('Warning: invalid type - select type MAD or Zscore')
return None
def binarize_vectors_topK_sign(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
binary_vectors = np.zeros((N,2*M), dtype=bool)
for i in range(N):
idx = np.argsort(abs(coeff_vectors[i,:]))[-K:]
binary_vectors[i,idx] = coeff_vectors[i,idx] > 0
binary_vectors[i,idx+M] = coeff_vectors[i,idx] < 0
return binary_vectors
def vectors_to_topK_sign(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
sign_vectors = np.zeros([N,M])
for i in range(N):
idx = np.argsort(abs(coeff_vectors[i,:]))[-K:]
sign_vectors[i,idx] = np.sign(coeff_vectors[i,idx])
return sign_vectors
def sign_to_binary(self, vector):
L = len(vector)
new_vec = np.zeros((L,2), dtype=bool)
new_vec[:,0] = vector > 0
new_vec[:,1] = vector < 0
return np.reshape(new_vec, (1,2*L))
def binarize_vectors_topK(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
sign_vectors = np.zeros((N,M),dtype=bool)
for i in range(N):
idx = np.argsort(coeff_vectors[i,:])[-K:]
sign_vectors[i,idx] = 1
return sign_vectors
def jaccard_sim(self, vec1, vec2):
return sum(vec1 & vec2)/ (1.0*sum(vec1 | vec2))
| [
"numpy.mean",
"numpy.median",
"numpy.reshape",
"pywt.wavedec2",
"pywt.Wavelet",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sign",
"scipy.misc.imresize",
"numpy.std",
"numpy.concatenate",
"numpy.shape"
] | [((4642, 4655), 'numpy.shape', 'np.shape', (['Sxx'], {}), '(Sxx)\n', (4650, 4655), True, 'import numpy as np\n'), ((4770, 4810), 'numpy.zeros', 'np.zeros', (['[nWindows, nFreq, self.fp_len]'], {}), '([nWindows, nFreq, self.fp_len])\n', (4778, 4810), True, 'import numpy as np\n'), ((4975, 5000), 'numpy.shape', 'np.shape', (['spectral_images'], {}), '(spectral_images)\n', (4983, 5000), True, 'import numpy as np\n'), ((5299, 5340), 'numpy.zeros', 'np.zeros', (['[self.nwindows, new_d1, new_d2]'], {}), '([self.nwindows, new_d1, new_d2])\n', (5307, 5340), True, 'import numpy as np\n'), ((6018, 6035), 'pywt.Wavelet', 'wt.Wavelet', (['"""db1"""'], {}), "('db1')\n", (6028, 6035), True, 'import pywt as wt\n'), ((6233, 6284), 'numpy.zeros', 'np.zeros', (['[self.nwindows, self.new_d1, self.new_d2]'], {}), '([self.nwindows, self.new_d1, self.new_d2])\n', (6241, 6284), True, 'import numpy as np\n'), ((7058, 7074), 'numpy.shape', 'np.shape', (['images'], {}), '(images)\n', (7066, 7074), True, 'import numpy as np\n'), ((7093, 7115), 'numpy.zeros', 'np.zeros', (['[N, d1 * d2]'], {}), '([N, d1 * d2])\n', (7101, 7115), True, 'import numpy as np\n'), ((7366, 7383), 'numpy.shape', 'np.shape', (['vectors'], {}), '(vectors)\n', (7374, 7383), True, 'import numpy as np\n'), ((8962, 8985), 'numpy.shape', 'np.shape', (['coeff_vectors'], {}), '(coeff_vectors)\n', (8970, 8985), True, 'import numpy as np\n'), ((9011, 9043), 'numpy.zeros', 'np.zeros', (['(N, 2 * M)'], {'dtype': 'bool'}), '((N, 2 * M), dtype=bool)\n', (9019, 9043), True, 'import numpy as np\n'), ((9371, 9394), 'numpy.shape', 'np.shape', (['coeff_vectors'], {}), '(coeff_vectors)\n', (9379, 9394), True, 'import numpy as np\n'), ((9418, 9434), 'numpy.zeros', 'np.zeros', (['[N, M]'], {}), '([N, M])\n', (9426, 9434), True, 'import numpy as np\n'), ((9693, 9721), 'numpy.zeros', 'np.zeros', (['(L, 2)'], {'dtype': 'bool'}), '((L, 2), dtype=bool)\n', (9701, 9721), True, 'import numpy as np\n'), ((9805, 9836), 'numpy.reshape', 
'np.reshape', (['new_vec', '(1, 2 * L)'], {}), '(new_vec, (1, 2 * L))\n', (9815, 9836), True, 'import numpy as np\n'), ((9923, 9946), 'numpy.shape', 'np.shape', (['coeff_vectors'], {}), '(coeff_vectors)\n', (9931, 9946), True, 'import numpy as np\n'), ((9970, 9998), 'numpy.zeros', 'np.zeros', (['(N, M)'], {'dtype': 'bool'}), '((N, M), dtype=bool)\n', (9978, 9998), True, 'import numpy as np\n'), ((5419, 5504), 'scipy.misc.imresize', 'imresize', (['spectral_images[i, :, :]', '(new_d1, new_d2)'], {'interp': '"""bilinear"""', 'mode': '"""F"""'}), "(spectral_images[i, :, :], (new_d1, new_d2), interp='bilinear',\n mode='F')\n", (5427, 5504), False, 'from scipy.misc import imresize\n'), ((6343, 6389), 'pywt.wavedec2', 'wt.wavedec2', (['spectral_images[i, :, :]', 'wavelet'], {}), '(spectral_images[i, :, :], wavelet)\n', (6354, 6389), True, 'import pywt as wt\n'), ((7167, 7208), 'numpy.reshape', 'np.reshape', (['images[i, :, :]', '(1, d1 * d2)'], {}), '(images[i, :, :], (1, d1 * d2))\n', (7177, 7208), True, 'import numpy as np\n'), ((7518, 7539), 'numpy.zeros', 'np.zeros', (['[N, d1, d2]'], {}), '([N, d1, d2])\n', (7526, 7539), True, 'import numpy as np\n'), ((7941, 7958), 'numpy.array', 'np.array', (['medians'], {}), '(medians)\n', (7949, 7958), True, 'import numpy as np\n'), ((8153, 8166), 'numpy.array', 'np.array', (['mad'], {}), '(mad)\n', (8161, 8166), True, 'import numpy as np\n'), ((8285, 8313), 'numpy.mean', 'np.mean', (['haar_images'], {'axis': '(0)'}), '(haar_images, axis=0)\n', (8292, 8313), True, 'import numpy as np\n'), ((8345, 8372), 'numpy.std', 'np.std', (['haar_images'], {'axis': '(0)'}), '(haar_images, axis=0)\n', (8351, 8372), True, 'import numpy as np\n'), ((9554, 9584), 'numpy.sign', 'np.sign', (['coeff_vectors[i, idx]'], {}), '(coeff_vectors[i, idx])\n', (9561, 9584), True, 'import numpy as np\n'), ((7601, 7636), 'numpy.reshape', 'np.reshape', (['vectors[i, :]', '(d1, d2)'], {}), '(vectors[i, :], (d1, d2))\n', (7611, 7636), True, 'import numpy as 
np\n'), ((10042, 10073), 'numpy.argsort', 'np.argsort', (['coeff_vectors[i, :]'], {}), '(coeff_vectors[i, :])\n', (10052, 10073), True, 'import numpy as np\n'), ((5797, 5829), 'numpy.concatenate', 'np.concatenate', (['(cA, cV)'], {'axis': '(1)'}), '((cA, cV), axis=1)\n', (5811, 5829), True, 'import numpy as np\n'), ((5830, 5862), 'numpy.concatenate', 'np.concatenate', (['(cH, cD)'], {'axis': '(1)'}), '((cH, cD), axis=1)\n', (5844, 5862), True, 'import numpy as np\n'), ((7879, 7907), 'numpy.median', 'np.median', (['haar_images[:, i]'], {}), '(haar_images[:, i])\n', (7888, 7907), True, 'import numpy as np\n'), ((8104, 8118), 'numpy.median', 'np.median', (['tmp'], {}), '(tmp)\n', (8113, 8118), True, 'import numpy as np\n')] |
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
def show_image(image_path, type="matplotlib"):
    """Display an image loaded as grayscale, via OpenCV or matplotlib.

    Parameters
    ----------
    image_path : str
        Path to the image file (read with flag 0, i.e. grayscale).
    type : str
        "cv" for an OpenCV window; anything else uses matplotlib.
    """
    image = cv.imread(image_path, 0)
    if type == "cv":
        cv.imshow("original", image)
        cv.waitKey(0)
        # Bug fix: cv.destroyWindow() requires the window name; the original
        # zero-argument call raised a TypeError.
        cv.destroyWindow("original")
    else:
        plt.imshow(image, cmap='gray', interpolation='bicubic')
        plt.xticks([])  # hide axis ticks for a clean image view
        plt.yticks([])
        plt.show()
def show_cam_video():
    """Stream webcam frames as grayscale until 'q' is pressed."""
    cap = cv.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        if not ret:
            # Robustness fix: stop cleanly if the camera read fails
            # (missing/disconnected device) instead of crashing in cvtColor.
            break
        # Bug fix: cv.COLORMAP_BONE is an applyColorMap constant, not a color
        # conversion code; grayscale conversion needs cv.COLOR_BGR2GRAY.
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        cv.imshow('frame', gray)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv.destroyAllWindows()
def record_camera_video():
    """Record webcam video to a file (stub -- not implemented yet)."""
    pass
def draw_a_line():
    """Draw a blue diagonal line on a black 512x512 canvas and return the image.

    Returns
    -------
    numpy.ndarray
        The 512x512x3 uint8 image. Fix: the original discarded the drawn
        image; returning it lets callers display or save the result
        (backward-compatible -- previous callers ignored the None return).
    """
    img = np.zeros((512, 512, 3), np.uint8)
    # BGR (255, 0, 0) = blue, thickness 5, corner to corner.
    cv.line(img, (0, 0), (511, 511), (255, 0, 0), 5)
    return img
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xticks",
"cv2.destroyWindow",
"cv2.line",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"matplotlib.pyplot.yticks",
"cv2.cvtColor",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((130, 154), 'cv2.imread', 'cv.imread', (['image_path', '(0)'], {}), '(image_path, 0)\n', (139, 154), True, 'import cv2 as cv\n'), ((435, 453), 'cv2.VideoCapture', 'cv.VideoCapture', (['(0)'], {}), '(0)\n', (450, 453), True, 'import cv2 as cv\n'), ((689, 711), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (709, 711), True, 'import cv2 as cv\n'), ((775, 808), 'numpy.zeros', 'np.zeros', (['(512, 512, 3)', 'np.uint8'], {}), '((512, 512, 3), np.uint8)\n', (783, 808), True, 'import numpy as np\n'), ((807, 855), 'cv2.line', 'cv.line', (['img', '(0, 0)', '(511, 511)', '(255, 0, 0)', '(5)'], {}), '(img, (0, 0), (511, 511), (255, 0, 0), 5)\n', (814, 855), True, 'import cv2 as cv\n'), ((184, 212), 'cv2.imshow', 'cv.imshow', (['"""original"""', 'image'], {}), "('original', image)\n", (193, 212), True, 'import cv2 as cv\n'), ((220, 233), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (230, 233), True, 'import cv2 as cv\n'), ((242, 260), 'cv2.destroyWindow', 'cv.destroyWindow', ([], {}), '()\n', (258, 260), True, 'import cv2 as cv\n'), ((279, 334), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""', 'interpolation': '"""bicubic"""'}), "(image, cmap='gray', interpolation='bicubic')\n", (289, 334), True, 'import matplotlib.pyplot as plt\n'), ((346, 360), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (356, 360), True, 'import matplotlib.pyplot as plt\n'), ((369, 383), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (379, 383), True, 'import matplotlib.pyplot as plt\n'), ((392, 402), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (400, 402), True, 'import matplotlib.pyplot as plt\n'), ((534, 570), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLORMAP_BONE'], {}), '(frame, cv.COLORMAP_BONE)\n', (545, 570), True, 'import cv2 as cv\n'), ((580, 604), 'cv2.imshow', 'cv.imshow', (['"""frame"""', 'gray'], {}), "('frame', gray)\n", (589, 604), True, 'import cv2 as cv\n'), ((615, 628), 
'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (625, 628), True, 'import cv2 as cv\n')] |
import sys
import csv
from matplotlib import image as mpimg
import numpy as np
import scipy.misc
import cv2
# Command line: verts-file edges-file input-image output-image threshold
vert_filename = sys.argv[1]
edge_filename = sys.argv[2]
img_filename = sys.argv[3]
output_img_filename = sys.argv[4]
thresh = int(sys.argv[5])

print('reading in verts...')
verts = []
with open(vert_filename, 'r') as vert_file:
    # Each row: x y value (space separated).
    # Fix: dropped the redundant vert_file.close() -- the with-block closes it.
    for row in csv.reader(vert_file, delimiter=' '):
        verts.append([int(row[0]), int(row[1]), int(row[2])])
print('verts:', len(verts))

print('reading in edges...')
edges = []
with open(edge_filename, 'r') as edge_file:
    # Each row: vertex-index vertex-index.
    for row in csv.reader(edge_file, delimiter=' '):
        edges.append([int(row[0]), int(row[1])])
print('edges:', len(edges))

img = mpimg.imread(img_filename)
nx, ny = img.shape
# Fix: vectorized zero image (uint8, the proper image dtype) instead of the
# original O(nx*ny) nested Python append loops.
output = np.zeros((nx, ny), dtype=np.uint8)

print('building adjacency')
adj = [[] for _ in range(len(verts))]
for v0, v1 in edges:
    adj[v0].append(v1)
    adj[v1].append(v0)

maxs = 0
print('building max')
for i, v in enumerate(verts):
    f_v = img[v[0], v[1]]
    # v is a local maximum if no neighbour has a strictly larger image value.
    local_max = all(img[verts[j][0], verts[j][1]] <= f_v for j in adj[i])
    if local_max and v[2] > thresh:
        maxs += 1
        output[v[0], v[1]] = 255
print('maxs:', maxs)
cv2.imwrite(output_img_filename, output)
#scipy.misc.imsave(output_img_filename, output) | [
"cv2.imwrite",
"matplotlib.image.imread",
"numpy.asarray",
"csv.reader"
] | [((815, 841), 'matplotlib.image.imread', 'mpimg.imread', (['img_filename'], {}), '(img_filename)\n', (827, 841), True, 'from matplotlib import image as mpimg\n'), ((986, 1004), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (996, 1004), True, 'import numpy as np\n'), ((1662, 1702), 'cv2.imwrite', 'cv2.imwrite', (['output_img_filename', 'output'], {}), '(output_img_filename, output)\n', (1673, 1702), False, 'import cv2\n'), ((351, 387), 'csv.reader', 'csv.reader', (['vert_file'], {'delimiter': '""" """'}), "(vert_file, delimiter=' ')\n", (361, 387), False, 'import csv\n'), ((635, 671), 'csv.reader', 'csv.reader', (['edge_file'], {'delimiter': '""" """'}), "(edge_file, delimiter=' ')\n", (645, 671), False, 'import csv\n')] |
from __future__ import (division, print_function, absolute_import)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy.integrate as integrate
import scipy.optimize as optimize
from datetime import datetime
from elecsus.goal_functions import *
import time
import sys
from elecsus.elecsus_methods_NEW import calculate
from elecsus.libs.durhamcolours import *
def weighted_space(start, stop, num, power):
    """Like np.linspace but with points concentrated near the centre of the range.

    A symmetric linspace is raised to `power` (use odd powers to keep the
    ordering) and rescaled so the endpoints still land on `start` and `stop`.
    """
    midpoint = (start + stop) / 2
    half_width = stop - midpoint
    grid = np.linspace(-half_width, half_width, num)
    return grid ** power / half_width ** (power - 1) + midpoint
def rotate_Efield(E_in, Etheta):
    """Rotate the transverse (x, y) components of E_in by angle Etheta (radians).

    The input array may be modified in place (a float copy is made only when
    the dtype is int); the rotated field is returned.
    """
    if Etheta == 0.:
        return E_in
    if E_in.dtype == int:
        E_in = E_in.astype(float)
    if np.array_equal(E_in, [1, 0, 0]):
        # Fast path for the standard x-polarised input field.
        E_in[0], E_in[1] = np.cos(Etheta), np.sin(Etheta)
        return E_in
    c, s = np.cos(Etheta), np.sin(Etheta)
    rotation = np.array([[c, -s], [s, c]])
    if E_in.shape == (3,):
        E_in[:2] = rotation.dot(E_in[:2])
    else:
        # NOTE(review): iterates range(len(E_in)), i.e. the first axis --
        # confirm column count matches for (3, N) fields with N != 3.
        for col in range(len(E_in)):
            E_in[:2, col] = rotation.dot(E_in[:2, col])
    return E_in
def output_transmission(E_out, Etheta):
    """Return the transmitted intensity through a polariser at Etheta + 90 degrees.

    The incident intensity is normalised against a unit x-polarised field.
    """
    reference = np.array([1, 0, 0])
    I_in = (reference * reference.conjugate()).sum(axis=0)
    angle = Etheta + np.pi / 2
    c, s = np.cos(angle), np.sin(angle)
    # Jones matrix of a linear polariser oriented at `angle`.
    J_out = np.matrix([[c ** 2, s * c], [s * c, s ** 2]])
    transmitted = np.array(J_out * E_out[:2])
    return (transmitted * transmitted.conjugate()).sum(axis=0) / I_in
def cascade(Detuning, E_in, p_dict_list, Etheta=0):
    """Propagate E_in through a chain of cells and return the crossed-polariser transmission.

    p_dict_list holds one ElecSus parameter dictionary per cell; the cells are
    applied in order, so a single-element list models one cell.
    """
    E_in = rotate_Efield(E_in, Etheta)
    for cell_params in p_dict_list:
        [E_out] = calculate(Detuning, E_in, cell_params, outputs=['E_out'])
        # The output field of one cell is the input of the next.
        E_in = E_out
    return output_transmission(E_out, Etheta)
def objective_cascade(params, var_names, p_dict_list, Det_base, Det_weight, E_in):
    '''
    Objective function minimised by the optimizer (called from optimizer()).

    params:      values chosen by the optimizer for each optimized variable;
                 the LAST entry is ALWAYS Etheta in degrees.
    var_names:   strings of the form 'Nvariable', where N is the 1-based cell
                 number and `variable` is a valid p_dict key. Example: with
                 params=[80.2, 24.3, 32.5, 88.9] and
                 var_names=['1Bfield', '1T', '2Bfield'], cell 1 gets
                 Bfield=80.2 and T=24.3, cell 2 gets Bfield=32.5, and
                 Etheta=88.9 degrees.
    p_dict_list: one parameter dictionary per cell in series; entries not
                 being optimized keep their default values.
    Det_base:    coarse detuning grid for the first filter evaluation,
                 e.g. np.linspace(-20, 20, 5000) * 1e3.
    Det_weight:  very narrow detuning grid, re-centred on the maximum found
                 in the first pass and merged with Det_base for the second,
                 e.g. weighted_space(-0.05, 0.05, 5000, 3) * 1e3.
    E_in:        default input electric field.

    Returns the negated figure of merit (the optimizer minimises).
    '''
    global glob_var_progression #used for printing results during optimization
    #could also be used to graph progression of optimization
    # Round the input parameters to a specified number of decimal places. Drastically reduces the time
    # for the optimizer to converge at a potential cost of finding a better filter. This could be customized
    # for each variable type so that optimizations account for accuracy of lab equipment.
    params = np.round(params, decimals=2)
    print_string = 'Etheta= ' + str(params[-1]) + ' '
    glob_var_progression[-2].append(params[-1])
    for i in range(len(var_names)):
        #used for printing results during optimization
        print_string += (var_names[i] + '= ' + str(params[i]) + ', ')
        glob_var_progression[i].append(params[i])
        # Convert degrees to radians for Btheta, Bphi
        if var_names[i][1:] == 'Btheta' or var_names[i][1:] == 'Bphi':
            params[i] = np.radians(params[i])
        #Assign value to correct p_dict in p_dict_list
        cell_no = int(var_names[i][0]) - 1
        p_dict_list[cell_no][var_names[i][1:]]=params[i]
    # print(p_dict_list)
    #First evaluation of filter system
    I_out_temp = cascade(Det_base, E_in, p_dict_list, Etheta=np.radians(params[-1])).real
    #Combining Det_base and biased Det_weight to create grid used in second evaluation
    bias = Det_base[np.argmax(I_out_temp)]
    Detuning = np.concatenate((Det_base, Det_weight + bias))
    Detuning.sort()
    #Second evaluation and FOM calculation
    I_out = cascade(Detuning, E_in, p_dict_list, Etheta=np.radians(params[-1])).real
    FOMval = FOM(Detuning, I_out) * 1e3
    glob_var_progression[-1].append(np.round(FOMval, decimals=3))
    print(glob_var_progression)
    #Print results during optimization process
    print("FnEv:", len(glob_var_progression[0]), print_string, "FOM=", np.round(FOMval, decimals=3), "Max I_out=",
          np.round(I_out.max(), decimals=3), "\t", p_dict_list)
    return -np.round(FOMval, decimals=3)
def optimizer():
    """Run a differential-evolution search over the two-cell filter parameters.

    The optimization setup (variables, bounds, fixed cell parameters and
    detuning grids) is configured inside this function; progress is tracked
    through the module-level `glob_var_progression` used by objective_cascade().
    """
    # glob_var_progression stores, per objective evaluation, each optimized
    # variable plus Etheta and the resulting FOM (one sub-list per quantity).
    global glob_var_progression
    glob_var_progression = [[], [], [], [], [], [], [], []]
    # var_names: optimized variables ('1Bfield' means first-cell Bfield).
    # bounds: per-variable search bounds; the last tuple is always Etheta.
    var_names = ['1Bfield', '1T', '1Btheta', '2Bfield', '2T', '2Btheta']
    bounds = [(0, 500), (0, 300), (80, 100), (0, 500), (0, 300), (0, 180), (0, 180)] # last one is Etheta
    p_dict_list = [{'Elem': 'Rb', 'Dline': 'D2', 'lcell': 5e-3, 'GammaBuf': 20.},
                   {'Elem': 'Rb', 'Dline': 'D2', 'lcell': 5e-3, }]
    Det_base = np.linspace(-20, 20, 5000) * 1e3
    Det_weight = weighted_space(-0.05, 0.05, 5000, 3) * 1e3
    E_in = np.array([1, 0, 0])
    print(var_names)
    print(bounds)
    print(p_dict_list)
    # tol=0.01 gives a good FOM without taking more time than necessary.
    # Bug fix: time.clock() was removed in Python 3.8; time.perf_counter()
    # is the documented replacement for elapsed-time measurement.
    t0 = time.perf_counter()
    result = optimize.differential_evolution(objective_cascade, bounds,
                 args=(var_names, p_dict_list, Det_base, Det_weight, E_in), tol=0.01)
    t1 = time.perf_counter() - t0
    res = np.round(result.x, 2)
    print(res, -result.fun)
    print(result.nfev, result.nit)
    print("Evaluation time = ", t1)
    print("Time per evaluation = ", t1 / result.nfev)
optimizer() | [
"numpy.radians",
"time.clock",
"scipy.optimize.differential_evolution",
"numpy.argmax",
"numpy.array",
"numpy.linspace",
"numpy.dot",
"numpy.array_equal",
"numpy.cos",
"numpy.concatenate",
"numpy.sin",
"elecsus.elecsus_methods_NEW.calculate",
"numpy.round"
] | [((1844, 1863), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1852, 1863), True, 'import numpy as np\n'), ((2152, 2179), 'numpy.array', 'np.array', (['(J_out * E_out[:2])'], {}), '(J_out * E_out[:2])\n', (2160, 2179), True, 'import numpy as np\n'), ((5867, 5895), 'numpy.round', 'np.round', (['params'], {'decimals': '(2)'}), '(params, decimals=2)\n', (5875, 5895), True, 'import numpy as np\n'), ((6840, 6885), 'numpy.concatenate', 'np.concatenate', (['(Det_base, Det_weight + bias)'], {}), '((Det_base, Det_weight + bias))\n', (6854, 6885), True, 'import numpy as np\n'), ((8833, 8852), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (8841, 8852), True, 'import numpy as np\n'), ((9116, 9128), 'time.clock', 'time.clock', ([], {}), '()\n', (9126, 9128), False, 'import time\n'), ((9142, 9273), 'scipy.optimize.differential_evolution', 'optimize.differential_evolution', (['objective_cascade', 'bounds'], {'args': '(var_names, p_dict_list, Det_base, Det_weight, E_in)', 'tol': '(0.01)'}), '(objective_cascade, bounds, args=(var_names,\n p_dict_list, Det_base, Det_weight, E_in), tol=0.01)\n', (9173, 9273), True, 'import scipy.optimize as optimize\n'), ((9328, 9349), 'numpy.round', 'np.round', (['result.x', '(2)'], {}), '(result.x, 2)\n', (9336, 9349), True, 'import numpy as np\n'), ((1162, 1193), 'numpy.array_equal', 'np.array_equal', (['E_in', '[1, 0, 0]'], {}), '(E_in, [1, 0, 0])\n', (1176, 1193), True, 'import numpy as np\n'), ((2839, 2891), 'elecsus.elecsus_methods_NEW.calculate', 'calculate', (['Detuning', 'E_in', 'p_dict'], {'outputs': "['E_out']"}), "(Detuning, E_in, p_dict, outputs=['E_out'])\n", (2848, 2891), False, 'from elecsus.elecsus_methods_NEW import calculate\n'), ((6802, 6823), 'numpy.argmax', 'np.argmax', (['I_out_temp'], {}), '(I_out_temp)\n', (6811, 6823), True, 'import numpy as np\n'), ((7112, 7140), 'numpy.round', 'np.round', (['FOMval'], {'decimals': '(3)'}), '(FOMval, decimals=3)\n', (7120, 7140), True, 'import 
numpy as np\n'), ((7293, 7321), 'numpy.round', 'np.round', (['FOMval'], {'decimals': '(3)'}), '(FOMval, decimals=3)\n', (7301, 7321), True, 'import numpy as np\n'), ((7413, 7441), 'numpy.round', 'np.round', (['FOMval'], {'decimals': '(3)'}), '(FOMval, decimals=3)\n', (7421, 7441), True, 'import numpy as np\n'), ((8729, 8755), 'numpy.linspace', 'np.linspace', (['(-20)', '(20)', '(5000)'], {}), '(-20, 20, 5000)\n', (8740, 8755), True, 'import numpy as np\n'), ((9299, 9311), 'time.clock', 'time.clock', ([], {}), '()\n', (9309, 9311), False, 'import time\n'), ((1234, 1248), 'numpy.cos', 'np.cos', (['Etheta'], {}), '(Etheta)\n', (1240, 1248), True, 'import numpy as np\n'), ((1271, 1285), 'numpy.sin', 'np.sin', (['Etheta'], {}), '(Etheta)\n', (1277, 1285), True, 'import numpy as np\n'), ((6360, 6381), 'numpy.radians', 'np.radians', (['params[i]'], {}), '(params[i])\n', (6370, 6381), True, 'import numpy as np\n'), ((871, 894), 'numpy.linspace', 'np.linspace', (['(-D)', 'D', 'num'], {}), '(-D, D, num)\n', (882, 894), True, 'import numpy as np\n'), ((1512, 1545), 'numpy.dot', 'np.dot', (['rotation_matrix', 'E_in[:2]'], {}), '(rotation_matrix, E_in[:2])\n', (1518, 1545), True, 'import numpy as np\n'), ((6665, 6687), 'numpy.radians', 'np.radians', (['params[-1]'], {}), '(params[-1])\n', (6675, 6687), True, 'import numpy as np\n'), ((7006, 7028), 'numpy.radians', 'np.radians', (['params[-1]'], {}), '(params[-1])\n', (7016, 7028), True, 'import numpy as np\n'), ((1641, 1677), 'numpy.dot', 'np.dot', (['rotation_matrix', 'E_in[:2, i]'], {}), '(rotation_matrix, E_in[:2, i])\n', (1647, 1677), True, 'import numpy as np\n'), ((1968, 1987), 'numpy.cos', 'np.cos', (['outputAngle'], {}), '(outputAngle)\n', (1974, 1987), True, 'import numpy as np\n'), ((1994, 2013), 'numpy.sin', 'np.sin', (['outputAngle'], {}), '(outputAngle)\n', (2000, 2013), True, 'import numpy as np\n'), ((2016, 2035), 'numpy.cos', 'np.cos', (['outputAngle'], {}), '(outputAngle)\n', (2022, 2035), True, 'import numpy 
as np\n'), ((2062, 2081), 'numpy.sin', 'np.sin', (['outputAngle'], {}), '(outputAngle)\n', (2068, 2081), True, 'import numpy as np\n'), ((2084, 2103), 'numpy.cos', 'np.cos', (['outputAngle'], {}), '(outputAngle)\n', (2090, 2103), True, 'import numpy as np\n'), ((2105, 2124), 'numpy.sin', 'np.sin', (['outputAngle'], {}), '(outputAngle)\n', (2111, 2124), True, 'import numpy as np\n'), ((1341, 1355), 'numpy.cos', 'np.cos', (['Etheta'], {}), '(Etheta)\n', (1347, 1355), True, 'import numpy as np\n'), ((1416, 1430), 'numpy.sin', 'np.sin', (['Etheta'], {}), '(Etheta)\n', (1422, 1430), True, 'import numpy as np\n'), ((1432, 1446), 'numpy.cos', 'np.cos', (['Etheta'], {}), '(Etheta)\n', (1438, 1446), True, 'import numpy as np\n'), ((1358, 1372), 'numpy.sin', 'np.sin', (['Etheta'], {}), '(Etheta)\n', (1364, 1372), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
def batches(l, n):
    """Yield successive n-sized index ranges over l; the final range holds the remainder."""
    start = 0
    while start < l:
        yield range(start, min(l, start + n))
        start += n
class Deep_Autoencoder(object):
    """Fully connected deep autoencoder built on the TensorFlow 1.x graph API.

    Encoder and decoder share (tied) weights: decoding multiplies by the
    transposed encoder matrices. Training uses Adam on a summed squared
    reconstruction error.
    """
    def __init__(self, sess, input_dim_list=[7,64,64,7], transfer_function=tf.nn.relu, learning_rate=0.001):
        """Build the computation graph.

        sess:              active tf.Session used to initialize the variables
        input_dim_list:    layer sizes; the first entry is the data dimension
        transfer_function: activation applied after every affine layer
        learning_rate:     Adam learning rate (see bug fix below)
        """
        self.W_list = []
        self.encoding_b_list = []
        self.decoding_b_list = []
        self.dim_list = input_dim_list
        self.transfer = transfer_function
        # Bug fix: the original hard-coded 0.001 here, silently ignoring the
        # learning_rate constructor argument.
        self.learning_rate = learning_rate
        ## Encoders parameters (Xavier-style uniform initialisation)
        for i in range(len(input_dim_list)-1):
            init_max_value = 4*np.sqrt(6. / (self.dim_list[i] + self.dim_list[i+1]))
            self.W_list.append(tf.Variable(tf.random_uniform([self.dim_list[i],self.dim_list[i+1]],
                                                             np.negative(init_max_value),init_max_value)))
            self.encoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i+1]],-0.1,0.1)))
        ## Decoders parameters (biases only -- weights are tied to the encoder)
        for i in range(len(input_dim_list)-2,-1,-1):
            self.decoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i]],-0.1,0.1)))
        ## Placeholder for input
        self.input_x = tf.placeholder(tf.float32,[None,self.dim_list[0]])
        ## coding graph :
        last_layer = self.input_x
        for weight,bias in zip(self.W_list,self.encoding_b_list):
            hidden = self.transfer(tf.matmul(last_layer,weight) + bias)
            last_layer = hidden
        self.hidden = hidden
        ## decode graph (tied weights: transpose of the encoder matrices):
        for weight,bias in zip(reversed(self.W_list),self.decoding_b_list):
            hidden = self.transfer(tf.matmul(last_layer,tf.transpose(weight)) + bias)
            last_layer = hidden
        self.recon = last_layer
        # Summed squared reconstruction error over the batch.
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.recon, self.input_x), 2.0))
        self.train_step = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
        sess.run(tf.global_variables_initializer())

    def fit(self, X, sess, iteration=100, batch_size=12, init=False, verbose=False):
        """Train on X for `iteration` epochs of mini-batches; re-initialise variables if `init`."""
        assert X.shape[1] == self.dim_list[0]
        if init:
            sess.run(tf.global_variables_initializer())
        sample_size = X.shape[0]
        for i in range(iteration):
            for one_batch in batches(sample_size, batch_size):
                e, op = sess.run((self.cost, self.train_step), feed_dict={self.input_x: X[one_batch]})
            if verbose and i % 20 == 0:
                print(" iteration :", i, ", cost:", e)

    def transform(self, X, sess):
        """Return the bottleneck (encoded) representation of X."""
        return self.hidden.eval(session=sess, feed_dict={self.input_x: X})

    def getRecon(self, X, sess):
        """Return the network's reconstruction of X."""
        return self.recon.eval(session=sess, feed_dict={self.input_x: X})
| [
"numpy.sqrt",
"tensorflow.transpose",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.random_uniform",
"numpy.negative",
"tensorflow.matmul",
"tensorflow.subtract",
"tensorflow.train.AdamOptimizer"
] | [((1466, 1518), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.dim_list[0]]'], {}), '(tf.float32, [None, self.dim_list[0]])\n', (1480, 1518), True, 'import tensorflow as tf\n'), ((2317, 2350), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2348, 2350), True, 'import tensorflow as tf\n'), ((860, 916), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (self.dim_list[i] + self.dim_list[i + 1]))'], {}), '(6.0 / (self.dim_list[i] + self.dim_list[i + 1]))\n', (867, 916), True, 'import numpy as np\n'), ((2223, 2279), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (2245, 2279), True, 'import tensorflow as tf\n'), ((2529, 2562), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2560, 2562), True, 'import tensorflow as tf\n'), ((1174, 1226), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.dim_list[i + 1]]', '(-0.1)', '(0.1)'], {}), '([self.dim_list[i + 1]], -0.1, 0.1)\n', (1191, 1226), True, 'import tensorflow as tf\n'), ((1361, 1409), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.dim_list[i]]', '(-0.1)', '(0.1)'], {}), '([self.dim_list[i]], -0.1, 0.1)\n', (1378, 1409), True, 'import tensorflow as tf\n'), ((1678, 1707), 'tensorflow.matmul', 'tf.matmul', (['last_layer', 'weight'], {}), '(last_layer, weight)\n', (1687, 1707), True, 'import tensorflow as tf\n'), ((2152, 2189), 'tensorflow.subtract', 'tf.subtract', (['self.recon', 'self.input_x'], {}), '(self.recon, self.input_x)\n', (2163, 2189), True, 'import tensorflow as tf\n'), ((1076, 1103), 'numpy.negative', 'np.negative', (['init_max_value'], {}), '(init_max_value)\n', (1087, 1103), True, 'import numpy as np\n'), ((1934, 1954), 'tensorflow.transpose', 'tf.transpose', (['weight'], {}), '(weight)\n', (1946, 1954), True, 'import tensorflow as tf\n')] |
import random
random.seed(666)  # fix Python RNG for reproducibility
import dynet as dy
import numpy as np
np.random.seed(666)  # fix numpy RNG (used for parameter init and dropout masks below)
import heapq
from utils.helper import *
class LSTMDecoder(object):
    """LSTM decoder that emits a full CCG supertag as a sequence of atomic tags.

    Each category is generated atom by atom (terminated by the 'EOS' tag),
    conditioned on a fixed context vector `hidden_vectors` concatenated with an
    embedding of the previously emitted atomic tag.  Implements Dynet's
    save/load protocol via `spec` / `from_spec` / `param_collection`.
    """
    def __init__(self, model, x_dims, h_dims, ccg_dims, LSTMBuilder, n_tag):
        """Build decoder parameters in a sub-collection of `model`.

        x_dims: size of the context vector fed at every decoding step.
        h_dims: decoder LSTM hidden size.
        ccg_dims: embedding size of the previously emitted atomic tag.
        LSTMBuilder: dynet RNN builder class.
        n_tag: number of atomic CCG tags (output vocabulary size).
        """
        pc = model.add_subcollection()
        input_dim = x_dims + ccg_dims
        # decoder lstm: one layer over [context ; previous-atom embedding]
        self.f = LSTMBuilder(1, input_dim, h_dims, pc)
        self.W = pc.add_parameters((n_tag, h_dims), init='normal', mean=0, std=1)
        # lookup table of atomic-tag embeddings, randomly initialized
        self.ccg_lookup = pc.lookup_parameters_from_numpy(
            np.random.randn(n_tag, ccg_dims).astype(np.float32))
        self.pc = pc
        # spec is serialized by dynet and fed back to from_spec on load
        self.spec = (x_dims, h_dims, ccg_dims, LSTMBuilder, n_tag)
        self.h_dim = h_dims
        self.ccg_dims = ccg_dims
        self.ntags = n_tag
    def __call__(self, hidden_vectors, h_sent_info, vocab, atom_info, ccg_info, train,
                 accelerate=True, dropout_x=None, dropout_h=None, ccg_dropout=None):
        """Teacher-forced training or greedy decoding for one batch of tokens.

        When `train` is True, returns (summed masked cross-entropy loss,
        total number of unmasked atoms).  Otherwise decodes greedily and
        returns (number of exactly matched full categories, number of valid
        tokens).  With accelerate=False, decoding runs up to 125 steps and a
        hypothesis is killed as soon as it stops being a prefix of the gold
        category (prefix pruning).
        """
        _, word_id, word_index = h_sent_info  # word_id / word_index unused here
        truth_batch, masks_batch = atom_info
        full_truth, full_mask, full_ccg = ccg_info
        maxlen = truth_batch.shape[0]
        batch_size = len(full_truth)
        mask_dim = masks_batch.shape
        arr = np.reshape(masks_batch, (mask_dim[0]*mask_dim[1], ))
        total_token = np.sum(arr)*1.0
        unk_index = vocab.get_token_index('*@UNK@*', 'ccg')  # NOTE(review): unused
        if train:
            loss = []
        else:
            output = ['' for _ in range(batch_size)]
            flag = 1 - full_mask  # 1 = finished decoding for this batch element
            predict = None
        if not accelerate:
            maxlen = 125 # the maximum sequence length
        f = None
        atom_cnt = 0
        start_flag = 0
        # one loop iteration per emitted atomic tag
        while atom_cnt < maxlen:
            if start_flag == 0:
                # first step: fresh LSTM state, index-0 embedding as BOS
                f = self.f.initial_state()
                if train:
                    self.f.set_dropouts(dropout_x, dropout_h)
                    self.f.set_dropout_masks(batch_size)
                else:
                    self.f.set_dropouts(0.0, 0.0)
                    self.f.set_dropout_masks(batch_size)
                ccg_vec_batch = [0] * batch_size
                ccg_vec = dy.lookup_batch(self.ccg_lookup, ccg_vec_batch)
            else:
                if train:
                    # teacher forcing: embed the gold previous atom, with
                    # Bernoulli embedding dropout (rate ccg_dropout)
                    ccg_vec = dy.lookup_batch(self.ccg_lookup, truth_batch[atom_cnt - 1])
                    cm = np.random.binomial(1, 1. - ccg_dropout, batch_size).astype(np.float32)
                    ccg_vec *= dy.inputTensor(cm, batched=True)
                else:
                    # greedy decoding: feed back the previous prediction
                    if batch_size > 1:
                        ccg_vec = dy.lookup_batch(self.ccg_lookup, predict)
                    else:
                        ccg_vec = dy.lookup_batch(self.ccg_lookup, [predict])
            x = dy.concatenate([hidden_vectors, ccg_vec])
            f = f.add_input(x)
            o = f.output()
            y = self.W * o  # unnormalized scores over atomic tags
            if train:
                errors = dy.pickneglogsoftmax_batch(y, truth_batch[atom_cnt])
                # zero out loss of padded positions via the mask
                m = np.reshape(masks_batch[atom_cnt], (1, batch_size), order='F')
                m = dy.inputTensor(m, True)
                err = dy.sum_batches(dy.cmult(errors, m))
                loss.append(err)
            else:
                predict = np.argmax(y.npvalue(), axis=0)
                for i in range(batch_size):
                    if batch_size > 1:
                        temp = vocab.get_token_from_index(predict[i], 'atom_ccg')
                    else:
                        temp = vocab.get_token_from_index(predict, 'atom_ccg')
                    if temp == 'EOS' and flag[i] == 0:
                        flag[i] = 1
                    elif temp != 'EOS' and flag[i] == 0:
                        output[i] += temp
                    # prefix pruning: stop as soon as the hypothesis is no
                    # longer a prefix of the gold category
                    if not accelerate and not vocab.get_token_from_index(full_ccg[i], 'full_ccg').startswith(output[i]):
                        flag[i] = 1
                if (flag == 1).all():
                    break
            atom_cnt += 1
            start_flag = 1
        if train:
            return dy.esum(loss), total_token
        else:
            good = 0
            for i in range(batch_size):
                if output[i] == vocab.get_token_from_index(full_ccg[i], 'full_ccg') and full_mask[i] == 1:
                    good += 1
            return good, np.sum(full_mask)
    def softmax(self, x):
        """Compute the softmax in a numerically stable way."""
        x = x - np.max(x)
        exp_x = np.exp(x)
        softmax_x = exp_x / np.sum(exp_x)
        return softmax_x
    def beam_search(self, hidden_vectors, h_sent_info, vocab, atom_info, ccg_info, beam_width):
        """Batched beam search over atomic-tag sequences.

        Returns (number of exactly matched full categories, number of valid
        tokens).  The first step expands each token into `beam_width`
        hypotheses with EOS barred; later steps rescore hypotheses with a
        length penalty of pow(length, 0.15) and re-select the top
        `beam_width` (beam, tag) pairs via top_k_2D_col_indexes
        (star-imported from utils.helper).  Decoding caps at 125 steps.
        """
        _, word_id, word_index = h_sent_info  # unused here
        truth_batch, masks_batch = atom_info
        full_truth, full_mask, full_ccg = ccg_info
        maxlen = truth_batch.shape[0]
        eos_index = vocab.get_token_index('EOS', 'atom_ccg')
        batch_size = truth_batch.shape[1]
        # per-beam, per-token partial outputs / finished flags / lengths
        output = [['' for _ in range(batch_size)] for _ in range(beam_width)]
        flag = [[0 for _ in range(batch_size)] for _ in range(beam_width)]
        flag = np.array(flag)
        ccg_len = [[0 for _ in range(batch_size)] for _ in range(beam_width)]
        best_value = None
        best_index = None
        maxlen = 125  # NOTE: unconditionally overrides the value computed above
        f = None
        atom_cnt = 0
        start_flag = 0
        while atom_cnt < maxlen:
            if start_flag == 0:
                f = self.f.initial_state()
                self.f.set_dropouts(0.0, 0.0)
                self.f.set_dropout_masks(batch_size)
                ccg_vec_batch = [0] * batch_size
                ccg_vec = dy.lookup_batch(self.ccg_lookup, ccg_vec_batch)
            else:
                # one embedding column per beam hypothesis
                ccg_vec = []
                for i in range(beam_width):
                    ccg_vec.append(dy.lookup_batch(self.ccg_lookup, best_index[:, i]))
                ccg_vec = dy.concatenate_cols(ccg_vec)
            x = dy.concatenate([hidden_vectors, ccg_vec])
            f = f.add_input(x)
            o = f.output()
            if start_flag == 0:
                # first step: pick the beam_width best non-EOS tags per token
                y = dy.log(dy.softmax(self.W * o))
                y_value = y.npvalue()
                best_index = []
                if batch_size > 1:
                    for i in range(batch_size):
                        y_list = list(y_value[:, i])
                        y_list[eos_index] = -1e9
                        best_index.append(list(map(y_list.index, heapq.nlargest(beam_width, y_list))))
                else:
                    y_list = list(y_value)
                    y_list[eos_index] = -1e9
                    best_index.append(list(map(y_list.index, heapq.nlargest(beam_width, y_list))))
                best_index = np.array(best_index)
                best_value = []
                for i in range(beam_width):
                    best_value.append(dy.pick_batch(y, best_index[:, i]).npvalue())
                best_value = np.array(best_value)
                best_value = np.reshape(best_value, (beam_width, batch_size), order='F')
                for i in range(batch_size):
                    for k in range(beam_width):
                        temp = vocab.get_token_from_index(best_index[i][k], 'atom_ccg')
                        if temp == 'EOS' and flag[k][i] == 0:
                            flag[k][i] = 1
                            ccg_len[k][i] += 1
                        elif temp != 'EOS' and flag[k][i] == 0:
                            output[k][i] += temp
                            ccg_len[k][i] += 1
                flag = np.array(flag)
                output = np.array(output)
                ccg_len = np.array(ccg_len)
                # replicate context and LSTM state across the beam dimension
                hidden_vectors = dy.concatenate_cols([hidden_vectors for _ in range(beam_width)])
                h0, c0 = f.s()
                h1 = [h0 for _ in range(beam_width)]
                h1 = dy.concatenate_cols(h1)
                c1 = [c0 for _ in range(beam_width)]
                c1 = dy.concatenate_cols(c1)
                f = f.set_s((h1, c1))
            else:
                y = dy.log(dy.softmax(self.W * o))
                y_value = y.npvalue()
                # add accumulated (length-penalized) scores; finished beams
                # keep their final score and are barred from expansion
                if batch_size > 1:
                    for i in range(batch_size):
                        for k in range(beam_width):
                            if flag[k][i] == 0:
                                y_value[:, k, i] += best_value[k][i] * pow(ccg_len[k][i], 0.15)
                                y_value[:, k, i] = y_value[:, k, i] / (pow(ccg_len[k][i] + 1, 0.15))
                            else:
                                y_value[:, k, i] = -np.e ** 100
                                y_value[0, k, i] = best_value[k][i]
                else:
                    for k in range(beam_width):
                        if flag[k][0] == 0:
                            y_value[:, k] += best_value[k][0] * pow(ccg_len[k][0], 0.15)
                            y_value[:, k] = y_value[:, k] / (pow(ccg_len[k][0] + 1, 0.15))
                        else:
                            y_value[:, k] = -np.e ** 100
                            y_value[0, k] = best_value[k][0]
                best_value = []
                best_beam = []
                best_index = []
                for i in range(batch_size):
                    if batch_size > 1:
                        b = y_value[:, :, i]
                    else:
                        b = y_value
                    b_b, b_i = top_k_2D_col_indexes(b, beam_width)
                    best_beam.append(b_b)
                    best_index.append(b_i)
                    b_v = []
                    for j in range(beam_width):
                        b_v.append(b[b_i[j]][b_b[j]])
                    best_value.append(b_v)
                best_index = np.array(best_index)
                best_beam = np.array(best_beam)
                best_value = np.transpose(np.array(best_value))
                # rebuild beam bookkeeping from the surviving hypotheses
                output_new = [['' for _ in range(batch_size)] for _ in range(beam_width)]
                flag_new = [[0 for _ in range(batch_size)] for _ in range(beam_width)]
                ccg_len_new = [[0 for _ in range(batch_size)] for _ in range(beam_width)]
                for i in range(batch_size):
                    for k in range(beam_width):
                        temp = vocab.get_token_from_index(best_index[i][k], 'atom_ccg')
                        ori_beam = best_beam[i][k]
                        if flag[ori_beam][i] == 1:
                            flag_new[k][i] = 1
                            output_new[k][i] = output[ori_beam][i]
                            ccg_len_new[k][i] = ccg_len[ori_beam][i]
                        elif temp == 'EOS' and flag[ori_beam][i] == 0:
                            flag_new[k][i] = 1
                            output_new[k][i] = output[ori_beam][i]
                            ccg_len_new[k][i] = ccg_len[ori_beam][i] + 1
                        elif temp != 'EOS' and flag[ori_beam][i] == 0:
                            flag_new[k][i] = 0
                            output_new[k][i] = output[ori_beam][i] + temp
                            ccg_len_new[k][i] = ccg_len[ori_beam][i] + 1
                flag = np.array(flag_new)
                output = np.array(output_new)
                ccg_len = np.array(ccg_len_new)
                # reorder the LSTM state to follow the surviving beams
                h0, c0 = f.s()
                h1 = []
                c1 = []
                for i in range(beam_width):
                    h1.append(dy.pick_batch(h0, best_beam[:, i], dim=1))
                    c1.append(dy.pick_batch(c0, best_beam[:, i], dim=1))
                h1 = dy.concatenate_cols(h1)
                c1 = dy.concatenate_cols(c1)
                f = f.set_s((h1, c1))
            if (flag == 1).all():
                break
            start_flag = 1
            atom_cnt += 1
        good = 0
        for i in range(batch_size):
            best_id = np.argmax(best_value[:, i])
            if output[best_id][i] == vocab.get_token_from_index(full_ccg[i], 'full_ccg') and full_mask[i] == 1:
                good += 1
            if full_mask[i] == 1:
                # NOTE(review): res/value/out_set are built but never used or
                # returned (possibly leftover n-best bookkeeping)
                res = []
                value = np.exp(best_value[:, i] * np.power(ccg_len[:, i], 0.15))
                out_set = set()
                for bw in range(beam_width):
                    if output[bw][i] not in out_set:
                        out_set.add(output[bw][i])
                        res.append([output[bw][i], value[bw]])
        return good, np.sum(full_mask)
    @staticmethod
    def from_spec(spec, model):
        """Create and return a new instance with the needed parameters.
        It is one of the prerequisites for Dynet save/load method.
        """
        x_dims, h_dims, ccg_dims, LSTMBuilder, n_tag = spec
        return LSTMDecoder(model, x_dims, h_dims, ccg_dims, LSTMBuilder, n_tag)
    def param_collection(self):
        """Return a :code:`dynet.ParameterCollection` object with the parameters.
        It is one of the prerequisites for Dynet save/load method.
        """
        return self.pc
| [
"dynet.pickneglogsoftmax_batch",
"dynet.lookup_batch",
"dynet.inputTensor",
"numpy.array",
"dynet.esum",
"dynet.pick_batch",
"dynet.cmult",
"dynet.softmax",
"numpy.random.binomial",
"numpy.reshape",
"numpy.max",
"numpy.exp",
"numpy.random.seed",
"dynet.concatenate_cols",
"numpy.argmax",
... | [((14, 30), 'random.seed', 'random.seed', (['(666)'], {}), '(666)\n', (25, 30), False, 'import random\n'), ((69, 88), 'numpy.random.seed', 'np.random.seed', (['(666)'], {}), '(666)\n', (83, 88), True, 'import numpy as np\n'), ((1234, 1287), 'numpy.reshape', 'np.reshape', (['masks_batch', '(mask_dim[0] * mask_dim[1],)'], {}), '(masks_batch, (mask_dim[0] * mask_dim[1],))\n', (1244, 1287), True, 'import numpy as np\n'), ((4473, 4482), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4479, 4482), True, 'import numpy as np\n'), ((5097, 5111), 'numpy.array', 'np.array', (['flag'], {}), '(flag)\n', (5105, 5111), True, 'import numpy as np\n'), ((1309, 1320), 'numpy.sum', 'np.sum', (['arr'], {}), '(arr)\n', (1315, 1320), True, 'import numpy as np\n'), ((2761, 2802), 'dynet.concatenate', 'dy.concatenate', (['[hidden_vectors, ccg_vec]'], {}), '([hidden_vectors, ccg_vec])\n', (2775, 2802), True, 'import dynet as dy\n'), ((4447, 4456), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (4453, 4456), True, 'import numpy as np\n'), ((4511, 4524), 'numpy.sum', 'np.sum', (['exp_x'], {}), '(exp_x)\n', (4517, 4524), True, 'import numpy as np\n'), ((5903, 5944), 'dynet.concatenate', 'dy.concatenate', (['[hidden_vectors, ccg_vec]'], {}), '([hidden_vectors, ccg_vec])\n', (5917, 5944), True, 'import dynet as dy\n'), ((11821, 11848), 'numpy.argmax', 'np.argmax', (['best_value[:, i]'], {}), '(best_value[:, i])\n', (11830, 11848), True, 'import numpy as np\n'), ((12392, 12409), 'numpy.sum', 'np.sum', (['full_mask'], {}), '(full_mask)\n', (12398, 12409), True, 'import numpy as np\n'), ((2162, 2209), 'dynet.lookup_batch', 'dy.lookup_batch', (['self.ccg_lookup', 'ccg_vec_batch'], {}), '(self.ccg_lookup, ccg_vec_batch)\n', (2177, 2209), True, 'import dynet as dy\n'), ((2935, 2987), 'dynet.pickneglogsoftmax_batch', 'dy.pickneglogsoftmax_batch', (['y', 'truth_batch[atom_cnt]'], {}), '(y, truth_batch[atom_cnt])\n', (2961, 2987), True, 'import dynet as dy\n'), ((3008, 3069), 'numpy.reshape', 
'np.reshape', (['masks_batch[atom_cnt]', '(1, batch_size)'], {'order': '"""F"""'}), "(masks_batch[atom_cnt], (1, batch_size), order='F')\n", (3018, 3069), True, 'import numpy as np\n'), ((3090, 3113), 'dynet.inputTensor', 'dy.inputTensor', (['m', '(True)'], {}), '(m, True)\n', (3104, 3113), True, 'import dynet as dy\n'), ((4059, 4072), 'dynet.esum', 'dy.esum', (['loss'], {}), '(loss)\n', (4066, 4072), True, 'import dynet as dy\n'), ((4323, 4340), 'numpy.sum', 'np.sum', (['full_mask'], {}), '(full_mask)\n', (4329, 4340), True, 'import numpy as np\n'), ((5606, 5653), 'dynet.lookup_batch', 'dy.lookup_batch', (['self.ccg_lookup', 'ccg_vec_batch'], {}), '(self.ccg_lookup, ccg_vec_batch)\n', (5621, 5653), True, 'import dynet as dy\n'), ((5858, 5886), 'dynet.concatenate_cols', 'dy.concatenate_cols', (['ccg_vec'], {}), '(ccg_vec)\n', (5877, 5886), True, 'import dynet as dy\n'), ((6682, 6702), 'numpy.array', 'np.array', (['best_index'], {}), '(best_index)\n', (6690, 6702), True, 'import numpy as np\n'), ((6892, 6912), 'numpy.array', 'np.array', (['best_value'], {}), '(best_value)\n', (6900, 6912), True, 'import numpy as np\n'), ((6942, 7001), 'numpy.reshape', 'np.reshape', (['best_value', '(beam_width, batch_size)'], {'order': '"""F"""'}), "(best_value, (beam_width, batch_size), order='F')\n", (6952, 7001), True, 'import numpy as np\n'), ((7517, 7531), 'numpy.array', 'np.array', (['flag'], {}), '(flag)\n', (7525, 7531), True, 'import numpy as np\n'), ((7557, 7573), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (7565, 7573), True, 'import numpy as np\n'), ((7600, 7617), 'numpy.array', 'np.array', (['ccg_len'], {}), '(ccg_len)\n', (7608, 7617), True, 'import numpy as np\n'), ((7821, 7844), 'dynet.concatenate_cols', 'dy.concatenate_cols', (['h1'], {}), '(h1)\n', (7840, 7844), True, 'import dynet as dy\n'), ((7919, 7942), 'dynet.concatenate_cols', 'dy.concatenate_cols', (['c1'], {}), '(c1)\n', (7938, 7942), True, 'import dynet as dy\n'), ((9716, 9736), 
'numpy.array', 'np.array', (['best_index'], {}), '(best_index)\n', (9724, 9736), True, 'import numpy as np\n'), ((9765, 9784), 'numpy.array', 'np.array', (['best_beam'], {}), '(best_beam)\n', (9773, 9784), True, 'import numpy as np\n'), ((11127, 11145), 'numpy.array', 'np.array', (['flag_new'], {}), '(flag_new)\n', (11135, 11145), True, 'import numpy as np\n'), ((11171, 11191), 'numpy.array', 'np.array', (['output_new'], {}), '(output_new)\n', (11179, 11191), True, 'import numpy as np\n'), ((11218, 11239), 'numpy.array', 'np.array', (['ccg_len_new'], {}), '(ccg_len_new)\n', (11226, 11239), True, 'import numpy as np\n'), ((11530, 11553), 'dynet.concatenate_cols', 'dy.concatenate_cols', (['h1'], {}), '(h1)\n', (11549, 11553), True, 'import dynet as dy\n'), ((11575, 11598), 'dynet.concatenate_cols', 'dy.concatenate_cols', (['c1'], {}), '(c1)\n', (11594, 11598), True, 'import dynet as dy\n'), ((564, 596), 'numpy.random.randn', 'np.random.randn', (['n_tag', 'ccg_dims'], {}), '(n_tag, ccg_dims)\n', (579, 596), True, 'import numpy as np\n'), ((2284, 2343), 'dynet.lookup_batch', 'dy.lookup_batch', (['self.ccg_lookup', 'truth_batch[atom_cnt - 1]'], {}), '(self.ccg_lookup, truth_batch[atom_cnt - 1])\n', (2299, 2343), True, 'import dynet as dy\n'), ((2471, 2503), 'dynet.inputTensor', 'dy.inputTensor', (['cm'], {'batched': '(True)'}), '(cm, batched=True)\n', (2485, 2503), True, 'import dynet as dy\n'), ((3151, 3170), 'dynet.cmult', 'dy.cmult', (['errors', 'm'], {}), '(errors, m)\n', (3159, 3170), True, 'import dynet as dy\n'), ((6062, 6084), 'dynet.softmax', 'dy.softmax', (['(self.W * o)'], {}), '(self.W * o)\n', (6072, 6084), True, 'import dynet as dy\n'), ((8026, 8048), 'dynet.softmax', 'dy.softmax', (['(self.W * o)'], {}), '(self.W * o)\n', (8036, 8048), True, 'import dynet as dy\n'), ((9827, 9847), 'numpy.array', 'np.array', (['best_value'], {}), '(best_value)\n', (9835, 9847), True, 'import numpy as np\n'), ((2599, 2640), 'dynet.lookup_batch', 'dy.lookup_batch', 
(['self.ccg_lookup', 'predict'], {}), '(self.ccg_lookup, predict)\n', (2614, 2640), True, 'import dynet as dy\n'), ((2701, 2744), 'dynet.lookup_batch', 'dy.lookup_batch', (['self.ccg_lookup', '[predict]'], {}), '(self.ccg_lookup, [predict])\n', (2716, 2744), True, 'import dynet as dy\n'), ((5780, 5830), 'dynet.lookup_batch', 'dy.lookup_batch', (['self.ccg_lookup', 'best_index[:, i]'], {}), '(self.ccg_lookup, best_index[:, i])\n', (5795, 5830), True, 'import dynet as dy\n'), ((11393, 11434), 'dynet.pick_batch', 'dy.pick_batch', (['h0', 'best_beam[:, i]'], {'dim': '(1)'}), '(h0, best_beam[:, i], dim=1)\n', (11406, 11434), True, 'import dynet as dy\n'), ((11466, 11507), 'dynet.pick_batch', 'dy.pick_batch', (['c0', 'best_beam[:, i]'], {'dim': '(1)'}), '(c0, best_beam[:, i], dim=1)\n', (11479, 11507), True, 'import dynet as dy\n'), ((12096, 12125), 'numpy.power', 'np.power', (['ccg_len[:, i]', '(0.15)'], {}), '(ccg_len[:, i], 0.15)\n', (12104, 12125), True, 'import numpy as np\n'), ((2369, 2421), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1.0 - ccg_dropout)', 'batch_size'], {}), '(1, 1.0 - ccg_dropout, batch_size)\n', (2387, 2421), True, 'import numpy as np\n'), ((6615, 6649), 'heapq.nlargest', 'heapq.nlargest', (['beam_width', 'y_list'], {}), '(beam_width, y_list)\n', (6629, 6649), False, 'import heapq\n'), ((6817, 6851), 'dynet.pick_batch', 'dy.pick_batch', (['y', 'best_index[:, i]'], {}), '(y, best_index[:, i])\n', (6830, 6851), True, 'import dynet as dy\n'), ((6406, 6440), 'heapq.nlargest', 'heapq.nlargest', (['beam_width', 'y_list'], {}), '(beam_width, y_list)\n', (6420, 6440), False, 'import heapq\n')] |
# %% Packages
import os
# work from the scripts directory so the relative data paths below resolve
os.chdir('/Users/czarrar/Dropbox/Circle/Jerb/recipe_rec/scripts')
import recipe_rec
import importlib
# reload to pick up local edits without restarting the interactive session
recipe_rec = importlib.reload(recipe_rec)
# %% Read in ingredients
recipe_file = '../data/30_ingredients+ave_ratings.csv'
recs = recipe_rec.RecipeRec()
recs.load_from_csv(recipe_file, index_col=0)
# %% Apply TFIDF
recs.fit_model()
# %% Save
#!rm z_model.p
recs.save_model("z_model.p")
# %% Test loading it back
import numpy as np
recs2 = recipe_rec.RecipeRec.load_model('z_model.p')
# sanity check: the reloaded model should have an identical feature matrix
np.all(recs.model.features == recs2.model.features)
# %% Test Recipes + Try Out a Simple Proximity Model
# free-text ingredient lists used as ad-hoc queries against the model
test_recipes = [
    'caramels water chopped pecans Rice Krispies milk chocolate chips shortening',
    'peanut butter sugar large egg room temperature vanilla extract milk chocolate kisses',
    'semisweet chocolate chips, water, large egg yolk lightly beaten, teaspoons vanilla extract, heavy whipping cream, sugar, Whipped cream, raspberries',
    'dried minced onion, salt, chili powder, cornstarch, ground cumin, red pepper flakes, cayenne pepper, dried minced garlic, dried oregano, ground beef, water',
    'reduced-sodium soy sauce, rice wine vinegar, cornstarch, sesame oil, divided, pork tenderloin, cut into strips, red chile pepper, chopped, cloves garlic, minced, onion, chopped, green bell pepper, chopped, head bok choy, leaves and stalks separated, chopped, crowns broccoli, chopped, ground ginger',
    'Ingredient Checklist, hoisin sauce, brown sugar, soy sauce, applesauce, pork loin, sliced, cut into thin strips, cornstarch, peanut oil, sesame oil, chopped fresh ginger root, broccoli florets'
]
tmp = recs.proximity_model(test_recipes[0], to_clean=False)
tmp
# inspect the top hit as a plain dict
d = tmp.iloc[0,:].to_dict()
d
"{name}: {ingredients}".format(**d)
recs.recipes.recipe_id.min()
| [
"recipe_rec.RecipeRec",
"os.chdir",
"importlib.reload",
"numpy.all",
"recipe_rec.RecipeRec.load_model"
] | [((24, 89), 'os.chdir', 'os.chdir', (['"""/Users/czarrar/Dropbox/Circle/Jerb/recipe_rec/scripts"""'], {}), "('/Users/czarrar/Dropbox/Circle/Jerb/recipe_rec/scripts')\n", (32, 89), False, 'import os\n'), ((139, 167), 'importlib.reload', 'importlib.reload', (['recipe_rec'], {}), '(recipe_rec)\n', (155, 167), False, 'import importlib\n'), ((256, 278), 'recipe_rec.RecipeRec', 'recipe_rec.RecipeRec', ([], {}), '()\n', (276, 278), False, 'import recipe_rec\n'), ((468, 512), 'recipe_rec.RecipeRec.load_model', 'recipe_rec.RecipeRec.load_model', (['"""z_model.p"""'], {}), "('z_model.p')\n", (499, 512), False, 'import recipe_rec\n'), ((513, 564), 'numpy.all', 'np.all', (['(recs.model.features == recs2.model.features)'], {}), '(recs.model.features == recs2.model.features)\n', (519, 564), True, 'import numpy as np\n')] |
# import packages here
import copy
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import random
import time
import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# my imports
from scipy.ndimage.interpolation import rotate
import torch.optim as optim
from time import time
import sys
from torchvision import models
from sklearn.svm import LinearSVC
# not import the google.colab related stuff in this python file
# ==========================================
# Load Training Data and Testing Data
# ==========================================
# class folder names with the leading './data/train/' (13 chars) stripped,
# then mapped to integer ids 0..N-1 in glob order
class_names = [name[13:] for name in glob.glob('./data/train/*')]
class_names = dict(zip(range(len(class_names)), class_names))
print("class_names: %s " % class_names)
n_train_samples = 150  # NOTE(review): appears unused; loaders take explicit counts
n_test_samples = 50
def img_norm(img):
    """Min-max normalize image pixel values to the range [-1, 1].

    Computes x' = 2 * (x - min) / (max - min) - 1 in float64.

    Args:
        img: numpy array of pixel values (any integer or float dtype).

    Returns:
        float64 array of the same shape with values in [-1, 1].  A constant
        image (max == min) is mapped to all zeros instead of dividing by
        zero as the original did.
    """
    pixels = img.astype('float64')
    lo, hi = pixels.min(), pixels.max()
    if hi == lo:
        # BUG FIX: constant image would divide by zero (NaN/inf); map to 0
        return np.zeros_like(pixels)
    return (2. * (pixels - lo) / (hi - lo)) - 1
def load_dataset(path, img_size, num_per_class=-1, batch_num=1, shuffle=False, augment=False, is_color=False,
                 rotate_90=False, zero_centered=False):
    """Load, resize, normalize, optionally augment, and minibatch images.

    Args:
        path: root folder containing one sub-folder per class (see class_names).
        img_size: (width, height) passed to cv2.resize.
        num_per_class: cap on images per class; -1 loads everything.
        batch_num: minibatch size; when > 1, samples are packed into torch tensors.
        shuffle: randomly permute samples (important for training).
        augment: add horizontally mirrored copies of every image.
        is_color: load 3-channel images (transposed to C,H,W) instead of grayscale.
        rotate_90: additionally add randomly rotated copies of every image.
        zero_centered: subtract each image's mean after [-1, 1] scaling.

    Returns:
        zip of (images, labels) minibatch pairs (DoubleTensor / LongTensor)
        when batch_num > 1; zip of individual (array, label) pairs otherwise.
    """
    data = []
    labels = []
    channel_num = 3 if is_color else 1
    # read images and resize
    for id, class_name in class_names.items():
        print("Loading images from class: %s" % id)
        img_path_class = glob.glob(path + class_name + '/*.jpg')
        if num_per_class > 0:
            img_path_class = img_path_class[:num_per_class]
        labels.extend([id] * len(img_path_class))
        for filename in img_path_class:
            img = cv2.imread(filename) if is_color else cv2.imread(filename, 0)
            # BUG FIX: cv2.resize's third positional parameter is `dst`, not
            # the interpolation flag — pass interpolation by keyword
            img = cv2.resize(img, img_size, interpolation=cv2.INTER_LINEAR)
            if is_color:
                img = np.transpose(img, [2, 0, 1])  # HWC -> CHW
            data.append(img_norm(img))  # scale pixel values to [-1, 1]
    # data augmentation: mirroring.
    # BUG FIX: flip the LAST axis (width) — axis 1 is correct for grayscale
    # (H, W) but flips the height of transposed color images (C, H, W)
    if augment:
        mirrored = [np.flip(img, -1) for img in data]
        data.extend(mirrored)
        labels.extend(labels)
    # data augmentation: random rotation of every (possibly mirrored) image
    if rotate_90:
        aug_data = [rotate(img, np.random.randint(0, 360),
                           reshape=False) for img in data]
        data.extend(aug_data)
        labels.extend(labels)
    # per-image zero-centering: subtract the mean pixel value
    if zero_centered:
        for i in range(len(data)):
            pixels = np.asarray(data[i]).astype('float64')
            data[i] = pixels - pixels.mean()
    # randomly permute (this step is important for training)
    if shuffle:
        bundle = list(zip(data, labels))
        random.shuffle(bundle)
        data, labels = zip(*bundle)
    # divide data into minibatches of torch tensors (drops the remainder)
    if batch_num > 1:
        batch_data = []
        batch_labels = []
        print(len(data))
        print(batch_num)
        for i in range(int(len(data) / batch_num)):
            minibatch_d = data[i * batch_num: (i + 1) * batch_num]
            minibatch_d = np.reshape(
                minibatch_d, (batch_num, channel_num, img_size[0], img_size[1]))
            batch_data.append(torch.from_numpy(minibatch_d))
            minibatch_l = labels[i * batch_num: (i + 1) * batch_num]
            batch_labels.append(torch.LongTensor(minibatch_l))
        data, labels = batch_data, batch_labels
    # BUG FIX: the original returned zip(batch_data, batch_labels)
    # unconditionally, a NameError whenever batch_num <= 1
    return zip(data, labels)
# load data into size (64, 64)
img_size = (64, 64)
batch_num = 50 # training sample number per batch
# load training dataset (mirrored + per-image zero-centered)
trainloader_small = list(load_dataset('./data/train/', img_size, batch_num=batch_num, shuffle=True,
                                      augment=True, zero_centered=True))
train_num = len(trainloader_small)
print("Finish loading %d minibatches(=%d) of training samples." %
      (train_num, batch_num))
# load testing dataset (no augmentation, 50 images per class)
testloader_small = list(load_dataset(
    './data/test/', img_size, num_per_class=50, batch_num=batch_num))
test_num = len(testloader_small)
print("Finish loading %d minibatches(=%d) of testing samples." %
      (test_num, batch_num))
# show some images
def imshow(img):
    """Display one image tensor with matplotlib.

    Undoes the [-1, 1] display normalization and moves a color image from
    (C, H, W) back to (H, W, C) before plotting in the 'gray' colormap.
    """
    img = img / 2 + 0.5  # unnormalize for display
    npimg = img.numpy()
    if len(npimg.shape) > 2:
        # BUG FIX: transpose the numpy copy — the original passed the tensor
        # `img`, leaving `npimg` itself untransposed in spirit
        npimg = np.transpose(npimg, [1, 2, 0])
    plt.figure()  # BUG FIX: bare `plt.figure` (no call) was a silent no-op
    plt.imshow(npimg, 'gray')
    plt.show()
# show one sample: image 11 of minibatch 0, with its class name
img, label = trainloader_small[0][0][11][0], trainloader_small[0][1][11]
label = int(np.array(label))
print(class_names[label])
imshow(img)
################################################## q 1 #############################################################
# part1
# ==========================================
# Define Network Architecture
# ==========================================
class Model(nn.Module):
    """Small CNN for 64x64 grayscale images over 16 classes.

    Two conv/pool stages (each followed by dropout) feed three fully
    connected layers; ReLU activations throughout.  Based on the network
    in https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html
    """
    def __init__(self):
        super(Model, self).__init__()
        p_drop = 0.5  # dropout probability for regularization
        self.conv1 = nn.Conv2d(1, 12, 5)   # 1 -> 12 channels, 5x5 kernel
        self.pool1 = nn.MaxPool2d(2)       # 2x2 window halves spatial size
        self.conv2 = nn.Conv2d(12, 16, 5)  # 12 -> 16 channels, 5x5 kernel
        self.pool2 = nn.MaxPool2d(2)
        self.dropout1 = nn.Dropout(p_drop)
        self.dropout2 = nn.Dropout(p_drop)
        # 64 -> 60 -> 30 -> 26 -> 13 spatial, hence 16 * 13 * 13 inputs
        self.fc1 = nn.Linear(16 * 13 * 13, 120)
        self.fc2 = nn.Linear(120, 120)
        self.fc3 = nn.Linear(120, 16)

    def forward(self, x):
        """Return raw class scores (logits) for a batch of images."""
        stage1 = self.dropout1(self.pool1(F.relu(self.conv1(x))))
        stage2 = self.dropout2(self.pool2(F.relu(self.conv2(stage1))))
        flat = stage2.view(-1, self.num_flat_features(stage2))
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Product of all dimensions except the batch dimension."""
        total = 1
        for dim in x.size()[1:]:
            total *= dim
        return total
# ==========================================
# Optimize/Train Network
# ==========================================
num_epoch = 50
# pick GPU when available, otherwise fall back to CPU
device = torch.device("cuda") if torch.cuda.is_available(
) else torch.device("cpu")  # use cuda if that's available
print("cuda is available", torch.cuda.is_available())
nn_part1 = Model().double().to(device)
def train_optimize_model(dataset, NNmodel, epochs=30):
    """Train `NNmodel` on `dataset` with Adam (lr=0.001).

    Runs `epochs` full passes, minimizing cross-entropy loss per minibatch
    on the module-level `device`.  The model is updated in place.
    """
    optimizer = optim.Adam(NNmodel.parameters(), lr=0.001)
    for _epoch in range(epochs):
        for inputs, targets in dataset:
            optimizer.zero_grad()
            logits = NNmodel(inputs.to(device))
            batch_loss = nn.CrossEntropyLoss()(logits, targets.to(device))
            batch_loss.backward()
            optimizer.step()
# train the part-1 network and time the run
init_train_time_p1 = time()
train_optimize_model(trainloader_small, nn_part1, num_epoch)
time_took_4_train_n_optimize_mode = time()
# ==========================================
# Evaluating Network
# ==========================================
def evaluate_network(dataset, NNModel):
    """Return classification accuracy of `NNModel` over ALL minibatches.

    BUG FIX: the original returned inside the loop, so only the FIRST
    minibatch was ever scored; this version aggregates correct predictions
    over the whole dataset.  Uses the module-level `device`.

    Args:
        dataset: iterable of (images, labels) minibatch tensors.
        NNModel: trained network producing per-class scores.

    Returns:
        Accuracy in [0, 1] as a float (0.0 for an empty dataset).
    """
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for x, y in dataset:
            predictions = torch.max(NNModel(x.to(device)), 1)[1]
            correct += (predictions == y.to(device)).sum().item()
            total += y.size(0)
    return float(correct) / total if total else 0.0
# report part-1 test accuracy and the configuration used
acc = evaluate_network(testloader_small, nn_part1)
print('question 1 part 1\n')
print('Data augmentation: mirroring')
print(
    ' Data normalization: [-1,1], zero-centered by substract mean from pixels ')
print(' Network Regularization: dropout layer after each conv layer')
print('\n\nnet struct:', nn_part1)
print('\n\nAccuracy is {0:.2f}%'.format(acc * 100), 'after', num_epoch,
      'epoches in {0:.2f}'.format(time_took_4_train_n_optimize_mode - init_train_time_p1), 'secs')
standard_acc = acc  # baseline accuracy the part-2 variants are compared against
# part2 approach 1: Data augmentation: mirroring, Data normalization [-1,1], zero-centered, Network Regularization: using dropout layers, activation function sigmoid
class Model_sigmoid(nn.Module):
    """Variant of Model using sigmoid activations instead of ReLU.

    Same topology: two conv/pool/dropout stages then three fully connected
    layers producing 16 class scores.
    """
    def __init__(self):
        super(Model_sigmoid, self).__init__()
        p_drop = 0.5  # dropout probability
        self.conv1 = nn.Conv2d(1, 12, 5)   # 1 -> 12 channels, 5x5 kernel
        self.pool1 = nn.MaxPool2d(2)       # 2x2 window
        self.conv2 = nn.Conv2d(12, 16, 5)  # 12 -> 16 channels, 5x5 kernel
        self.pool2 = nn.MaxPool2d(2)
        self.dropout1 = nn.Dropout(p_drop)
        self.dropout2 = nn.Dropout(p_drop)
        # 64 -> 60 -> 30 -> 26 -> 13 spatial, hence 16 * 13 * 13 inputs
        self.fc1 = nn.Linear(16 * 13 * 13, 120)
        self.fc2 = nn.Linear(120, 120)
        self.fc3 = nn.Linear(120, 16)

    def forward(self, x):
        """Return raw class scores for a batch of grayscale images."""
        stage1 = self.dropout1(self.pool1(torch.sigmoid(self.conv1(x))))
        stage2 = self.dropout2(self.pool2(torch.sigmoid(self.conv2(stage1))))
        flat = stage2.view(-1, self.num_flat_features(stage2))
        hidden = torch.sigmoid(self.fc2(torch.sigmoid(self.fc1(flat))))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Product of all dimensions except the batch dimension."""
        total = 1
        for dim in x.size()[1:]:
            total *= dim
        return total
# part2 approach 2: Data augmentation: mirroring, Data normalization [-1,1], zero-centered, batch normlaization, Network Regularization: using dropout layers, activation function relu
class Model_bathch_norm(nn.Module):
    """Variant of Model adding batch normalization before the classifier.

    (Class name typo preserved: callers reference `Model_bathch_norm`.)
    Same conv topology as Model; a BatchNorm2d layer is applied after the
    second conv/pool/dropout stage, just before flattening.
    """
    def __init__(self):
        super(Model_bathch_norm, self).__init__()
        p_drop = 0.5  # dropout probability
        self.conv1 = nn.Conv2d(1, 12, 5)   # 1 -> 12 channels, 5x5 kernel
        self.pool1 = nn.MaxPool2d(2)       # 2x2 window
        self.conv2 = nn.Conv2d(12, 16, 5)  # 12 -> 16 channels, 5x5 kernel
        self.norm1 = nn.BatchNorm2d(16)
        self.pool2 = nn.MaxPool2d(2)
        self.dropout1 = nn.Dropout(p_drop)
        self.fc1 = nn.Linear(16 * 13 * 13, 120)
        self.dropout2 = nn.Dropout(p_drop)
        self.fc2 = nn.Linear(120, 120)
        self.fc3 = nn.Linear(120, 16)

    def forward(self, x):
        """Return raw class scores for a batch of grayscale images."""
        stage1 = self.dropout1(self.pool1(F.relu(self.conv1(x))))
        stage2 = self.dropout2(self.pool2(F.relu(self.conv2(stage1))))
        normed = self.norm1(stage2)
        flat = normed.view(-1, self.num_flat_features(normed))
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)

    def num_flat_features(self, x):
        """Product of all dimensions except the batch dimension."""
        total = 1
        for dim in x.size()[1:]:
            total *= dim
        return total
# part 2: optimization/training phase for the three variant networks
num_epoch = 50
# print("cuda is available",torch.cuda.is_available())
nn_part2_1 = Model_sigmoid().double().to(device)
nn_part2_2 = Model_bathch_norm().double().to(device)
nn_part2_3 = Model().double().to(device)
# nn_part2_3 = Model_softmax().double().to(device)
# nn_part2_3 = Model().double().to(device)
# print(123)
init_time = time()
train_optimize_model(trainloader_small, nn_part2_1, num_epoch)
time_p2_1 = time()
train_optimize_model(trainloader_small, nn_part2_2, num_epoch)
time_p2_2 = time()
# test 3 uses a rotation-augmented training set and its own test loader
trainloader_small_rotated = list(load_dataset(
    'data/train/', img_size, batch_num=batch_num, shuffle=True, augment=True, zero_centered=True, rotate_90=True))
testloader_small_rotated = list(load_dataset(
    'data/test/', img_size, num_per_class=100, batch_num=batch_num))
time_p2_3_ini = time()
# train_optimize_model(trainloader_small_rotated, nn_part2_3,num_epoch)
train_optimize_model(trainloader_small_rotated, nn_part2_3, num_epoch)
time_p2_3_fin = time()
# evaluation for part 2 test 1 (sigmoid activations)
acc = evaluate_network(testloader_small, nn_part2_1)
print(
    'test 1\n Data augmentation: mirroring\n Data normalization [-1,1], zero-centered\n Network Regularization: using dropout layers\n activation function sigmoid')
print('\n\n NN struct:', nn_part2_1)
print('\n\nAccuracy is {0:.2f}%'.format(acc * 100), 'after', num_epoch,
      'epoches in {0:.2f}'.format(time_p2_1 - init_time), 'secs')
print('accuracy increase is{0:.2f}%'.format(acc * 100 - standard_acc * 100))
# evaluation for part 2 test 2 (batch normalization)
acc = evaluate_network(testloader_small, nn_part2_2)
print(
    'test 2\n Data augmentation: mirroring\n Data normalization [-1,1], zero-centered, batch normlaization\n Network Regularization: using dropout layers\n')
print('\n\n NN struct:', nn_part2_2)
print('\n\nAccuracy is {0:.2f}%'.format(acc * 100), 'after', num_epoch,
      'epoches in {0:.2f}'.format(time_p2_2 - time_p2_1), 'secs')
print('accuracy increase is{0:.2f}%'.format(acc * 100 - standard_acc * 100))
# evaluation for part 2 test 3 (rotation augmentation)
acc = evaluate_network(testloader_small_rotated, nn_part2_3)
print(
    'test 1\n Data augmentation: mirroring, retated 90 degrees\n Data normalization [-1,1], zero-centered\n Network Regularization: using dropout layers\n')
print('\n\n NN struct:', nn_part2_3)
print('\n\nAccuracy is {0:.2f}%'.format(acc * 100), 'after', num_epoch,
      'epoches in {0:.2f}'.format(time_p2_3_fin - time_p2_3_ini), 'secs')
print('accuracy increase is{0:.2f}%'.format(acc * 100 - standard_acc * 100))
############################################################ q 2 #################################################
# reload data with a larger size (AlexNet expects 224x224 color inputs)
img_size = (224, 224)
batch_num = 50 # training sample number per batch
# load training dataset
trainloader_large = list(load_dataset('./data/train/', img_size, batch_num=batch_num, shuffle=True,
                                      augment=False, is_color=True, zero_centered=True))
train_num = len(trainloader_large)
print("Finish loading %d minibatches(=%d) of training samples." %
      (train_num, batch_num))
# load testing dataset
testloader_large = list(load_dataset(
    './data/test/', img_size, num_per_class=50, batch_num=batch_num, is_color=True))
test_num = len(testloader_large)
print("Finish loading %d minibatches(=%d) of testing samples." %
      (test_num, batch_num))
# part 1: transfer learning — swap AlexNet's final classifier layer for a
# fresh 16-way linear layer, then fine-tune with SGD + StepLR schedule
print("replace final layer with 16 channels")
temp_model = models.alexnet(pretrained=True).double().to(device)
temp_model.classifier[-1] = nn.Linear(
    temp_model.classifier[-1].in_features, 16)
loss_fun = nn.CrossEntropyLoss()
temp_optimizer = torch.optim.SGD(
    temp_model.parameters(), lr=0.001, momentum=0.9)
temp_scheduler = torch.optim.lr_scheduler.StepLR(
    temp_optimizer, step_size=7, gamma=0.1)
temp_model.to(device)
temp_model = temp_model.double()
print("part 1")
# part1
# referencing https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
def train_model(model, criterion, optimizer, scheduler, num_epochs=20):
    """Fine-tune `model` on the global `trainloader_large`, keep best weights.

    Runs `num_epochs` epochs of SGD on the module-level `trainloader_large`
    (list of (inputs, labels) minibatches), steps `scheduler` once per
    epoch, and restores the weights of the epoch with the highest training
    accuracy before returning.

    Args:
        model: torch.nn.Module to train (moved to the global `device` by
            the caller).
        criterion: loss function, e.g. nn.CrossEntropyLoss().
        optimizer: torch optimizer over model.parameters().
        scheduler: learning-rate scheduler stepped once per epoch.
        num_epochs: number of training epochs (default 20).

    Returns:
        The same `model`, loaded with the best epoch's weights.
    """
    time1 = time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        running_corrects = 0
        sample_count = 0
        for inputs, labels in trainloader_large:
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                outputs = model(inputs.to(device))
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels.to(device))
                loss.backward()
                optimizer.step()
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.to(device))
            sample_count += inputs.size(0)
        scheduler.step()
        # BUGFIX: divide by the number of samples seen, not the number of
        # minibatches — the old len(trainloader_large) denominator inflated
        # the accuracy by the batch size (best-epoch selection was unaffected
        # since the factor was constant, but the value itself was wrong).
        epoch_acc = running_corrects.double() / max(sample_count, 1)
        if epoch_acc > best_acc:
            best_acc = epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())
    time2 = time()
    totalTime = time2 - time1
    print('Training complete in {:.0f}m {:.0f}s'.format(
        totalTime // 60, totalTime % 60))
    # print('Best Acc: {:4f}'.format(best_acc*100))
    model.load_state_dict(best_model_wts)
    return model
# fine-tune the modified AlexNet end-to-end, then measure test accuracy
best_model = train_model(temp_model, loss_fun,
                         temp_optimizer, temp_scheduler, num_epochs=20)
time1 = time()
correct = 0
total = 0
# inference only: no gradients needed
with torch.no_grad():
    for data in testloader_large:
        images, labels = data
        outputs = best_model(images.to(device))
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels.to(device)).sum().item()
print("replace final layer with 16 channels")
print('NN struct:', best_model)
print('Accuracy of the network on the test images: %d %%' %
      (100 * correct / total))
time2 = time()
print("total " + str(time2 - time1) + "seconds")
# part 2
# use a frozen pretrained AlexNet (last classifier layer removed) as a
# feature extractor, keep the first `featureSize` activations, and train a
# linear SVM on them instead of fine-tuning the network
time1 = time()
alex = models.alexnet(pretrained=True).double()
temp_model = LinearSVC(penalty="l2", C=1.0, random_state=23333)
# remove last layer
del alex.classifier[-1]
featureSize = 200
X_train, y_train = [], []
# note: loop variable `train` shadows any earlier name of the same spelling
for train, label in trainloader_large:
    X_train.append(alex(train)[:, :featureSize].detach().numpy())
    y_train.append(label.detach().numpy())
X_train = np.vstack(X_train)
y_train = np.hstack(y_train)
temp_model.fit(X_train, y_train)
time2 = time()
correct_pred = 0
total_pred = 0
for data, label in testloader_large:
    predicted = temp_model.predict(
        alex(data)[:, :featureSize].detach().numpy())
    correct_pred += np.sum(predicted == label.detach().numpy())
    total_pred += len(label)
acc = correct_pred / total_pred * 100
print('part 2')
print('net struct:', alex)
print('Accuracy is {0:.2f}%'.format(acc),
      ' within {0:.2f}'.format(time2 - time1), 'seconds')
# bonus
# repeat the part-1 fine-tuning experiment with VGG16 instead of AlexNet
print('question 2 bonus')
temp_vgg_model = models.vgg16(pretrained=True).double().to(device)
temp_vgg_model.classifier[-1] = nn.Linear(
    temp_vgg_model.classifier[-1].in_features, 16)
loss_fun = nn.CrossEntropyLoss()
temp_optimizer = torch.optim.SGD(
    temp_vgg_model.parameters(), lr=0.001, momentum=0.9)
temp_scheduler = torch.optim.lr_scheduler.StepLR(
    temp_optimizer, step_size=7, gamma=0.1)
temp_vgg_model.to(device)
temp_vgg_model = temp_vgg_model.double()
# fewer epochs than AlexNet (5 vs 20), presumably because VGG16 is slower
vgg_model = train_model(temp_vgg_model, loss_fun,
                        temp_optimizer, temp_scheduler, num_epochs=5)
time1 = time()
correct = 0
total = 0
with torch.no_grad():
    for data in testloader_large:
        images, labels = data
        outputs = vgg_model(images.to(device))
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels.to(device)).sum().item()
print("replace final layer with 16 channels")
print('NN struct:', vgg_model)
print('Accuracy of the network on the test images: %d %%' %
      (100 * correct / total))
time2 = time()
print("total " + str(time2 - time1) + "seconds")
print('reason why vgg is better than alexnet\n')
print(
    'vgg does not use large receptive fields like alexnet (11*11 with stride of 4 compares to 3*3 with stride set to 1 => multiple maller size kernel is better than a large kernel becuase with the number to increasing non-linear layers, the depth of the NN is increasing therefore the nn can learn more complex feattures ')
print('vgg also uses fewer parameters')
| [
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"numpy.hstack",
"torch.LongTensor",
"torch.max",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"matplotlib.pyplot.imshow",
"torch.nn.BatchNorm2d",
"numpy.flip",
"numpy.reshape",
"torch.set_grad_enabled",
"numpy.asarray",
"n... | [((7361, 7367), 'time.time', 'time', ([], {}), '()\n', (7365, 7367), False, 'from time import time\n'), ((7465, 7471), 'time.time', 'time', ([], {}), '()\n', (7469, 7471), False, 'from time import time\n'), ((11884, 11890), 'time.time', 'time', ([], {}), '()\n', (11888, 11890), False, 'from time import time\n'), ((11966, 11972), 'time.time', 'time', ([], {}), '()\n', (11970, 11972), False, 'from time import time\n'), ((12048, 12054), 'time.time', 'time', ([], {}), '()\n', (12052, 12054), False, 'from time import time\n'), ((12349, 12355), 'time.time', 'time', ([], {}), '()\n', (12353, 12355), False, 'from time import time\n'), ((12517, 12523), 'time.time', 'time', ([], {}), '()\n', (12521, 12523), False, 'from time import time\n'), ((15052, 15104), 'torch.nn.Linear', 'nn.Linear', (['temp_model.classifier[-1].in_features', '(16)'], {}), '(temp_model.classifier[-1].in_features, 16)\n', (15061, 15104), True, 'import torch.nn as nn\n'), ((15121, 15142), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (15140, 15142), True, 'import torch.nn as nn\n'), ((15247, 15318), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['temp_optimizer'], {'step_size': '(7)', 'gamma': '(0.1)'}), '(temp_optimizer, step_size=7, gamma=0.1)\n', (15278, 15318), False, 'import torch\n'), ((16913, 16919), 'time.time', 'time', ([], {}), '()\n', (16917, 16919), False, 'from time import time\n'), ((17401, 17407), 'time.time', 'time', ([], {}), '()\n', (17405, 17407), False, 'from time import time\n'), ((17475, 17481), 'time.time', 'time', ([], {}), '()\n', (17479, 17481), False, 'from time import time\n'), ((17543, 17593), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'penalty': '"""l2"""', 'C': '(1.0)', 'random_state': '(23333)'}), "(penalty='l2', C=1.0, random_state=23333)\n", (17552, 17593), False, 'from sklearn.svm import LinearSVC\n'), ((17841, 17859), 'numpy.vstack', 'np.vstack', (['X_train'], {}), '(X_train)\n', (17850, 17859), True, 'import 
numpy as np\n'), ((17870, 17888), 'numpy.hstack', 'np.hstack', (['y_train'], {}), '(y_train)\n', (17879, 17888), True, 'import numpy as np\n'), ((17930, 17936), 'time.time', 'time', ([], {}), '()\n', (17934, 17936), False, 'from time import time\n'), ((18507, 18563), 'torch.nn.Linear', 'nn.Linear', (['temp_vgg_model.classifier[-1].in_features', '(16)'], {}), '(temp_vgg_model.classifier[-1].in_features, 16)\n', (18516, 18563), True, 'import torch.nn as nn\n'), ((18580, 18601), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (18599, 18601), True, 'import torch.nn as nn\n'), ((18710, 18781), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['temp_optimizer'], {'step_size': '(7)', 'gamma': '(0.1)'}), '(temp_optimizer, step_size=7, gamma=0.1)\n', (18741, 18781), False, 'import torch\n'), ((18984, 18990), 'time.time', 'time', ([], {}), '()\n', (18988, 18990), False, 'from time import time\n'), ((19470, 19476), 'time.time', 'time', ([], {}), '()\n', (19474, 19476), False, 'from time import time\n'), ((4794, 4819), 'matplotlib.pyplot.imshow', 'plt.imshow', (['npimg', '"""gray"""'], {}), "(npimg, 'gray')\n", (4804, 4819), True, 'import matplotlib.pyplot as plt\n'), ((4824, 4834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4832, 4834), True, 'import matplotlib.pyplot as plt\n'), ((4922, 4937), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (4930, 4937), True, 'import numpy as np\n'), ((6763, 6788), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6786, 6788), False, 'import torch\n'), ((6739, 6759), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6751, 6759), False, 'import torch\n'), ((6795, 6814), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6807, 6814), False, 'import torch\n'), ((6874, 6899), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6897, 6899), False, 'import torch\n'), ((15576, 15582), 'time.time', 
'time', ([], {}), '()\n', (15580, 15582), False, 'from time import time\n'), ((16534, 16540), 'time.time', 'time', ([], {}), '()\n', (16538, 16540), False, 'from time import time\n'), ((16947, 16962), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16960, 16962), False, 'import torch\n'), ((19018, 19033), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19031, 19033), False, 'import torch\n'), ((724, 751), 'glob.glob', 'glob.glob', (['"""./data/train/*"""'], {}), "('./data/train/*')\n", (733, 751), False, 'import glob\n'), ((1661, 1700), 'glob.glob', 'glob.glob', (["(path + class_name + '/*.jpg')"], {}), "(path + class_name + '/*.jpg')\n", (1670, 1700), False, 'import glob\n'), ((3149, 3171), 'random.shuffle', 'random.shuffle', (['bundle'], {}), '(bundle)\n', (3163, 3171), False, 'import random\n'), ((4746, 4774), 'numpy.transpose', 'np.transpose', (['img', '[1, 2, 0]'], {}), '(img, [1, 2, 0])\n', (4758, 4774), True, 'import numpy as np\n'), ((5539, 5558), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(12)', '(5)'], {}), '(1, 12, 5)\n', (5548, 5558), True, 'import torch.nn as nn\n'), ((5619, 5634), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (5631, 5634), True, 'import torch.nn as nn\n'), ((5670, 5690), 'torch.nn.Conv2d', 'nn.Conv2d', (['(12)', '(16)', '(5)'], {}), '(12, 16, 5)\n', (5679, 5690), True, 'import torch.nn as nn\n'), ((5802, 5817), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (5814, 5817), True, 'import torch.nn as nn\n'), ((5842, 5866), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (5852, 5866), True, 'import torch.nn as nn\n'), ((5891, 5915), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (5901, 5915), True, 'import torch.nn as nn\n'), ((5935, 5963), 'torch.nn.Linear', 'nn.Linear', (['(16 * 13 * 13)', '(120)'], {}), '(16 * 13 * 13, 120)\n', (5944, 5963), True, 'import torch.nn as nn\n'), ((5983, 6002), 'torch.nn.Linear', 'nn.Linear', (['(120)', 
'(120)'], {}), '(120, 120)\n', (5992, 6002), True, 'import torch.nn as nn\n'), ((6022, 6040), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(16)'], {}), '(120, 16)\n', (6031, 6040), True, 'import torch.nn as nn\n'), ((8792, 8811), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(12)', '(5)'], {}), '(1, 12, 5)\n', (8801, 8811), True, 'import torch.nn as nn\n'), ((8872, 8887), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (8884, 8887), True, 'import torch.nn as nn\n'), ((8923, 8943), 'torch.nn.Conv2d', 'nn.Conv2d', (['(12)', '(16)', '(5)'], {}), '(12, 16, 5)\n', (8932, 8943), True, 'import torch.nn as nn\n'), ((9055, 9070), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (9067, 9070), True, 'import torch.nn as nn\n'), ((9095, 9119), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (9105, 9119), True, 'import torch.nn as nn\n'), ((9144, 9168), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (9154, 9168), True, 'import torch.nn as nn\n'), ((9188, 9216), 'torch.nn.Linear', 'nn.Linear', (['(16 * 13 * 13)', '(120)'], {}), '(16 * 13 * 13, 120)\n', (9197, 9216), True, 'import torch.nn as nn\n'), ((9236, 9255), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(120)'], {}), '(120, 120)\n', (9245, 9255), True, 'import torch.nn as nn\n'), ((9275, 9293), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(16)'], {}), '(120, 16)\n', (9284, 9293), True, 'import torch.nn as nn\n'), ((10388, 10407), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(12)', '(5)'], {}), '(1, 12, 5)\n', (10397, 10407), True, 'import torch.nn as nn\n'), ((10468, 10483), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (10480, 10483), True, 'import torch.nn as nn\n'), ((10519, 10539), 'torch.nn.Conv2d', 'nn.Conv2d', (['(12)', '(16)', '(5)'], {}), '(12, 16, 5)\n', (10528, 10539), True, 'import torch.nn as nn\n'), ((10581, 10599), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (10595, 10599), True, 'import 
torch.nn as nn\n'), ((10692, 10707), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (10704, 10707), True, 'import torch.nn as nn\n'), ((10732, 10756), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (10742, 10756), True, 'import torch.nn as nn\n'), ((10776, 10804), 'torch.nn.Linear', 'nn.Linear', (['(16 * 13 * 13)', '(120)'], {}), '(16 * 13 * 13, 120)\n', (10785, 10804), True, 'import torch.nn as nn\n'), ((10829, 10853), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (10839, 10853), True, 'import torch.nn as nn\n'), ((10873, 10892), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(120)'], {}), '(120, 120)\n', (10882, 10892), True, 'import torch.nn as nn\n'), ((10912, 10930), 'torch.nn.Linear', 'nn.Linear', (['(120)', '(16)'], {}), '(120, 16)\n', (10921, 10930), True, 'import torch.nn as nn\n'), ((17099, 17125), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (17108, 17125), False, 'import torch\n'), ((17489, 17520), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (17503, 17520), False, 'from torchvision import models\n'), ((19169, 19195), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (19178, 19195), False, 'import torch\n'), ((2063, 2106), 'cv2.resize', 'cv2.resize', (['img', 'img_size', 'cv2.INTER_LINEAR'], {}), '(img, img_size, cv2.INTER_LINEAR)\n', (2073, 2106), False, 'import cv2\n'), ((2426, 2441), 'numpy.flip', 'np.flip', (['img', '(1)'], {}), '(img, 1)\n', (2433, 2441), True, 'import numpy as np\n'), ((3529, 3604), 'numpy.reshape', 'np.reshape', (['minibatch_d', '(batch_num, channel_num, img_size[0], img_size[1])'], {}), '(minibatch_d, (batch_num, channel_num, img_size[0], img_size[1]))\n', (3539, 3604), True, 'import numpy as np\n'), ((1928, 1948), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (1938, 1948), False, 'import cv2\n'), ((1989, 2012), 
'cv2.imread', 'cv2.imread', (['filename', '(0)'], {}), '(filename, 0)\n', (1999, 2012), False, 'import cv2\n'), ((2155, 2183), 'numpy.transpose', 'np.transpose', (['img', '[2, 0, 1]'], {}), '(img, [2, 0, 1])\n', (2167, 2183), True, 'import numpy as np\n'), ((2570, 2595), 'numpy.random.randint', 'np.random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (2587, 2595), True, 'import numpy as np\n'), ((3652, 3681), 'torch.from_numpy', 'torch.from_numpy', (['minibatch_d'], {}), '(minibatch_d)\n', (3668, 3681), False, 'import torch\n'), ((3785, 3814), 'torch.LongTensor', 'torch.LongTensor', (['minibatch_l'], {}), '(minibatch_l)\n', (3801, 3814), False, 'import torch\n'), ((7237, 7258), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7256, 7258), True, 'import torch.nn as nn\n'), ((14972, 15003), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (14986, 15003), False, 'from torchvision import models\n'), ((15873, 15901), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (15895, 15901), False, 'import torch\n'), ((15981, 16002), 'torch.max', 'torch.max', (['outputs', '(1)'], {}), '(outputs, 1)\n', (15990, 16002), False, 'import torch\n'), ((18425, 18454), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (18437, 18454), False, 'from torchvision import models\n'), ((2939, 2958), 'numpy.asarray', 'np.asarray', (['data[i]'], {}), '(data[i])\n', (2949, 2958), True, 'import numpy as np\n')] |
from time import time
import os
from random import gauss
import sys
import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation
from Bio.SeqIO import SeqRecord
from Bio import SeqIO, Seq
from advntr.sam_utils import get_id_of_reads_mapped_to_vntr_in_bamfile, make_bam_and_index
from advntr.models import load_unique_vntrs_data
from advntr import settings
# --- experiment configuration (module level) ---
# which genome build's VNTR database / reference FASTA to use
hg38 = True
PREFIX_DIR = '/mnt/hg38_dnn/' if hg38 else '/mnt/'
OUTPUT_DIR_PREFIX = '../advntr2_recruitment_comparison_hg38/' if hg38 else '../advntr2_recruitment_comparison/'
read_length = 150
kmer_length = 6
# optional CLI override: argv[1] = k-mer length used for the read embedding
if __name__ == '__main__':
    if len(sys.argv) > 1:
        kmer_length = int(sys.argv[1])
# input_dim = 4 ** kmer_length * position_partition
# width of the binary bag-of-k-mers embedding (one slot per possible k-mer)
input_dim = 4 ** kmer_length
# input_dim = read_length * 4
reduced_dimensions = 150 * (2 * kmer_length - 6)
losses = ['binary_crossentropy', 'mean_squared_error', 'mean_absolute_error', 'mean_squared_logarithmic_error', 'hinge', 'squared_hinge']
# output activation that pairs with each loss (identifier has a typo,
# kept as-is for compatibility)
loss_to_activatio = {'binary_crossentropy': 'sigmoid',
                     'mean_squared_error': 'linear',
                     'mean_absolute_error': 'linear',
                     'mean_squared_logarithmic_error': 'linear',
                     'hinge': 'tanh',
                     'squared_hinge': 'tanh'}
loss_index = 0
# optional CLI override: argv[4] = index into `losses`
if __name__ == '__main__':
    if len(sys.argv) > 4:
        loss_index = int(sys.argv[4])
loss_function = losses[loss_index]
loss_suffix = '_%s' % loss_index if loss_index > 0 else ''
result_dir = OUTPUT_DIR_PREFIX + 'hmm_dnn_comparison_%s/' % (str(kmer_length) + loss_suffix)
bowtie_result_dir = OUTPUT_DIR_PREFIX + 'hmm_dnn_comparison_bowtie/'
bowtie_working_dir = PREFIX_DIR + '/bowtie_recruitment/'
# NOTE(review): unlike result_dir, the loss suffix is appended *after* the
# trailing slash here (e.g. '/mnt/dnn_models_6/_1') — confirm this is intended
dnn_models_dir = PREFIX_DIR + '/dnn_models_%s/' % (str(kmer_length)) + loss_suffix
def align_with_bowtie(fq_file):
    """Align the FASTA file `fq_file` with bowtie2 and return the BAM path.

    The alignment (and BAM conversion/indexing) is skipped when the BAM
    file already exists on disk, so repeated calls are cheap.
    """
    sam_path = fq_file[:-3] + '_bowtie_aln.sam'
    bam_path = sam_path[:-4] + '.bam'
    if not os.path.exists(bam_path):
        command = 'bowtie2 -x /mnt/hg19_chromosomes/hg19_bt2_idx -f %s -S %s --threads 7' % (fq_file, sam_path)
        os.system(command)
        make_bam_and_index(sam_path)
    return bam_path
def get_embedding_of_string(sequence, kmer_length=6):
    """Return a binary bag-of-k-mers vector of length 4 ** kmer_length.

    Each k-mer of `sequence` (upper-cased) is interpreted as a base-4
    number with A=0, C=1, G=2, T=3 (any other letter counts as A) and the
    corresponding slot of the output is set to 1.  Successive k-mer codes
    are derived with a rolling update so the sequence is scanned once.
    """
    dim = 4 ** kmer_length
    seq = sequence.upper()
    # every letter defaults to 0 (i.e. 'A'); only C/G/T get distinct digits
    digit = {letter: 0 for letter in 'ASDFGHJKLPOIUYTREWQZXCVBNM'}
    digit.update({'A': 0, 'C': 1, 'G': 2, 'T': 3})
    # code of the first window, most-significant digit first
    code = 0
    for offset, ch in enumerate(seq[:kmer_length]):
        code += digit[ch] * (4 ** (kmer_length - offset - 1))
    embedding = [0] * dim
    embedding[code] = 1
    top_place = 4 ** (kmer_length - 1)
    # slide the window: drop the leading digit, shift, append the new one
    for pos in range(kmer_length, len(seq)):
        code = (code - top_place * digit[seq[pos - kmer_length]]) * 4 + digit[seq[pos]]
        embedding[code] = 1
    return embedding
def get_google_embedding_of_string(sequence):
    # Positional one-hot encoding: position i of the read occupies 4 slots,
    # one per nucleotide; letters other than A/C/G/T are mapped to 0 ('A').
    # NOTE(review): the vector is sized by the module-level `input_dim`
    # (= 4 ** kmer_length), not by 4 * len(sequence); this only works while
    # len(sequence) * 4 <= input_dim (true for 150 bp reads with the default
    # kmer_length of 6) — there is no bounds check here.
    sequence = sequence.upper()
    result = [0] * input_dim
    mapping = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    for i, c in enumerate(sequence):
        if c not in mapping.keys():
            mapping[c] = 0
        result[i * 4 + mapping[c]] = 1
    return result
def make_random_unit_vector(dims):
    """Sample a random vector uniformly on the unit sphere in `dims` dims.

    Draws i.i.d. standard normals and normalizes to unit Euclidean length.
    """
    components = [gauss(0, 1) for _ in range(dims)]
    norm = sum(c * c for c in components) ** .5
    return [c / norm for c in components]
def get_random_vector_set(seed=10):
    """Return `reduced_dimensions` random unit vectors of length `input_dim`.

    The global RNG is re-seeded first, so calls with the same seed always
    yield the same projection matrix (required for a stable hashed
    embedding across runs).
    """
    import random
    random.seed(seed)
    return [make_random_unit_vector(input_dim) for _ in range(reduced_dimensions)]
def get_hashed_embedding_of_string(sequence, random_vectors):
    """Project the bag-of-k-mers embedding of `sequence` onto `random_vectors`.

    Equivalent to np.array(random_vectors).dot(embedding), but exploits the
    sparsity of the binary embedding: only positions that are set contribute
    to each dot product.

    Args:
        sequence: DNA read to embed.
        random_vectors: list of vectors, each of length `input_dim`.

    Returns:
        List with one dot product per random vector.
    """
    # BUGFIX: pass the module-level kmer_length so the embedding length
    # matches input_dim (and each random vector) even when kmer_length is
    # overridden on the command line.
    original_embedding = get_embedding_of_string(sequence, kmer_length)
    hashed_embedding = []
    for i, random_vector in enumerate(random_vectors):
        hashed_embedding.append(0)
        # BUGFIX: the old code iterated over the embedding VALUES (0/1) and
        # indexed random_vector[0]/[1]; it must iterate over the POSITIONS
        # whose slot is set (the embedding used to be a set of indices).
        for position, is_set in enumerate(original_embedding):
            if is_set:
                hashed_embedding[i] += random_vector[position]
    # hashed_embedding = np.array(random_vectors).dot(np.array(original_embedding))
    return hashed_embedding
def generate_d_neighborhood(pattern, d):
    """Return all strings reachable from `pattern` with at most `d` edits.

    One edit at a character position is: deleting it, substituting it with
    one of A/C/G/T, or keeping it and inserting one of A/C/G/T right after
    it.  The original pattern is always included in the result set.
    """
    neighborhood = {pattern}
    for _ in range(d):
        expanded = set()
        for candidate in neighborhood:
            for idx in range(len(candidate)):
                ch = candidate[idx]
                # deletion, 4 substitutions, 4 insertions after `ch`
                edits = [''] + list('ACGT') + [ch + ins for ins in 'ACGT']
                for edit in edits:
                    expanded.add(candidate[:idx] + edit + candidate[idx + 1:])
        neighborhood |= expanded
    return neighborhood
def get_blast_keywords(reference_vntr, keyword_size=11):
    """Build BLAST query keywords tiling the VNTR locus.

    The locus is the concatenation of the repeat segments with 15 bp of
    flanking sequence on each side; repeats shorter than `keyword_size`
    are duplicated until they are long enough.  Windows of `keyword_size`
    characters are taken every 5 bp — 6 bp when the repeat pattern is
    exactly 5 bp long, so the step does not stay in phase with the repeat.
    """
    repeats = ''.join(reference_vntr.get_repeat_segments())
    if len(repeats) < keyword_size:
        copies = int(keyword_size / len(repeats)) + 1
        repeats = repeats * copies
    locus = (reference_vntr.left_flanking_region[-15:] + repeats +
             reference_vntr.right_flanking_region[:15])
    step = 6 if len(reference_vntr.pattern) == 5 else 5
    return [locus[start:start + keyword_size]
            for start in range(0, len(locus) - keyword_size + 1, step)]
def get_hmm_accuracy(vntr_finder, simulated_true_reads, simulated_false_filtered_reads):
    """Benchmark keyword-based BLAST recruitment for one VNTR.

    Despite the name, the live part of this function measures the BLAST
    keyword filter: the labelled reads are written to a FASTA database,
    queried with keywords from the VNTR locus, and reads matching at least
    two keywords count as recruited.  Metrics are written to
    <result_dir>/<vntr id>/blast.txt and the elapsed query time returned.
    Everything after the first `return` is an unreachable legacy HMM
    benchmark kept for reference.
    """
    output_dir = result_dir + '/%s/' % vntr_finder.reference_vntr.id
    print('running BLAST')
    from blast_wrapper import get_blast_matched_ids, make_blast_database
    blast_dir = output_dir + 'blast_dir/'
    if not os.path.exists(blast_dir):
        os.makedirs(blast_dir)
    vntr_id = vntr_finder.reference_vntr.id
    fasta_file = blast_dir + 'reads.fasta'
    records = []
    # label each read so true/false positives can be told apart afterwards
    # BUGFIX: the label used to be the misspelled 'fasle_%s', so the
    # startswith('false') test below never matched and fp was always 0,
    # which made precision/accuracy look perfect.
    for i, read in enumerate(simulated_false_filtered_reads):
        records.append(SeqRecord(seq=Seq.Seq(read), id='false_%s' % i))
    for i, read in enumerate(simulated_true_reads):
        records.append(SeqRecord(seq=Seq.Seq(read), id='true_%s' % i))
    with open(fasta_file, 'w') as output_handle:
        SeqIO.write(records, output_handle, 'fasta')
    make_blast_database(fasta_file, blast_dir + 'blast_db_%s' % vntr_id)
    query = '@'.join(get_blast_keywords(vntr_finder.reference_vntr))
    search_id = 'search_id'
    search_results = get_blast_matched_ids(query, blast_dir + 'blast_db_%s' % vntr_id, max_seq='100000', word_size='7',
                                           evalue=sys.maxsize, search_id=search_id, identity_cutoff='100', blast_tmp_dir=blast_dir)
    from collections import Counter
    res = Counter(search_results)
    # a read is recruited when it matches at least two keywords
    filtered = [item for item, occur in res.items() if occur >= 2]
    print('BLAST results computed')
    print(len(filtered))
    print(len(simulated_true_reads))
    print(len(simulated_false_filtered_reads))
    tp = float(len([e for e in filtered if e.startswith('true')]))
    fp = float(len([e for e in filtered if e.startswith('false')]))
    fn = float(len(simulated_true_reads) - tp)
    tn = float(len(simulated_false_filtered_reads) - fp)
    train_time = 0
    passed_time = 0
    precision = tp / (tp + fp) if tp > 0 else 0
    recall = tp / (tp + fn)
    accuracy = (100 * (tp + tn) / (fp + fn + tp + tn))
    print('BLAST:')
    print(tp, fp, fn, tn)
    print('Precision:', precision)
    print('Recall:', recall)
    print('acc: %s' % accuracy)
    with open(output_dir + '/blast.txt', 'w') as outfile:
        outfile.write('%s\n' % train_time)
        outfile.write('%s\n' % passed_time)
        outfile.write('%s\n' % precision)
        outfile.write('%s\n' % recall)
        outfile.write('%s\n' % accuracy)
        outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
    return passed_time

    # ---- unreachable below this point: legacy HMM benchmark ----
    output_dir = result_dir + '/%s/' % vntr_finder.reference_vntr.id
    if os.path.exists(output_dir + '/hmm.txt') and os.path.getsize(output_dir + '/hmm.txt') > 0:
        if sum(1 for _ in open(output_dir + 'hmm.txt')) > 5:
            print('HMM info is already calculated')
            with open(output_dir + 'hmm.txt') as infile:
                lines = infile.readlines()
                return float(lines[1])
    train_true_reads = [read for i, read in enumerate(simulated_true_reads) if i % 2 == 0]
    train_false_reads = [read for i, read in enumerate(simulated_false_filtered_reads) if i % 2 == 0]
    test_true_reads = [read for i, read in enumerate(simulated_true_reads) if i % 2 == 1]
    test_false_reads = [read for i, read in enumerate(simulated_false_filtered_reads) if i % 2 == 1]
    start_time = time()
    hmm = vntr_finder.get_vntr_matcher_hmm(read_length=read_length)
    processed_true_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, train_true_reads)
    processed_false_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, train_false_reads)
    recruitment_score = vntr_finder.find_recruitment_score_threshold(processed_true_reads, processed_false_reads)
    train_time = time() - start_time
    print('HMM train time: %s' % train_time)
    tp = 0.0
    fn = 0.0
    tn = 0.0
    fp = 0.0
    start_time = time()
    true_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, test_true_reads)
    false_reads = vntr_finder.find_hmm_score_of_simulated_reads(hmm, test_false_reads)
    passed_time = time() - start_time
    for read in true_reads:
        if read.logp > recruitment_score:
            tp += 1
        else:
            fn += 1
    for read in false_reads:
        if read.logp > recruitment_score:
            fp += 1
        else:
            tn += 1
    precision = tp / (tp + fp) if tp > 0 else 0
    recall = tp / (tp + fn)
    accuracy = (100 * (tp + tn) / (fp + fn + tp + tn))
    print('HMM: %s' % passed_time)
    print(tp, fp, fn, tn)
    print('Precision:', precision)
    print('Recall:', recall)
    print('acc: %s' % accuracy)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(output_dir + '/hmm.txt', 'w') as outfile:
        outfile.write('%s\n' % train_time)
        outfile.write('%s\n' % passed_time)
        outfile.write('%s\n' % precision)
        outfile.write('%s\n' % recall)
        outfile.write('%s\n' % accuracy)
        outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
    return passed_time
def simulate_and_store_false_reads(vntr_finder, false_reads_file, min_matches=4):
    # Scan the reference genome(s) for reads that pass the keyword filter
    # with at least `min_matches` matches but do not come from the VNTR
    # locus, and cache them one per line in `false_reads_file`.
    simulated_false_filtered_reads = []
    reference_files = []
    for chromosome in settings.CHROMOSOMES:
        reference_file = settings.HG19_DIR + chromosome + '.fa'
        reference_files.append(reference_file)
    if hg38:
        # module-level flag: use the single whole-genome hg38 FASTA instead
        # of the per-chromosome hg19 files assembled above
        reference_files = ['/mnt/hg38_chromosomes/hg38.fa']
    for reference_file in reference_files:
        simulated_false_filtered_reads += vntr_finder.simulate_false_filtered_reads(reference_file, min_matches)
        print(len(simulated_false_filtered_reads))
        if len(simulated_false_filtered_reads) > 41000:
            # stop early once there are more than enough negatives
            break
    with open(false_reads_file, 'w') as outfile:
        for read in simulated_false_filtered_reads:
            outfile.write('%s\n' % read)
def get_true_reads_and_false_reads(vntr_finder, vntr_id):
    # Simulate reads that originate from the VNTR locus ("true" reads) and
    # load — or (re)generate — reads that pass the keyword filter but come
    # from elsewhere in the genome ("false" reads, cached per VNTR on disk).
    simulated_true_reads = vntr_finder.simulate_true_reads(read_length)
    print('true reads: %s' % len(simulated_true_reads))
    false_reads_file = PREFIX_DIR + '/false_reads/false_reads_%s.txt' % vntr_id
    if not os.path.exists(false_reads_file) or os.path.getsize(false_reads_file) == 0:
        if os.path.exists(false_reads_file) and os.path.getsize(false_reads_file) == 0:
            # an empty cache file means a previous run found nothing:
            # relax the keyword filter to 1 match instead of 4
            print('There is no false read in the file')
            no_false_read = True
        else:
            no_false_read = False
        min_matches = 1 if no_false_read else 4
        simulate_and_store_false_reads(vntr_finder, false_reads_file, min_matches)
    min_matches = 6
    while True:
        with open(false_reads_file) as infile:
            lines = infile.readlines()
        if len(lines) > 40000:
            # too many negatives: regenerate with a progressively
            # stricter filter until the cache fits
            print('There are more than %s reads in the file. Trying min_matches = %s' % (len(lines), min_matches))
            simulate_and_store_false_reads(vntr_finder, false_reads_file, min_matches)
            min_matches += 2
        else:
            break
    # drop reads containing N: the embedding alphabet is A/C/G/T only
    simulated_false_filtered_reads = [read.strip() for read in lines if 'N' not in read.upper()]
    print('false reads: %s' % len(simulated_false_filtered_reads))
    print('true reads: %s' % len(simulated_true_reads))
    return simulated_true_reads, simulated_false_filtered_reads
def get_nn_model(train, three_hidden_layers=False, model_function='relu', first_layer=100, second_layer=0):
    """Build, compile and fit a small dense classifier on `train`.

    `train` is a (features, one-hot labels) pair; the input width is the
    module-level `input_dim` and the loss is the module-level
    `loss_function`.  A second hidden layer is added only when
    `second_layer` is positive (`three_hidden_layers` is retained for
    backward compatibility but not consulted).  The output is a 2-way
    softmax: class 0 = "read from the VNTR", class 1 = "background read".
    """
    classifier = Sequential()
    classifier.add(Dense(first_layer, input_dim=input_dim,
                         kernel_initializer="uniform", activation=model_function))
    if second_layer > 0:
        classifier.add(Dense(second_layer, activation=model_function,
                             kernel_initializer="uniform"))
    classifier.add(Dense(2))
    classifier.add(Activation("softmax"))
    classifier.compile(loss=loss_function, optimizer='adam', metrics=['accuracy'])
    classifier.fit(train[0], train[1], epochs=3, batch_size=10)
    return classifier
def is_true(result_class):
    """Interpret a 2-way softmax output: slot 0 is the 'true read' class."""
    true_score = result_class[0]
    false_score = result_class[1]
    return true_score > false_score
def select_positive_and_negative_reads_with_bowtie(reads, vntr_finder, label):
    """Write `reads` to FASTA, align with bowtie2 and count recruited reads.

    The reads are written to <bowtie_working_dir>/<vntr id>/<label>.fa,
    aligned via `align_with_bowtie`, and the reads mapped onto the VNTR
    locus are counted.

    Returns:
        (mapped, unmapped, seconds) as floats/float/seconds, where
        `seconds` covers alignment and counting only (not FASTA writing).
    """
    working_dir = bowtie_working_dir + '/%s/' % vntr_finder.reference_vntr.id
    if not os.path.exists(working_dir):
        os.makedirs(working_dir)
    fq_file = working_dir + label + '.fa'
    records = []
    for index, read in enumerate(reads):
        rec = SeqRecord('')
        rec.seq = Seq.Seq(read)
        rec.id = 'read_%s/1' % str(index)
        records.append(rec)
    with open(fq_file, 'w') as handle:
        SeqIO.write(records, handle, 'fasta')
    started = time()
    bam_file = align_with_bowtie(fq_file)
    mapped = len(get_id_of_reads_mapped_to_vntr_in_bamfile(bam_file, vntr_finder.reference_vntr))
    return float(mapped), float(len(reads) - mapped), time() - started
def run_bowtie2(true_reads, false_reads, vntr_finder):
    """Benchmark bowtie2 read recruitment for one VNTR and store the metrics.

    Aligns the simulated true and false reads separately, derives the
    confusion matrix plus precision/recall/accuracy, and writes everything
    to <bowtie_result_dir>/<vntr id>/bowtie.txt.  The work is skipped when
    a complete result file (more than 5 lines) already exists.
    """
    output_dir = bowtie_result_dir + '%s/' % vntr_finder.reference_vntr.id
    already_done = (os.path.exists(output_dir + '/bowtie.txt')
                    and os.path.getsize(output_dir + '/bowtie.txt') > 0
                    and sum(1 for _ in open(output_dir + 'bowtie.txt')) > 5)
    if already_done:
        print('bowtie results is already computed')
        return
    train_time = 0
    tp, fn, t1 = select_positive_and_negative_reads_with_bowtie(true_reads, vntr_finder, 'true')
    fp, tn, t2 = select_positive_and_negative_reads_with_bowtie(false_reads, vntr_finder, 'false')
    passed_time = t1 + t2
    precision = tp / (tp + fp) if tp > 0 else 0
    recall = tp / (tp + fn)
    accuracy = float(tp + tn) / (tp + tn + fp + fn)
    print('Bowtie2: %s' % passed_time)
    print('Precision:', precision)
    print('Recall:', recall)
    print('acc: %s' % accuracy)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(output_dir + '/bowtie.txt', 'w') as outfile:
        for value in (train_time, passed_time, precision, recall, accuracy):
            outfile.write('%s\n' % value)
        outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
def run_simulation(vntr_map, vntr_id):
    """Train/evaluate the DNN read-recruitment classifier for one VNTR.

    Simulates true and false reads for the VNTR, embeds them as binary
    bag-of-k-mers vectors, trains (or reloads) a small dense network on the
    even-indexed reads and reports precision/recall/accuracy on the
    odd-indexed reads.  Metrics are written to <result_dir>/<vntr id>/dnn.txt.
    Returns early when the VNTR is unsuitable (no or too many false reads)
    or when results already exist.
    """
    print('vntr:', vntr_id)
    ref_vntr = vntr_map[vntr_id]
    from advntr.vntr_finder import VNTRFinder
    vntr_finder = VNTRFinder(ref_vntr)
    simulated_true_reads, simulated_false_filtered_reads = get_true_reads_and_false_reads(vntr_finder, vntr_id)
    if len(simulated_false_filtered_reads) > 30000 or len(simulated_false_filtered_reads) == 0:
        print('skipping VNTR', vntr_id)
        return
    # map with bowtie2
    # run_bowtie2(simulated_true_reads, simulated_false_filtered_reads, vntr_finder)
    # BUGFIX: hmm_time is used below but its assignment was commented out,
    # which made every call crash with a NameError.  Default it to 0 and
    # re-enable the next line to charge the HMM share of the runtime again.
    hmm_time = 0.0
    # hmm_time = get_hmm_accuracy(vntr_finder, simulated_true_reads, simulated_false_filtered_reads)
    if not os.path.exists(dnn_models_dir):
        os.makedirs(dnn_models_dir)
    output_dir = result_dir + '/%s/' % vntr_finder.reference_vntr.id
    model_dir = dnn_models_dir + '%s.hd5' % vntr_finder.reference_vntr.id
    if os.path.exists(output_dir + 'dnn.txt') and os.path.getsize(output_dir + 'dnn.txt') > 0:
        if sum(1 for _ in open(output_dir + 'dnn.txt')) > 5:
            print('dnn information is already calculated')
            return
    # BUGFIX: pass the module-level kmer_length so the embedding width
    # matches input_dim (the network's input layer) when kmer_length is
    # overridden on the command line.
    true_embeddings = [get_embedding_of_string(seq, kmer_length) for seq in simulated_true_reads]
    false_embeddings = [get_embedding_of_string(seq, kmer_length) for seq in simulated_false_filtered_reads]
    # even-indexed reads train the model, odd-indexed reads test it
    train_x = [embedding for i, embedding in enumerate(true_embeddings) if i % 2 == 0]
    start_time = time()
    test_x = [embedding for i, embedding in enumerate(true_embeddings) if i % 2 == 1]
    embedding_time = time() - start_time
    train_y = [[1, 0]] * len(train_x)
    test_y = [[1, 0]] * len(test_x)
    train_x += [embedding for i, embedding in enumerate(false_embeddings) if i % 2 == 0]
    start_time = time()
    test_x += [embedding for i, embedding in enumerate(false_embeddings) if i % 2 == 1]
    embedding_time += time() - start_time
    train_y += [[0, 1]] * (len(train_x) - len(train_y))
    test_y += [[0, 1]] * (len(test_x) - len(test_y))
    train = [np.array(train_x), np.array(train_y)]
    test = [np.array(test_x), np.array(test_y)]
    first_layer = 100
    second_layer = 50
    start_time = time()
    if os.path.exists(model_dir):
        print('DNN model is already trained')
        model = load_model(model_dir)
    else:
        model = get_nn_model(train, first_layer=first_layer, second_layer=second_layer)
        model.save(model_dir)
    train_time = time() - start_time
    print('NN train time: %s' % train_time)
    scores = model.evaluate(test[0], test[1])
    start_time = time()
    classes = model.predict(test[0], batch_size=128)
    passed_time = embedding_time + time() - start_time
    # charge a per-read share of the (optional) HMM benchmark runtime so the
    # timings stay comparable; zero while the HMM benchmark is disabled
    passed_time += hmm_time / len(test[0]) * len(true_embeddings) / 2
    fn = 0.0
    fp = 0.0
    tp = 0.0
    tn = 0.0
    for i in range(len(test[1])):
        majority = int(is_true(classes[i]))# + int(is_true(classes2[i])) + int(is_true(classes3[i]))
        # print(majority)
        if test[1][i][0] == 1:
            if majority >= 1:
                tp += 1
            else:
                fn += 1
        else:
            if majority >= 1:#is_true(classes[i]):
                fp += 1
            else:
                tn += 1
    precision = tp / (tp + fp) if tp > 0 else 0
    recall = tp / (tp + fn)
    accuracy = scores[1]*100
    print('NN: %s' % passed_time)
    print(tp, fp, fn, tn)
    print('Precision:', precision)
    print('Recall:', recall)
    print("\n%s: %.2f%%" % (model.metrics_names[1], accuracy))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(output_dir + '/dnn.txt', 'w') as outfile:
        outfile.write('%s\n' % train_time)
        outfile.write('%s\n' % passed_time)
        outfile.write('%s\n' % precision)
        outfile.write('%s\n' % recall)
        outfile.write('%s\n' % accuracy)
        outfile.write('%s,%s,%s,%s\n' % (tp, fn, fp, tn))
def main():
    """Load reference VNTRs and simulate reads for a slice of VNTR ids.

    Expects ``sys.argv[2]`` and ``sys.argv[3]`` to hold the 1-based, inclusive
    start and end positions within the list of tested VNTR ids; only ids whose
    position falls in that window are passed to ``run_simulation``.
    """
    if hg38:
        reference_vntrs = load_unique_vntrs_data('vntr_data/hg38_selected_VNTRs_Illumina.db')
        vntr_map = {ref_vntr.id: ref_vntr for ref_vntr in reference_vntrs}
        # Restrict to VNTRs whose repeat pattern length is in the supported range.
        vntr_ids = [ref_vntr.id for ref_vntr in reference_vntrs if 6 <= len(ref_vntr.pattern) <= 100]
    else:
        reference_vntrs = load_unique_vntrs_data()
        vntr_map = {ref_vntr.id: ref_vntr for ref_vntr in reference_vntrs}
        from advntr.advntr_commands import get_tested_vntrs
        vntr_ids = get_tested_vntrs()
    print('len of reference_vntrs:', len(reference_vntrs))
    print('# of vntrs: %s' % len(vntr_ids))
    start, end = int(sys.argv[2]), int(sys.argv[3])
    # Positions are counted from 1, matching the original counter semantics.
    for position, vid in enumerate(vntr_ids, start=1):
        if start <= position <= end:
            run_simulation(vntr_map, vid)
# best_f, best_s, best_acc = None, None, 0
# with open('param_training2.txt', 'w') as output:
# for f in accuracy_map.keys():
# for s in accuracy_map[f].keys():
# avg_accuracy = sum(accuracy_map[f][s]) / len(accuracy_map[f][s])
# output.write('%s %s %s\n' % (f, s, avg_accuracy))
# if avg_accuracy > best_acc:
# best_acc = avg_accuracy
# best_f = f
# best_s = s
# print(best_f, best_s, best_acc)
# Run the simulation driver only when executed as a script.
if __name__ == '__main__':
    main()
| [
"blast_wrapper.get_blast_matched_ids",
"advntr.sam_utils.get_id_of_reads_mapped_to_vntr_in_bamfile",
"Bio.Seq.Seq",
"numpy.array",
"advntr.advntr_commands.get_tested_vntrs",
"keras.layers.Activation",
"keras.layers.Dense",
"os.path.exists",
"Bio.SeqIO.write",
"advntr.models.load_unique_vntrs_data"... | [((3485, 3502), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (3496, 3502), False, 'import random\n'), ((5971, 6039), 'blast_wrapper.make_blast_database', 'make_blast_database', (['fasta_file', "(blast_dir + 'blast_db_%s' % vntr_id)"], {}), "(fasta_file, blast_dir + 'blast_db_%s' % vntr_id)\n", (5990, 6039), False, 'from blast_wrapper import get_blast_matched_ids, make_blast_database\n'), ((6158, 6354), 'blast_wrapper.get_blast_matched_ids', 'get_blast_matched_ids', (['query', "(blast_dir + 'blast_db_%s' % vntr_id)"], {'max_seq': '"""100000"""', 'word_size': '"""7"""', 'evalue': 'sys.maxsize', 'search_id': 'search_id', 'identity_cutoff': '"""100"""', 'blast_tmp_dir': 'blast_dir'}), "(query, blast_dir + 'blast_db_%s' % vntr_id, max_seq=\n '100000', word_size='7', evalue=sys.maxsize, search_id=search_id,\n identity_cutoff='100', blast_tmp_dir=blast_dir)\n", (6179, 6354), False, 'from blast_wrapper import get_blast_matched_ids, make_blast_database\n'), ((6435, 6458), 'collections.Counter', 'Counter', (['search_results'], {}), '(search_results)\n', (6442, 6458), False, 'from collections import Counter\n'), ((8395, 8401), 'time.time', 'time', ([], {}), '()\n', (8399, 8401), False, 'from time import time\n'), ((8932, 8938), 'time.time', 'time', ([], {}), '()\n', (8936, 8938), False, 'from time import time\n'), ((12440, 12452), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (12450, 12452), False, 'from keras.models import Sequential, load_model\n'), ((13591, 13597), 'time.time', 'time', ([], {}), '()\n', (13595, 13597), False, 'from time import time\n'), ((15296, 15316), 'advntr.vntr_finder.VNTRFinder', 'VNTRFinder', (['ref_vntr'], {}), '(ref_vntr)\n', (15306, 15316), False, 'from advntr.vntr_finder import VNTRFinder\n'), ((16532, 16538), 'time.time', 'time', ([], {}), '()\n', (16536, 16538), False, 'from time import time\n'), ((16846, 16852), 'time.time', 'time', ([], {}), '()\n', (16850, 16852), False, 
'from time import time\n'), ((17253, 17259), 'time.time', 'time', ([], {}), '()\n', (17257, 17259), False, 'from time import time\n'), ((17267, 17292), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (17281, 17292), False, 'import os\n'), ((17651, 17657), 'time.time', 'time', ([], {}), '()\n', (17655, 17657), False, 'from time import time\n'), ((1926, 1972), 'os.path.exists', 'os.path.exists', (["(bowtie_alignment[:-4] + '.bam')"], {}), "(bowtie_alignment[:-4] + '.bam')\n", (1940, 1972), False, 'import os\n'), ((1982, 2103), 'os.system', 'os.system', (["('bowtie2 -x /mnt/hg19_chromosomes/hg19_bt2_idx -f %s -S %s --threads 7' %\n (fq_file, bowtie_alignment))"], {}), "(\n 'bowtie2 -x /mnt/hg19_chromosomes/hg19_bt2_idx -f %s -S %s --threads 7' %\n (fq_file, bowtie_alignment))\n", (1991, 2103), False, 'import os\n'), ((2103, 2139), 'advntr.sam_utils.make_bam_and_index', 'make_bam_and_index', (['bowtie_alignment'], {}), '(bowtie_alignment)\n', (2121, 2139), False, 'from advntr.sam_utils import get_id_of_reads_mapped_to_vntr_in_bamfile, make_bam_and_index\n'), ((3316, 3327), 'random.gauss', 'gauss', (['(0)', '(1)'], {}), '(0, 1)\n', (3321, 3327), False, 'from random import gauss\n'), ((5445, 5470), 'os.path.exists', 'os.path.exists', (['blast_dir'], {}), '(blast_dir)\n', (5459, 5470), False, 'import os\n'), ((5480, 5502), 'os.makedirs', 'os.makedirs', (['blast_dir'], {}), '(blast_dir)\n', (5491, 5502), False, 'import os\n'), ((5921, 5965), 'Bio.SeqIO.write', 'SeqIO.write', (['records', 'output_handle', '"""fasta"""'], {}), "(records, output_handle, 'fasta')\n", (5932, 5965), False, 'from Bio import SeqIO, Seq\n'), ((7650, 7689), 'os.path.exists', 'os.path.exists', (["(output_dir + '/hmm.txt')"], {}), "(output_dir + '/hmm.txt')\n", (7664, 7689), False, 'import os\n'), ((8797, 8803), 'time.time', 'time', ([], {}), '()\n', (8801, 8803), False, 'from time import time\n'), ((9129, 9135), 'time.time', 'time', ([], {}), '()\n', (9133, 9135), False, 
'from time import time\n'), ((9698, 9724), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (9712, 9724), False, 'import os\n'), ((9734, 9757), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (9745, 9757), False, 'import os\n'), ((12467, 12567), 'keras.layers.Dense', 'Dense', (['first_layer'], {'input_dim': 'input_dim', 'kernel_initializer': '"""uniform"""', 'activation': 'model_function'}), "(first_layer, input_dim=input_dim, kernel_initializer='uniform',\n activation=model_function)\n", (12472, 12567), False, 'from keras.layers import Dense, Activation\n'), ((12730, 12738), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (12735, 12738), False, 'from keras.layers import Dense, Activation\n'), ((12754, 12775), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (12764, 12775), False, 'from keras.layers import Dense, Activation\n'), ((13176, 13203), 'os.path.exists', 'os.path.exists', (['working_dir'], {}), '(working_dir)\n', (13190, 13203), False, 'import os\n'), ((13213, 13237), 'os.makedirs', 'os.makedirs', (['working_dir'], {}), '(working_dir)\n', (13224, 13237), False, 'import os\n'), ((13352, 13365), 'Bio.SeqIO.SeqRecord', 'SeqRecord', (['""""""'], {}), "('')\n", (13361, 13365), False, 'from Bio.SeqIO import SeqRecord\n'), ((13387, 13400), 'Bio.Seq.Seq', 'Seq.Seq', (['read'], {}), '(read)\n', (13394, 13400), False, 'from Bio import SeqIO, Seq\n'), ((13527, 13571), 'Bio.SeqIO.write', 'SeqIO.write', (['records', 'output_handle', '"""fasta"""'], {}), "(records, output_handle, 'fasta')\n", (13538, 13571), False, 'from Bio import SeqIO, Seq\n'), ((13672, 13762), 'advntr.sam_utils.get_id_of_reads_mapped_to_vntr_in_bamfile', 'get_id_of_reads_mapped_to_vntr_in_bamfile', (['bowtie_bamfile', 'vntr_finder.reference_vntr'], {}), '(bowtie_bamfile, vntr_finder.\n reference_vntr)\n', (13713, 13762), False, 'from advntr.sam_utils import get_id_of_reads_mapped_to_vntr_in_bamfile, 
make_bam_and_index\n'), ((13991, 14033), 'os.path.exists', 'os.path.exists', (["(output_dir + '/bowtie.txt')"], {}), "(output_dir + '/bowtie.txt')\n", (14005, 14033), False, 'import os\n'), ((14744, 14770), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (14758, 14770), False, 'import os\n'), ((14780, 14803), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (14791, 14803), False, 'import os\n'), ((15801, 15831), 'os.path.exists', 'os.path.exists', (['dnn_models_dir'], {}), '(dnn_models_dir)\n', (15815, 15831), False, 'import os\n'), ((15841, 15868), 'os.makedirs', 'os.makedirs', (['dnn_models_dir'], {}), '(dnn_models_dir)\n', (15852, 15868), False, 'import os\n'), ((16019, 16057), 'os.path.exists', 'os.path.exists', (["(output_dir + 'dnn.txt')"], {}), "(output_dir + 'dnn.txt')\n", (16033, 16057), False, 'import os\n'), ((16646, 16652), 'time.time', 'time', ([], {}), '()\n', (16650, 16652), False, 'from time import time\n'), ((16963, 16969), 'time.time', 'time', ([], {}), '()\n', (16967, 16969), False, 'from time import time\n'), ((17105, 17122), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (17113, 17122), True, 'import numpy as np\n'), ((17124, 17141), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (17132, 17141), True, 'import numpy as np\n'), ((17155, 17171), 'numpy.array', 'np.array', (['test_x'], {}), '(test_x)\n', (17163, 17171), True, 'import numpy as np\n'), ((17173, 17189), 'numpy.array', 'np.array', (['test_y'], {}), '(test_y)\n', (17181, 17189), True, 'import numpy as np\n'), ((17356, 17377), 'keras.models.load_model', 'load_model', (['model_dir'], {}), '(model_dir)\n', (17366, 17377), False, 'from keras.models import Sequential, load_model\n'), ((17523, 17529), 'time.time', 'time', ([], {}), '()\n', (17527, 17529), False, 'from time import time\n'), ((18612, 18638), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (18626, 18638), False, 'import 
os\n'), ((18648, 18671), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (18659, 18671), False, 'import os\n'), ((19066, 19133), 'advntr.models.load_unique_vntrs_data', 'load_unique_vntrs_data', (['"""vntr_data/hg38_selected_VNTRs_Illumina.db"""'], {}), "('vntr_data/hg38_selected_VNTRs_Illumina.db')\n", (19088, 19133), False, 'from advntr.models import load_unique_vntrs_data\n'), ((19373, 19397), 'advntr.models.load_unique_vntrs_data', 'load_unique_vntrs_data', ([], {}), '()\n', (19395, 19397), False, 'from advntr.models import load_unique_vntrs_data\n'), ((19564, 19582), 'advntr.advntr_commands.get_tested_vntrs', 'get_tested_vntrs', ([], {}), '()\n', (19580, 19582), False, 'from advntr.advntr_commands import get_tested_vntrs\n'), ((7694, 7734), 'os.path.getsize', 'os.path.getsize', (["(output_dir + '/hmm.txt')"], {}), "(output_dir + '/hmm.txt')\n", (7709, 7734), False, 'import os\n'), ((11184, 11216), 'os.path.exists', 'os.path.exists', (['false_reads_file'], {}), '(false_reads_file)\n', (11198, 11216), False, 'import os\n'), ((11220, 11253), 'os.path.getsize', 'os.path.getsize', (['false_reads_file'], {}), '(false_reads_file)\n', (11235, 11253), False, 'import os\n'), ((11271, 11303), 'os.path.exists', 'os.path.exists', (['false_reads_file'], {}), '(false_reads_file)\n', (11285, 11303), False, 'import os\n'), ((12638, 12714), 'keras.layers.Dense', 'Dense', (['second_layer'], {'activation': 'model_function', 'kernel_initializer': '"""uniform"""'}), "(second_layer, activation=model_function, kernel_initializer='uniform')\n", (12643, 12714), False, 'from keras.layers import Dense, Activation\n'), ((13831, 13837), 'time.time', 'time', ([], {}), '()\n', (13835, 13837), False, 'from time import time\n'), ((14038, 14081), 'os.path.getsize', 'os.path.getsize', (["(output_dir + '/bowtie.txt')"], {}), "(output_dir + '/bowtie.txt')\n", (14053, 14081), False, 'import os\n'), ((16062, 16101), 'os.path.getsize', 'os.path.getsize', (["(output_dir + 
'dnn.txt')"], {}), "(output_dir + 'dnn.txt')\n", (16077, 16101), False, 'import os\n'), ((17746, 17752), 'time.time', 'time', ([], {}), '()\n', (17750, 17752), False, 'from time import time\n'), ((11308, 11341), 'os.path.getsize', 'os.path.getsize', (['false_reads_file'], {}), '(false_reads_file)\n', (11323, 11341), False, 'import os\n'), ((5706, 5719), 'Bio.Seq.Seq', 'Seq.Seq', (['read'], {}), '(read)\n', (5713, 5719), False, 'from Bio import SeqIO, Seq\n'), ((5830, 5843), 'Bio.Seq.Seq', 'Seq.Seq', (['read'], {}), '(read)\n', (5837, 5843), False, 'from Bio import SeqIO, Seq\n')] |
from tensorflow.keras.models import load_model
from time import sleep
from keras.preprocessing.image import img_to_array
from keras.preprocessing import image
import cv2
import numpy as np
import os
from mtcnn import MTCNN
# Importing the MTCNN detector to detect faces
# Module-level MTCNN face detector, shared by face_emot_detect().
detector = MTCNN()
# Path to the emotion detection model
# (filename suggests a model with ~80% accuracy — TODO confirm).
model_path = os.path.join("model","accuracy_80.h5")
# Keras CNN that classifies a 48x48 grayscale face crop into one of class_labels.
classifier =load_model(model_path)
# Output-index -> label mapping; order must match the model's training classes.
class_labels = ['Angry','Happy','Neutral','Sad','Surprise'] # Remember to keep in alphabetical order
def face_emot_detect(vid_path, filename, output_path):
    """
    Take video path and find emotion and tag in video.

    Every face detected with confidence >= 0.9 is boxed, classified into one of
    ``class_labels``, annotated on the frame, and counted; the annotated video is
    written next to the script (or into ``output_path``).

    :param vid_path: complete path of input video
    :param filename: name of the output video file
    :param output_path: directory for the output video; '' writes into the CWD
    :return: False if the video cannot be opened, otherwise a dict mapping each
             label in class_labels to the number of times it was predicted
    """
    # Per-class prediction counters, same order as class_labels.
    label_info = [0, 0, 0, 0, 0]
    cap = cv2.VideoCapture(vid_path)
    if not cap.isOpened():  # was `== False` — idiomatic boolean test
        print('No video found')
        return False
    # Writer mirrors the input's FPS (prop 5) and frame size (props 3 and 4).
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    fps = int(cap.get(5))
    frame_size = (int(cap.get(3)), int(cap.get(4)))
    out_name = filename if output_path == '' else output_path + '//' + filename
    out = cv2.VideoWriter(out_name, fourcc, fps, frame_size)
    while True:
        # Read one frame at a time; ret is False at end of stream.
        ret, frame = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Get a dictionary of all faces in the current frame.
        faces = detector.detect_faces(frame)
        for face in faces:
            # Skip weak detections.
            if face.get('confidence') < 0.9:
                continue
            # Co-ordinates of the cropped area wherein the face lies.
            x, y, w, h = face.get('box')
            frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
            roi_gray = gray[y:y + h, x:x + w]
            try:
                roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
            except cv2.error:
                # BUG FIX: the original bare `except` fell through with the
                # un-resized ROI, which the classifier cannot accept; skip
                # this face instead of feeding it a wrong-shaped input.
                print(f"error in (unknown)")
                continue
            if np.sum([roi_gray]) != 0:
                roi = roi_gray.astype('float') / 255.0
                roi = img_to_array(roi)
                roi = np.expand_dims(roi, axis=0)
                # Make a prediction on the ROI, then look up the class label.
                preds = classifier.predict(roi)[0]
                label_info[preds.argmax()] += 1
                label = class_labels[preds.argmax()]
                label_position = (x, y - 5)
                cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            else:
                cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        out.write(frame)
    # Freeing all resources.
    out.release()
    cap.release()
    return dict(zip(class_labels, label_info))
| [
"cv2.rectangle",
"keras.preprocessing.image.img_to_array",
"mtcnn.MTCNN",
"os.path.join",
"cv2.putText",
"numpy.sum",
"tensorflow.keras.models.load_model",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.cvtColor",
"numpy.expand_dims",
"cv2.resize"
] | [((282, 289), 'mtcnn.MTCNN', 'MTCNN', ([], {}), '()\n', (287, 289), False, 'from mtcnn import MTCNN\n'), ((342, 381), 'os.path.join', 'os.path.join', (['"""model"""', '"""accuracy_80.h5"""'], {}), "('model', 'accuracy_80.h5')\n", (354, 381), False, 'import os\n'), ((393, 415), 'tensorflow.keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (403, 415), False, 'from tensorflow.keras.models import load_model\n'), ((826, 852), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vid_path'], {}), '(vid_path)\n', (842, 852), False, 'import cv2\n'), ((968, 999), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (990, 999), False, 'import cv2\n'), ((1532, 1571), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1544, 1571), False, 'import cv2\n'), ((2044, 2106), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)\n', (2057, 2106), False, 'import cv2\n'), ((2204, 2264), 'cv2.resize', 'cv2.resize', (['roi_gray', '(48, 48)'], {'interpolation': 'cv2.INTER_AREA'}), '(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)\n', (2214, 2264), False, 'import cv2\n'), ((2368, 2386), 'numpy.sum', 'np.sum', (['[roi_gray]'], {}), '([roi_gray])\n', (2374, 2386), True, 'import numpy as np\n'), ((2482, 2499), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (2494, 2499), False, 'from keras.preprocessing.image import img_to_array\n'), ((2530, 2557), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (2544, 2557), True, 'import numpy as np\n'), ((2880, 2971), 'cv2.putText', 'cv2.putText', (['frame', 'label', 'label_position', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 0)', '(2)'], {}), '(frame, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, \n 255, 0), 2)\n', (2891, 2971), False, 'import cv2\n'), ((3009, 
3103), 'cv2.putText', 'cv2.putText', (['frame', '"""No Face Found"""', '(20, 60)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 0)', '(2)'], {}), "(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 0), 2)\n", (3020, 3103), False, 'import cv2\n')] |
"""Drug Response Predictor
@author: <NAME>
This module centralizes the domain adaptation strategy towards biology-aware drug response
prediction on in-vivo dataset.
Example
-------
Examples are given in the vignettes.
Notes
-------
Examples are given in the vignette
References
-------
[1] <NAME>., <NAME>., <NAME>., <NAME>. (2019)
PRECISE: A domain adaptation approach to transfer predictors of drug response
from pre-clinical models to tumors
[2] <NAME>., <NAME>., <NAME>. (2012) Geodesic Flow Kernel for unsupervised
domain adaptation. IEEE CVPR
[3] <NAME>., <NAME>., <NAME>. (2011) Domain Adaptation for object
recognition, an unsupervised approach. IEEE ICCV
"""
import os
import numpy as np
import scipy
from pathlib import Path
from sklearn.model_selection import GridSearchCV, GroupKFold
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import ElasticNet, Ridge
from sklearn.neighbors import KNeighborsRegressor
from joblib import Parallel, delayed
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from copy import deepcopy
import tempfile
from joblib import load, dump
from precise.intermediate_factors import IntermediateFactors
from precise.principal_vectors import PVComputation
from precise.pipeline_routine import FlowProjector, GeodesicMatrixComputer, ConsensusRepresentation
class DrugResponsePredictor:
    """
    Main pipeline for training a tumor-aware drug response predictor. This class contains:
        - principal vectors computation,
        - consensus representation computation,
        - regression model training based on these representations,
        - computation of the predictive performance.
    On top of containing the solution selected by [1], it offers an implementation of the
    Geodesic Flow Sampling [2] and the Geodesic Flow Kernel [3] with the equivalent
    definition derived in supplementary material of [1].
    Attributes
    -------
    n_representations : int, default to 100
        Number of representations between source and target principal vectors for interpolation.
    method : str, default to 'consensus'
        Scheme used for the domain adaptation step, i.e. 'consensus', 'elasticnet', or 'gfk'.
    mean_center : bool, default to True
        Whether the different datasets used in the implementation should be mean centered.
    std_unit : bool, default to False
        Whether the different datasets used in the implementation should be standardized
        (feature-level variance to 1).
    n_factors : int, default to 70
        Number of domain-specific factors to compute, e.g. PCs.
    n_pv : int, default to 40
        Number of principal vectors to compute from the domain-specific factors.
    dim_reduction : str, default to 'pca'
        Dimensionality reduction method for the source data,
        i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'.
    dim_reduction_target : str, default to None
        Dimensionality reduction method for the target data,
        i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'. If None, set to dim_reduction.
    l1_ratio : float, default to 0
        l1 ratio for elasticnet model (0 is Ridge, 1 is Lasso).
    source_data : np.ndarray, default to None
        source data to use in domain adaptation phase.
    target_data : np.ndarray, default to None
        target data to use in domain adaptation phase.
    n_jobs : int, default to 1
        number of jobs used in parallelisation.
    pv_computation : PVComputation
        Instance computing the principal vectors.
    intermediate_factors : IntermediateFactors
        Instance computing the interpolated features between source and target.
    predictor : BaseEstimator
        Regression model based on feature representation chosen in "method".
    alpha_values: np.ndarray
        Regression coefficients for grid search in regression model.
    cv_fold : int (set to 10)
        Number of cross validation folds used for finding the optimal shrinkage
        coefficient and computing the predictive performance.
    verbose : int (set to 1)
        Level of verbosity in joblib instances.
    """

    def __init__(self, n_representations=100, method='consensus',
                mean_center=True,
                std_unit=False,
                n_factors=70,
                n_pv=40,
                dim_reduction='pca',
                dim_reduction_target=None,
                l1_ratio=0,
                source_data=None,
                target_data=None,
                n_jobs=1):
        """
        Parameters
        -------
        n_representations : int, default to 100
            Number of representations between source and target principal vectors for interpolation.
            0 means source only, -1 means target only.
        method : str, default to 'consensus'
            Scheme used for the domain adaptation step, i.e. 'consensus', 'elasticnet', or 'gfk'.
        mean_center : bool, default to True
            Whether the different datasets used in the implementation should be mean centered.
        std_unit : bool, default to False
            Whether the different datasets used in the implementation should be standardized
            (feature-level variance to 1).
        n_factors : int, default to 70
            Number of domain-specific factors to compute, e.g. PCs.
        n_pv : int, default to 40
            Number of principal vectors to compute from the domain-specific factors.
        dim_reduction : str, default to 'pca'
            Dimensionality reduction method for the source data,
            i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'.
        dim_reduction_target : str, default to None
            Dimensionality reduction method for the target data,
            i.e. 'pca', 'ica', 'nmf', 'fa', 'sparsepca', pls'. If None, set to dim_reduction.
        l1_ratio : float, default to 0
            l1 ratio for elasticnet model (0 is Ridge, 1 is Lasso).
        source_data : np.ndarray, default to None
            source data to use in domain adaptation phase.
        target_data : np.ndarray, default to None
            target data to use in domain adaptation phase.
        n_jobs : int, default to 1
            number of jobs used in parallelisation.
        """
        self.n_representations = n_representations
        self.mean_center = mean_center
        self.std_unit = std_unit
        self.method = method
        self.n_factors = n_factors
        self.n_pv = n_pv
        self.l1_ratio = l1_ratio
        self.dim_reduction = dim_reduction
        self.dim_reduction_target = dim_reduction_target
        self.n_jobs = n_jobs
        self.source_data = source_data
        self.target_data = target_data
        self.pv_computation = PVComputation(
            self.n_factors,
            self.n_pv,
            self.dim_reduction,
            self.dim_reduction_target
        )
        self.intermediate_factors = IntermediateFactors(
            self.n_representations
        )
        # Set by the _fit_* helpers once a model has been trained.
        self.predictor = None
        # Default values for CV
        self.alpha_values = np.logspace(-6,10,34)
        self.cv_fold = 10
        self.verbose = 1

    def fit(self, X_source, y_source, mean_center=False, std_unit=False, use_data=True):
        """
        Train the drug response predictor by first computing the feature representation corresponding
        to "method", then projecting on this representation, and finally training a regression model
        on the projected data.
        Parameters
        -------
        X_source : numpy.ndarray, shape (n_components, n_features)
            Genomics data use for prediction.
        y_source : numpy.ndarray, shape (n_components, 1)
            Drug response, i.e. output.
        mean_center : bool, default to False
            Whether X_source features (i.e. genes) should be mean-centered.
        std_unit : bool, default to False
            Whether X_source features (i.e. genes) should be standardized.
        use_data : bool, default to True
            Whether X_source should be also incorporated into the domain adaptation.
            If False, data from "source_data" will solely be used.
        Return values
        -------
        self: returns an instance of self.
        """
        # NOTE(review): the mean_center and std_unit parameters are never read in
        # this body — the instance-level self.mean_center/self.std_unit are used
        # by the _fit_* helpers instead. Confirm whether that is intended.
        # Sample along the geodesic flow and project on all intermediate features [3].
        if self.method.lower() == 'elasticnet':
            self._fit_elasticnet(X_source, y_source, use_data)
        # Infinite setting [2].
        elif self.method.lower() == 'gfk':
            self._fit_kernel_ridge(X_source, y_source, use_data)
        # Construct from the intermediate features a representation with comparable
        # probability distribution [1].
        elif self.method.lower() == 'consensus':
            self._fit_consensus(X_source, y_source, use_data)
        else:
            raise NameError('Unknown method: %s, should be \'gfk\' or \'elasticnet\''%(self.method))
        return self

    def _memmap_array(self, x, name=None):
        """Copy array ``x`` into a float32 memory-mapped file and return the memmap.

        Used so joblib worker processes can share the data without pickling it.
        A random name is generated when ``name`` is not provided.
        """
        name = name or ''.join(np.random.choice(list('QWERTYUIOPASDFGHJKLZXCVBNM'), 10))
        filename= os.path.join(tempfile.mkdtemp(), 'joblib_%s.mmap'%(name))
        fp = np.memmap(filename, dtype='float32', mode='w+', shape=x.shape)
        fp[:] = x[:]
        return fp

    def _memmap_data(self):
        """Replace source_data/target_data by memory-mapped copies (see _memmap_array)."""
        # Source
        if self.source_data is not None and self.source_data.shape[0] > 0:
            self.source_data = self._memmap_array(self.source_data, 'source')
        self.target_data = self._memmap_array(self.target_data, 'target')

    def _fit_elasticnet(self, X_source, y_source, use_data=True):
        """Grid-search an ElasticNet/Ridge on geodesic-flow-projected features [3].

        Sets self.regression_model_ (the fitted GridSearchCV) and self.predictor
        (its best estimator).
        """
        # Cross validate the alpha
        param_grid ={
            'regression__alpha': self.alpha_values,
        }
        # Put source and target data into memory for joblib
        if self.n_jobs >= 2:
            self._memmap_data()
        self.regression_model_ = GridSearchCV(
            Pipeline([
                ('projector', FlowProjector(
                        source_data=self.source_data,
                        target_data=self.target_data,
                        n_factors=self.n_factors,
                        n_pv=self.n_pv,
                        dim_reduction=self.dim_reduction,
                        dim_reduction_target=self.dim_reduction_target,
                        n_representations=self.n_representations,
                        use_data=use_data,
                        mean_center=self.mean_center,
                        std_unit=self.std_unit
                    )
                ),
                # l1_ratio == 0 degenerates to Ridge, which sklearn solves faster.
                ('regression', ElasticNet(l1_ratio=self.l1_ratio) if self.l1_ratio != 0 else Ridge())
            ]),
            cv=self.cv_fold,
            n_jobs=self.n_jobs,
            pre_dispatch='1.2*n_jobs',
            param_grid=param_grid,
            verbose=self.verbose,
            scoring='neg_mean_squared_error'
        )
        self.regression_model_.fit(X_source, y_source)
        self.predictor = self.regression_model_.best_estimator_

    def _compute_kernel_matrix(self, X1, X2=None):
        """Return the geodesic flow kernel matrix between X1 and X2 (X1 itself when X2 is None).

        NOTE(review): relies on self.G and self.intermediate_factors.projection,
        neither of which is assigned anywhere in this class as visible here —
        confirm they are set by an external GFK computation before this is called.
        """
        X1_projected = X1.dot(self.intermediate_factors.projection)
        if X2 is None:
            X2_projected = X1_projected
        else:
            X2_projected = X2.dot(self.intermediate_factors.projection)
        return X1_projected.dot(self.G).dot(X2_projected.transpose())

    def _fit_kernel_ridge(self, X_source, y_source, use_data=True):
        """Grid-search a KernelRidge on the precomputed geodesic flow kernel [2].

        Sets self.regression_model_ (the fitted GridSearchCV) and self.predictor
        (its best estimator).
        """
        # Cross validate the alpha
        param_grid ={
            'regression__alpha': self.alpha_values
        }
        # Put source and target data into memory for joblib
        if self.n_jobs >= 2:
            self._memmap_data()
        # Grid search setup
        self.regression_model_ = GridSearchCV(
            Pipeline([
                ('projector', GeodesicMatrixComputer(
                        source_data=self.source_data,
                        target_data=self.target_data,
                        n_factors=self.n_factors,
                        n_pv=self.n_pv,
                        dim_reduction=self.dim_reduction,
                        dim_reduction_target=self.dim_reduction_target,
                        n_representations=self.n_representations,
                        use_data=use_data,
                        mean_center=self.mean_center,
                        std_unit=self.std_unit
                    )
                ),
                # The projector emits a kernel matrix, hence kernel='precomputed'.
                ('regression', KernelRidge(kernel='precomputed'))
            ]),
            cv=self.cv_fold,
            n_jobs=self.n_jobs,
            pre_dispatch='1.2*n_jobs',
            param_grid=param_grid,
            verbose=self.verbose,
            scoring='neg_mean_squared_error'
        )
        #Fit grid search, no need to remove intercept (sklearn handles it)
        self.regression_model_.fit(X_source, y_source)
        self.predictor = self.regression_model_.best_estimator_

    def _fit_consensus(self, X_source, y_source, use_data=True):
        """Grid-search an ElasticNet/Ridge on the consensus representation [1].

        Same scheme as _fit_elasticnet but with a ConsensusRepresentation
        projector and a StandardScaler between projection and regression.
        Sets self.regression_model_ and self.predictor.
        """
        # Cross validate the alpha
        param_grid ={
            'regression__alpha': self.alpha_values,
        }
        # Put source and target data into memory for joblib
        if self.n_jobs >= 2:
            self._memmap_data()
        self.regression_model_ = GridSearchCV(
            Pipeline([
                ('projector', ConsensusRepresentation(
                        source_data=self.source_data,
                        target_data=self.target_data,
                        n_factors=self.n_factors,
                        n_pv=self.n_pv,
                        dim_reduction=self.dim_reduction,
                        dim_reduction_target=self.dim_reduction_target,
                        n_representations=self.n_representations,
                        use_data=use_data,
                        mean_center=self.mean_center,
                        std_unit=self.std_unit
                    )
                ),
                ('scaler', StandardScaler(with_mean=True, with_std=True)),
                # l1_ratio == 0 degenerates to Ridge, which sklearn solves faster.
                ('regression', ElasticNet(l1_ratio=self.l1_ratio) if self.l1_ratio != 0 else Ridge())
            ]),
            cv=self.cv_fold,
            n_jobs=self.n_jobs,
            pre_dispatch='1.2*n_jobs',
            param_grid=param_grid,
            verbose=self.verbose,
            scoring='neg_mean_squared_error'
        )
        self.regression_model_.fit(X_source, y_source)
        self.predictor = self.regression_model_.best_estimator_

    def predict(self, X_target):
        """
        Project the data on the feature representation corresponding to "method",
        and use the predictor trained in "fit" to predict the value of the samples.
        Attributes
        -------
        X_target : numpy.ndarray, shape (n_samples, n_features)
            Genomics data use for prediction.
        Return values
        -------
        y_target : numpy.ndarray, shape (n_samples, 1)
            Drug response predicted for target.
        """
        if self.predictor is None:
            raise ValueError("Instance not fitted")
        return self.predictor.predict(X_target)

    # NOTE(review): defined without `self` and only invoked as
    # DrugResponsePredictor._cv_predict(...) — should be marked @staticmethod.
    def _cv_predict(train, test, pred, X, y):
        # Fit on the training fold, return (predictions, test indices) for the test fold.
        pred.fit(X[train], y[train])
        return pred.predict(X[test]), test

    def compute_predictive_performance(self, X, y, use_data=True):
        """
        Compute the predictive performance of PRECISE by a nested double cross-validation.
        Predictive performance is computed as pearson correlation between the predicted
        response and the real response given in "y"
        Attributes
        -------
        X : numpy.ndarray, shape (n_samples, n_features)
            Genomics data use for prediction.
        y: numpy.ndarray, shape (n_samples, 1)
            Response data
        use_data: bool, default to True
            Whether data "X" should be incorporated into the domain adaptation step.
        Return values
        -------
        predictive_performance : float
            Predictive performance, between -1 and 1, 1 being the best possible.
        """
        # Copy regression model to avoid interference with the already fitted model.
        pred = deepcopy(self.regression_model_)
        pred.verbose = self.verbose
        # To remove any data aggregated during the learning phase.
        pred.estimator.named_steps['projector'].source_data = self.source_data
        # Restrict the grid search around the optimal shrinkage coefficient.
        alpha_opt = self.regression_model_.best_estimator_.named_steps['regression'].alpha
        pred.param_grid['regression__alpha'] = np.array([0.01, 0.1, 1, 10., 100.]) * alpha_opt
        # Use double loop cross validation.
        # NOTE(review): y is passed as the GroupKFold `groups` argument, so samples
        # sharing a response value are kept in the same fold — confirm intended.
        k_fold_split = GroupKFold(10)
        results = [DrugResponsePredictor._cv_predict(train, test, pred, X, y)\
                    for train, test in k_fold_split.split(X, y, y)]
        # Reassemble the out-of-fold predictions into sample order.
        y_predicted = np.zeros(X.shape[0])
        for v, i in results:
            y_predicted[i] = v
        del pred, results
        # Compute predictive performance as pearson correlation.
        return scipy.stats.pearsonr(y_predicted, y)[0]
"scipy.stats.pearsonr",
"precise.pipeline_routine.FlowProjector",
"sklearn.linear_model.ElasticNet",
"precise.pipeline_routine.GeodesicMatrixComputer",
"numpy.memmap",
"sklearn.linear_model.Ridge",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros",
"precise.intermediate_factors.... | [((6854, 6946), 'precise.principal_vectors.PVComputation', 'PVComputation', (['self.n_factors', 'self.n_pv', 'self.dim_reduction', 'self.dim_reduction_target'], {}), '(self.n_factors, self.n_pv, self.dim_reduction, self.\n dim_reduction_target)\n', (6867, 6946), False, 'from precise.principal_vectors import PVComputation\n'), ((7037, 7080), 'precise.intermediate_factors.IntermediateFactors', 'IntermediateFactors', (['self.n_representations'], {}), '(self.n_representations)\n', (7056, 7080), False, 'from precise.intermediate_factors import IntermediateFactors\n'), ((7203, 7226), 'numpy.logspace', 'np.logspace', (['(-6)', '(10)', '(34)'], {}), '(-6, 10, 34)\n', (7214, 7226), True, 'import numpy as np\n'), ((9325, 9387), 'numpy.memmap', 'np.memmap', (['filename'], {'dtype': '"""float32"""', 'mode': '"""w+"""', 'shape': 'x.shape'}), "(filename, dtype='float32', mode='w+', shape=x.shape)\n", (9334, 9387), True, 'import numpy as np\n'), ((16318, 16350), 'copy.deepcopy', 'deepcopy', (['self.regression_model_'], {}), '(self.regression_model_)\n', (16326, 16350), False, 'from copy import deepcopy\n'), ((16865, 16879), 'sklearn.model_selection.GroupKFold', 'GroupKFold', (['(10)'], {}), '(10)\n', (16875, 16879), False, 'from sklearn.model_selection import GridSearchCV, GroupKFold\n'), ((17050, 17070), 'numpy.zeros', 'np.zeros', (['X.shape[0]'], {}), '(X.shape[0])\n', (17058, 17070), True, 'import numpy as np\n'), ((9266, 9284), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9282, 9284), False, 'import tempfile\n'), ((16749, 16786), 'numpy.array', 'np.array', (['[0.01, 0.1, 1, 10.0, 100.0]'], {}), '([0.01, 0.1, 1, 10.0, 100.0])\n', (16757, 16786), True, 'import numpy as np\n'), ((17239, 17275), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['y_predicted', 'y'], {}), '(y_predicted, y)\n', (17259, 17275), False, 'import scipy\n'), ((10109, 10438), 'precise.pipeline_routine.FlowProjector', 'FlowProjector', ([], {'source_data': 
'self.source_data', 'target_data': 'self.target_data', 'n_factors': 'self.n_factors', 'n_pv': 'self.n_pv', 'dim_reduction': 'self.dim_reduction', 'dim_reduction_target': 'self.dim_reduction_target', 'n_representations': 'self.n_representations', 'use_data': 'use_data', 'mean_center': 'self.mean_center', 'std_unit': 'self.std_unit'}), '(source_data=self.source_data, target_data=self.target_data,\n n_factors=self.n_factors, n_pv=self.n_pv, dim_reduction=self.\n dim_reduction, dim_reduction_target=self.dim_reduction_target,\n n_representations=self.n_representations, use_data=use_data,\n mean_center=self.mean_center, std_unit=self.std_unit)\n', (10122, 10438), False, 'from precise.pipeline_routine import FlowProjector, GeodesicMatrixComputer, ConsensusRepresentation\n'), ((11951, 12290), 'precise.pipeline_routine.GeodesicMatrixComputer', 'GeodesicMatrixComputer', ([], {'source_data': 'self.source_data', 'target_data': 'self.target_data', 'n_factors': 'self.n_factors', 'n_pv': 'self.n_pv', 'dim_reduction': 'self.dim_reduction', 'dim_reduction_target': 'self.dim_reduction_target', 'n_representations': 'self.n_representations', 'use_data': 'use_data', 'mean_center': 'self.mean_center', 'std_unit': 'self.std_unit'}), '(source_data=self.source_data, target_data=self.\n target_data, n_factors=self.n_factors, n_pv=self.n_pv, dim_reduction=\n self.dim_reduction, dim_reduction_target=self.dim_reduction_target,\n n_representations=self.n_representations, use_data=use_data,\n mean_center=self.mean_center, std_unit=self.std_unit)\n', (11973, 12290), False, 'from precise.pipeline_routine import FlowProjector, GeodesicMatrixComputer, ConsensusRepresentation\n'), ((12585, 12618), 'sklearn.kernel_ridge.KernelRidge', 'KernelRidge', ([], {'kernel': '"""precomputed"""'}), "(kernel='precomputed')\n", (12596, 12618), False, 'from sklearn.kernel_ridge import KernelRidge\n'), ((13463, 13803), 'precise.pipeline_routine.ConsensusRepresentation', 'ConsensusRepresentation', ([], {'source_data': 
'self.source_data', 'target_data': 'self.target_data', 'n_factors': 'self.n_factors', 'n_pv': 'self.n_pv', 'dim_reduction': 'self.dim_reduction', 'dim_reduction_target': 'self.dim_reduction_target', 'n_representations': 'self.n_representations', 'use_data': 'use_data', 'mean_center': 'self.mean_center', 'std_unit': 'self.std_unit'}), '(source_data=self.source_data, target_data=self.\n target_data, n_factors=self.n_factors, n_pv=self.n_pv, dim_reduction=\n self.dim_reduction, dim_reduction_target=self.dim_reduction_target,\n n_representations=self.n_representations, use_data=use_data,\n mean_center=self.mean_center, std_unit=self.std_unit)\n', (13486, 13803), False, 'from precise.pipeline_routine import FlowProjector, GeodesicMatrixComputer, ConsensusRepresentation\n'), ((14094, 14139), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(True)', 'with_std': '(True)'}), '(with_mean=True, with_std=True)\n', (14108, 14139), False, 'from sklearn.preprocessing import StandardScaler\n'), ((10734, 10768), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'l1_ratio': 'self.l1_ratio'}), '(l1_ratio=self.l1_ratio)\n', (10744, 10768), False, 'from sklearn.linear_model import ElasticNet, Ridge\n'), ((10796, 10803), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (10801, 10803), False, 'from sklearn.linear_model import ElasticNet, Ridge\n'), ((14173, 14207), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'l1_ratio': 'self.l1_ratio'}), '(l1_ratio=self.l1_ratio)\n', (14183, 14207), False, 'from sklearn.linear_model import ElasticNet, Ridge\n'), ((14235, 14242), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (14240, 14242), False, 'from sklearn.linear_model import ElasticNet, Ridge\n')] |
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from numpy.testing import assert_allclose
import pytest
from thermo.temperature import *
def test_data():
    """Check integrity of the tabulated temperature-scale data."""
    scale_tables = [Ts_68, Ts_48, Ts_76, Ts_27]
    expected_T_sums = [186818.69999999998, 175181.39999999997, 368,
                       133893.09999999998]
    assert_allclose([tbl.sum() for tbl in scale_tables], expected_T_sums)
    diff_tables = [diffs_68, diffs_48, diffs_76, diffs_27]
    expected_diff_sums = [46.304000000000016, 151.31800000000001,
                          0.038800000000000001, 411.17999999999995]
    assert_allclose([abs(tbl).sum() for tbl in diff_tables],
                    expected_diff_sums)
def test_conversion():
    """Verify ITS scale conversions round-trip consistently."""
    assert_allclose(T_converter(500, 'ITS-68', 'ITS-48'), 499.9470092992346)

    def check_roundtrip(T, scales, rtol=None):
        # Converting T from scale1 to scale2 and back must recover T.
        for scale1 in scales:
            for scale2 in scales:
                T2 = T_converter(T, scale1, scale2)
                if rtol is None:
                    assert_allclose(T_converter(T2, scale2, scale1), T)
                else:
                    assert_allclose(T_converter(T2, scale2, scale1), T,
                                    rtol=rtol)

    check_roundtrip(1000, ['ITS-90', 'ITS-68', 'ITS-27', 'ITS-48'])
    for Ti in range(100, 1000, 200):
        check_roundtrip(Ti, ['ITS-90', 'ITS-68', 'ITS-48'], rtol=1e-6)
    for Ti in range(15, 27, 2):
        check_roundtrip(Ti, ['ITS-90', 'ITS-68', 'ITS-76'])

    # Unsupported scale pairs / names must raise.
    for bad_pair in [('ITS-27', 'ITS-48'), ('FAIL', 'ITS-48'),
                     ('ITS-76', 'FAIL')]:
        with pytest.raises(Exception):
            T_converter(10, *bad_pair)
def test_diff_68():
    """Spot-check ITS90_68_difference at several temperatures."""
    dTs_calc = [ITS90_68_difference(i) for i in [13.7, 70, 80.5, 298.15, 1000, 1500]]
    dTs = [0, 0.006818871618271216, 0, -0.006253950277664615,
           0.01231818956580355, -0.31455]
    assert_allclose(dTs, dTs_calc) | [
"numpy.testing.assert_allclose",
"pytest.raises"
] | [((1428, 1466), 'numpy.testing.assert_allclose', 'assert_allclose', (['Ts_sums_calc', 'Ts_sums'], {}), '(Ts_sums_calc, Ts_sums)\n', (1443, 1466), False, 'from numpy.testing import assert_allclose\n'), ((1658, 1702), 'numpy.testing.assert_allclose', 'assert_allclose', (['diffs_sums_calc', 'diffs_sums'], {}), '(diffs_sums_calc, diffs_sums)\n', (1673, 1702), False, 'from numpy.testing import assert_allclose\n'), ((1778, 1816), 'numpy.testing.assert_allclose', 'assert_allclose', (['T2', '(499.9470092992346)'], {}), '(T2, 499.9470092992346)\n', (1793, 1816), False, 'from numpy.testing import assert_allclose\n'), ((3078, 3108), 'numpy.testing.assert_allclose', 'assert_allclose', (['dTs', 'dTs_calc'], {}), '(dTs, dTs_calc)\n', (3093, 3108), False, 'from numpy.testing import assert_allclose\n'), ((2634, 2658), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2647, 2658), False, 'import pytest\n'), ((2714, 2738), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2727, 2738), False, 'import pytest\n'), ((2792, 2816), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2805, 2816), False, 'import pytest\n')] |
"""Generate coil geometries.
This module provides functions to generate various coil geometries
that can be used in conjuction with the eppy module to calculate eddy
currents in flat plates.
"""
import numpy as np
import numpy.typing as npt
# ----------------------------------------------------------------------
# User defined types
#
ArrayFloat = npt.NDArray[np.float_]
def straight_wire(start: ArrayFloat, end: ArrayFloat,
                  n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for straight line.

    Parameters
    ----------
    start : ndarray(dtype=float, dim=1)
        Coordinate of start point (x, y, z).
    end : ndarray(dtype=float, dim=1)
        Coordinate of end point (x, y, z).
    n : int, defaults to 40
        Number of line segments.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    endpoints = np.array([start, end])
    # One line from point 0 to point 1 (shape (1, 2) connectivity).
    connectivity = np.array([[0, 1]])
    wire_length = np.linalg.norm(end - start)
    return coil_segments(endpoints, wire_length/n, lines=connectivity)
def circular_coil(center: ArrayFloat, radius: float, plane: str="XY",
                  n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for circular coil.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    radius : float
        Radius of the circular coil.
    plane : {'XY', 'YZ'}, defaults to 'XY'
        Plane in which the circular coil is defined.
    n : int, defaults to 40
        Number of line segments.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    # Two in-plane unit offsets spanning the circle for each plane.
    in_plane_offsets = {
        "XY": (np.array([radius, 0.0, 0.0]), np.array([0.0, radius, 0.0])),
        "YZ": (np.array([0.0, radius, 0.0]), np.array([0.0, 0.0, radius])),
    }
    P = np.zeros((3, 3))
    if plane in in_plane_offsets:
        u, v = in_plane_offsets[plane]
        # Three points on the circle: +u, +v and -u from the center.
        P[0] = center + u
        P[1] = center + v
        P[2] = center - u
    esize = 2*np.pi*radius/n
    return coil_segments(P, esize, circles=np.array([[0, 1, 2]]))
def pancake(center: ArrayFloat, r_in: float, r_out: float,
            turns: int, n: int=24) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for pancake coil.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    r_in : float
        Inner radius.
    r_out : float
        Outer radius.
    turns : int
        Number of windings.
    n : int, defaults to 24
        Number of line segments per winding.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    total_angle = 2*np.pi*turns
    N = turns*n
    # Element size from the mean circumference over all windings.
    esize = turns*np.pi*(r_out+r_in)/N
    return spiral_segments(center, r_in, r_out, total_angle, 0.0, esize)
def helical(center: ArrayFloat, radius: float, h: float, turns:int,
            plane: str="XY", n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for helical coil.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    radius : float
        Coil radius.
    h : float
        Coil length.
    turns : float or int
        Number of windings.
    plane : {'XY', 'YZ'}, defaults to 'XY'
        Plane by which direction (normal) of the coil is defined.
    n : int, defaults to 40
        Number of line segments per winding.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    total_angle = 2*np.pi*turns
    seg_count = turns*n
    esize = turns*np.pi*radius/seg_count
    R, dl = spiral_segments(center, radius, radius, total_angle, h, esize)
    if plane == "YZ":
        # Re-orient the helix so its axis points along x instead of z.
        R, dl = tilt_and_rotate_coil(R, dl, center,
                                    np.array([1.0, 0.0, 0.0]), 0.0)
    return R, dl
def hairpin(center: ArrayFloat, length: float, width: float,
            plane: str="XY", n: int=40) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for hairpin coil.

    The hairpin is a stadium-shaped loop: two straight legs of the
    given length joined by two semicircular ends of radius width/2.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinate of the center (x, y, z).
    length : float
        Length of the straight legs.
    width : float
        Distance between the two legs (end-arc diameter).
    plane : {'XY', 'YZ'}, defaults to 'XY'
        Plane in which the hairpin coil is defined.
    n : int, defaults to 40
        Number of line segments.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    P = np.zeros((6, 3))
    if plane == "XY":
        P[0] = center + np.array([-length/2, width/2, 0])
        P[1] = center + np.array([length/2, width/2, 0])
        P[2] = center + np.array([length/2 + width/2, 0, 0])
        P[3] = center + np.array([length/2, -width/2, 0])
        P[4] = center + np.array([-length/2, -width/2, 0])
        P[5] = center + np.array([-length/2 - width/2, 0, 0])
    elif plane == "YZ":
        P[0] = center + np.array([0, -length/2, width/2])
        P[1] = center + np.array([0, length/2, width/2])
        P[2] = center + np.array([0, length/2 + width/2, 0])
        P[3] = center + np.array([0, length/2, -width/2])
        P[4] = center + np.array([0, -length/2, -width/2])
        P[5] = center + np.array([0, -length/2 - width/2, 0])
    lines = np.array([[0, 1], [3, 4]])
    arcs = np.array([[1, 2, 3],
                     [4, 5, 0]])
    # Total wire length: two straight legs plus the two semicircular ends
    # of radius width/2, whose combined arc length is pi*width.
    # BUG FIX: the original used np.pi*(width/2)**2, which is a circle
    # *area* (dimension length**2), not an arc length.
    L = length*2 + np.pi*width
    esize = L/n
    R, dl = coil_segments(P, esize, lines=lines, arcs=arcs)
    return R, dl
def coil_segments(points: ArrayFloat, esize: float,
                  **kw: npt.NDArray[np.int_]) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for a coil.

    Parameters
    ----------
    points : ndarray(dtype=float, dim=2)
        List of coordinates (x, y, z).
    esize : float
        Desired element length.
    **kw : keyword arguments
        See below.

    Keyword arguments
    -----------------
    **lines : ndarray(dtype=int, dim=2), optional
        Connectivity matrix (N, 2) with the start and end point of a
        line on each row.
    **circles : ndarray(dtype=int, dim=2), optional
        Array with N circle definitions (N, 3). Each circle is defined
        by three point indices on its radius, with the current in the
        direction from P1 to P2. The first element of the i-th row
        corresponds to P1, the second to P2 and the third to P3 of that
        particular circle.
    **arcs : ndarray(dtype=int, dim=2), optional
        Array with N arc definitions (N, 3). Each arc is defined by
        three point indices on its radius, with the current running
        from P1 via P2 to P3.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each small line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    R = np.empty((0, 3), float)
    dl = np.empty((0, 3), float)
    # dict.get() already returns None for a missing key, so the original
    # `kw.get(k) if k in kw else None` double lookup was redundant.
    lines = kw.get("lines")
    if lines is not None:
        for line in lines:
            dR, ddl = line_segments(points[line[0]], points[line[1]], esize)
            R = np.vstack((R, dR))
            dl = np.vstack((dl, ddl))
    circles = kw.get("circles")
    if circles is not None:
        for circle in circles:
            dR, ddl = circle_segments_3p(points[circle[0]], points[circle[1]],
                                           points[circle[2]], esize)
            R = np.vstack((R, dR))
            dl = np.vstack((dl, ddl))
    arcs = kw.get("arcs")
    if arcs is not None:
        for arc in arcs:
            dR, ddl = circle_segments_3p(points[arc[0]], points[arc[1]],
                                           points[arc[2]], esize, is_arc=True)
            R = np.vstack((R, dR))
            dl = np.vstack((dl, ddl))
    return R, dl
def line_segments(p1: ArrayFloat, p2: ArrayFloat,
                  esize: float) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for straight line.

    Parameters
    ----------
    p1 : ndarray(dtype=float, dim=1)
        Coordinates of start point (x, y, z).
    p2 : ndarray(dtype=float, dim=1)
        Coordinates of end point (x, y, z).
    esize : float
        Desired element length.

    Returns
    -------
    R : ndarray(float, dim=2)
        Array with position vectors for all line segments.
    dl : ndarray(float, dim=2)
        Array of line segment length vectors.
    """
    total = np.linalg.norm(p2 - p1)
    # Round the element count up so elements never exceed esize.
    n_el = int(np.ceil(total/esize))
    nodes = np.linspace(p1, p2, n_el + 1)
    # Segment midpoints serve as the position vectors.
    midpoints = (nodes[:-1, :] + nodes[1:, :])/2
    segment = (total/n_el)*(p2 - p1)/total
    return midpoints, np.tile(segment, (n_el, 1))
def circle_segments_3p(p1: ArrayFloat, p2: ArrayFloat, p3: ArrayFloat,
                       esize: float,
                       is_arc: bool=False) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for an arc.

    The arc or circle is defined by three points in three dimensions.
    The current is defined to run from p1 via p2 to p3.

    Parameters
    ----------
    p1 : ndarray(dtype=float, dim=1)
        Coordinates of the first point (x, y, z).
    p2 : ndarray(dtype=float, dim=1)
        Coordinates of the second point (x, y, z).
    p3 : ndarray(dtype=float, dim=1)
        Coordinates of the third point (x, y, z).
    esize : float
        Desired element length.
    is_arc : boolean, optional
        Indicate whether this is an arc running from p1 to p3 (True)
        or a full circle (False).

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each line segment.
    dl : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    center, radius = circle_radius_center_3p(p1, p2, p3)
    v1 = p1 - center
    v2 = p2 - center
    v3 = p3 - center
    normal, _, theta = rotation_direction_and_angle(v1, v2, v3)
    # Use at least 10 elements so a coarse esize never under-resolves.
    n = int(radius*theta/esize) if radius*theta/esize > 10 else 10
    theta = theta if is_arc else 2*np.pi
    # Segment midpoints at half-step offsets around the arc.
    alphas = np.linspace(0, theta, n, endpoint=False) + theta/n/2
    R = [np.dot(rotation_matrix_3d(normal, alpha), v1) for alpha in alphas]
    R = np.array(R) + center
    # Tangent directions: v1 rotated an extra quarter turn.
    dl = [np.dot(rotation_matrix_3d(normal, alpha+np.pi/2), v1)
          for alpha in alphas]
    # BUG FIX: each row of dl has magnitude |v1| = radius already, and a
    # segment spans the angle theta/n, so its length must be
    # radius*theta/n.  Scaling by theta/n achieves that.  The original
    # factor 2*pi*radius/n both ignored is_arc (wrong for theta < 2*pi)
    # and multiplied in radius a second time.
    dl = (theta/n) * np.array(dl)
    return R, dl
def spiral_segments(center: ArrayFloat, R_in: float, R_out: float,
                    theta: float, h: float,
                    esize: float) -> tuple[ArrayFloat, ArrayFloat]:
    """Return position vectors and line segments for a spiral.

    The spiral is oriented to turn around the normal of the XY-plane.

    Parameters
    ----------
    center : ndarray(dtype=float, dim=1)
        Coordinates of the origin point (x, y, z).
    R_in : float
        Inner radius.
    R_out : float
        Outer radius.
    theta : float
        Total winding angle.
    h : float
        Spiral height.
    esize : float
        Desired element length.

    Returns
    -------
    R : ndarray(dtype=float, dim=2)
        Array of position vectors for each line segment.
    dL : ndarray(dtype=float, dim=2)
        Array of line segment vectors.
    """
    n = int(theta*R_out/esize)
    # Midpoint angles: half a step offset from each segment start.
    angles = np.linspace(0, theta, n, endpoint=False) + theta/n/2
    radii = np.linspace(R_in, R_out, n)
    heights = np.linspace(0, h, n)
    R = np.array([center[0] + radii*np.cos(angles),
                  center[1] + radii*np.sin(angles),
                  center[2] + heights]).T
    # Tangent d(position)/d(angle), scaled by the angular step theta/n.
    K = (R_out-R_in)/theta
    r_of_angle = R_in + K*angles
    dx = -r_of_angle*np.sin(angles) + K*np.cos(angles)
    dy = r_of_angle*np.cos(angles) + K*np.sin(angles)
    dz = h/theta*np.ones(n)
    dL = (theta/n)*np.array([dx, dy, dz]).T
    return R, dL
def circle_radius_center_3p(p1: ArrayFloat, p2: ArrayFloat,
                            p3: ArrayFloat) -> tuple[ArrayFloat, float]:
    """Return radius and center of a circle that fits through three points.

    Parameters
    ----------
    p1 : ndarray(dtype=float, dim=1)
        Coordinates of first point (x, y, z).
    p2 : ndarray(dtype=float, dim=1)
        Coordinates of second point (x, y, z).
    p3 : ndarray(dtype=float, dim=1)
        Coordinates of third point (x, y, z).

    Returns
    -------
    P : ndarray(dtype=float, dim=1)
        Coordinates of center (x, y, z).
    R : float
        Radius.
    """
    # Edge lengths of the triangle (p1, p2, p3).
    a = np.linalg.norm(p3-p2)
    b = np.linalg.norm(p3-p1)
    c = np.linalg.norm(p2-p1)
    # Heron's formula: semi-perimeter and triangle area.
    s = (a + b + c)/2
    area = np.sqrt(s*(s-a)*(s-b)*(s-c))
    # Circumradius from the side-product formula.
    R = a*b*c/(4*area)
    # Barycentric weights of the circumcenter.
    w1 = a**2 * (b**2 + c**2 - a**2)
    w2 = b**2 * (c**2 + a**2 - b**2)
    w3 = c**2 * (a**2 + b**2 - c**2)
    # Convert barycentric weights to Cartesian coordinates.
    P = np.column_stack((p1, p2, p3)).dot(np.hstack((w1, w2, w3)))
    return P/(w1 + w2 + w3), R
def rotation_direction_and_angle(v1: ArrayFloat, v2: ArrayFloat,
        v3: ArrayFloat, eps: float=1E-10) -> tuple[ArrayFloat, float, float]:
    """Return outward normal and angles for an arc.

    The direction of the current in a circle or arc segment is defined
    by means of three position vectors v1, v2, and v3, which share the
    origin in the arc center. The current flows from v1 via v2 to v3.
    This function returns the corresponding normal vector around which
    to rotate. In addition, the angle between the vectors (in
    direction of rotation) is returned as well.

    Parameters
    ----------
    v1 : ndarray(dtype=float, dim=1)
        Position vector of the first point w.r.t. the center.
    v2 : ndarray(dtype=float, dim=1)
        Position vector of the second point w.r.t. the center.
    v3 : ndarray(dtype=float, dim=1)
        Position vector of the third point w.r.t. the center.
    eps : float, optional
        Precision (defaults to 1E-10)

    Returns
    -------
    normal : ndarray(dtype=float, dim=1)
        Normal vector that determines current direction in arc.
    phi : float
        Angle between first and second vector in direction of rotation.
    theta : float.
        Angle between first and third vector in direction of rotation.
    """
    # Work on unit vectors so dot products are plain cosines.
    V1 = v1/np.linalg.norm(v1)
    V2 = v2/np.linalg.norm(v2)
    V3 = v3/np.linalg.norm(v3)
    normal = np.array([0.0, 0.0, 0.0])
    phi = np.arccos(np.dot(V1, V2))
    theta = np.arccos(np.dot(V1, V3))
    if (phi < eps) or (theta < eps):
        raise ValueError("Circle points coincide.")
    if abs(phi-np.pi) < eps:  # v1 and v2 are anti-parallel
        # Cross product of V1 and V2 vanishes; use V1 x V3 instead and
        # flip it, since the rotation passes the long way around.
        normal = -np.cross(V1, V3)
        theta = 2*np.pi - theta
    elif abs(theta-np.pi) < eps:  # v1 and v3 are anti-parallel
        # V1 x V3 vanishes; V1 x V2 still gives the rotation sense.
        normal = np.cross(V1, V2)
    else:  # neither v2 nor v3 is anti-parallel with v1
        # Compare the unit normals of the (V1, V2) and (V1, V3) planes.
        N12 = np.cross(V1, V2)/np.linalg.norm(np.cross(V1, V2))
        N13 = np.cross(V1, V3)/np.linalg.norm(np.cross(V1, V3))
        if np.linalg.norm(N12 - N13) < eps:  # v2 & v3 lie in same circle half
            if theta - phi < eps:
                raise ValueError("Circle points coincide.")
            elif theta > phi:
                # v2 is passed before v3: rotation sense is V1 x V2.
                normal = np.cross(V1, V2)
            elif phi > theta:
                # v3 is reached before v2: reverse the sense and take
                # the complementary (reflex) angles.
                normal = -np.cross(V1, V2)
                theta = 2*np.pi - theta
                phi = 2*np.pi - phi
        else:
            # v2 and v3 lie in opposite half-planes: theta is reflex.
            normal = np.cross(V1, V2)
            theta = 2*np.pi-theta
    return normal, phi, theta
def tilt_and_rotate_coil(R: ArrayFloat, dl: ArrayFloat,
                         origin: ArrayFloat, new_z:ArrayFloat,
                         theta: float) -> tuple[ArrayFloat, ArrayFloat]:
    """Rotate coil around a given point.

    Parameters
    ----------
    R : ndarray(dtype=float, dim=2)
        Position vectors.
    dl : ndarray(dtype=float, dim=2)
        Length segments.
    origin : ndarray(dtype=float, dim=1)
        Point around which to rotate.
    new_z : ndarray(dtype=float, dim=1)
        Direction of new_z axis in terms of old CS.
    theta : float
        Rotation angle around new z-axis.

    Returns
    -------
    R_new : ndarray(dtype=float, dim=2)
        New position vector.
    dl_new : ndarray(dtype=float, dim=2)
        New length segments.
    """
    # Positions rotate about `origin`; direction vectors rotate about
    # the coordinate origin so only their orientation changes.
    rotated_R = tilt_and_rotate(R, origin, new_z, theta)
    rotated_dl = tilt_and_rotate(dl, np.array([0.0, 0.0, 0.0]), new_z, theta)
    return rotated_R, rotated_dl
def tilt_and_rotate(points: ArrayFloat, origin: ArrayFloat, new_z: ArrayFloat,
                    theta: float, eps: float=1E-10) -> ArrayFloat:
    """Rotate points around a given origin.

    Parameters
    ----------
    points : ndarray(dtype=float, dim=2)
        Points to rotate from old CS to new CS.
    origin : ndarray(dtype=float, dim=1)
        Point around which to rotate.
    new_z : ndarray(dtype=float, dim=1)
        Direction of new_z axis in terms of old CS.
    theta : float
        Rotation angle around new z-axis.
    eps : float, optional
        Precision (defaults to 1E-10)

    Returns
    -------
    new_points : ndarray(dtype=float, dim=2)
        Points in new CS.
    """
    old_z = np.array([0.0, 0.0, 1.0])
    new_z = new_z/np.linalg.norm(new_z)
    phi = np.arccos(np.dot(old_z, new_z))
    # Rotate from old to new z-axis.
    if (phi < eps):
        # Old and new z-axis coincide: no tilt needed, but the points
        # must still be shifted to be relative to `origin`.
        # BUG FIX: the original kept `R = points` here, so the final
        # `R + origin` added a spurious offset and the theta-rotation
        # spun the points about the wrong center.
        R = points - origin
    elif (abs(phi-np.pi) < eps):
        # z_new = -z_old: the tilt axis is ambiguous; pick the x-axis.
        axis = np.array([1.0, 0, 0])
        rot = rotation_matrix_3d(axis, phi)
        R = np.array([np.dot(rot, p - origin) for p in points])
    else:
        axis = np.cross(old_z, new_z)
        rot = rotation_matrix_3d(axis, phi)
        R = np.array([np.dot(rot, p - origin) for p in points])
    # Rotate theta around the new z-axis, then shift back.
    rot = rotation_matrix_3d(new_z, theta)
    R = np.array([np.dot(rot, p) for p in R])
    return R + origin
def rotation_matrix_3d(axis: ArrayFloat, theta: float) -> ArrayFloat:
    """Return rotation matrix to rotate a vector in three dimensions.

    Makes use of the Euler-Rodrigues formula.

    Parameters
    ----------
    axis : ndarray(dtype=float, dim=1)
        Normal axis around which to rotate (normalized internally).
    theta : float
        Rotation angle (in radians).

    Returns
    -------
    rot : ndarray(dtype=float, dim=2)
        The 3x3 rotation matrix.
    """
    unit = axis/np.linalg.norm(axis)
    # Euler-Rodrigues parameters (unit quaternion components).
    a = np.cos(theta/2)
    b, c, d = unit*np.sin(theta/2)
    row0 = [a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)]
    row1 = [2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)]
    row2 = [2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]
    return np.array([row0, row1, row2])
| [
"numpy.tile",
"numpy.ceil",
"numpy.sqrt",
"numpy.cross",
"numpy.hstack",
"numpy.column_stack",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.empty",
"numpy.cos",
"numpy.dot",
"numpy.linalg.norm",
"numpy.sin",
"numpy.vstack"
] | [((1045, 1067), 'numpy.array', 'np.array', (['[start, end]'], {}), '([start, end])\n', (1053, 1067), True, 'import numpy as np\n'), ((1079, 1095), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1087, 1095), True, 'import numpy as np\n'), ((1129, 1156), 'numpy.linalg.norm', 'np.linalg.norm', (['(end - start)'], {}), '(end - start)\n', (1143, 1156), True, 'import numpy as np\n'), ((1992, 2008), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2000, 2008), True, 'import numpy as np\n'), ((2391, 2410), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (2399, 2410), True, 'import numpy as np\n'), ((5309, 5325), 'numpy.zeros', 'np.zeros', (['(6, 3)'], {}), '((6, 3))\n', (5317, 5325), True, 'import numpy as np\n'), ((6095, 6121), 'numpy.array', 'np.array', (['[[0, 1], [3, 4]]'], {}), '([[0, 1], [3, 4]])\n', (6103, 6121), True, 'import numpy as np\n'), ((6133, 6165), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 0]]'], {}), '([[1, 2, 3], [4, 5, 0]])\n', (6141, 6165), True, 'import numpy as np\n'), ((7859, 7882), 'numpy.empty', 'np.empty', (['(0, 3)', 'float'], {}), '((0, 3), float)\n', (7867, 7882), True, 'import numpy as np\n'), ((7892, 7915), 'numpy.empty', 'np.empty', (['(0, 3)', 'float'], {}), '((0, 3), float)\n', (7900, 7915), True, 'import numpy as np\n'), ((9493, 9516), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (9507, 9516), True, 'import numpy as np\n'), ((9578, 9606), 'numpy.linspace', 'np.linspace', (['p1', 'p2', '(nel + 1)'], {}), '(p1, p2, nel + 1)\n', (9589, 9606), True, 'import numpy as np\n'), ((9657, 9697), 'numpy.tile', 'np.tile', (['(esize * (p2 - p1) / L)', '(nel, 1)'], {}), '(esize * (p2 - p1) / L, (nel, 1))\n', (9664, 9697), True, 'import numpy as np\n'), ((12379, 12406), 'numpy.linspace', 'np.linspace', (['R_in', 'R_out', 'n'], {}), '(R_in, R_out, n)\n', (12390, 12406), True, 'import numpy as np\n'), ((13524, 13547), 'numpy.linalg.norm', 'np.linalg.norm', (['(p3 - p2)'], 
{}), '(p3 - p2)\n', (13538, 13547), True, 'import numpy as np\n'), ((13554, 13577), 'numpy.linalg.norm', 'np.linalg.norm', (['(p3 - p1)'], {}), '(p3 - p1)\n', (13568, 13577), True, 'import numpy as np\n'), ((13584, 13607), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (13598, 13607), True, 'import numpy as np\n'), ((13674, 13714), 'numpy.sqrt', 'np.sqrt', (['(s * (s - t) * (s - u) * (s - v))'], {}), '(s * (s - t) * (s - u) * (s - v))\n', (13681, 13714), True, 'import numpy as np\n'), ((15448, 15473), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (15456, 15473), True, 'import numpy as np\n'), ((17514, 17539), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (17522, 17539), True, 'import numpy as np\n'), ((18339, 18364), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (18347, 18364), True, 'import numpy as np\n'), ((19632, 19649), 'numpy.cos', 'np.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (19638, 19649), True, 'import numpy as np\n'), ((19693, 19940), 'numpy.array', 'np.array', (['[[a * a + b * b - c * c - d * d, 2 * (b * c - a * d), 2 * (b * d + a * c)],\n [2 * (b * c + a * d), a * a + c * c - b * b - d * d, 2 * (c * d - a * b\n )], [2 * (b * d - a * c), 2 * (c * d + a * b), a * a + d * d - b * b - \n c * c]]'], {}), '([[a * a + b * b - c * c - d * d, 2 * (b * c - a * d), 2 * (b * d +\n a * c)], [2 * (b * c + a * d), a * a + c * c - b * b - d * d, 2 * (c *\n d - a * b)], [2 * (b * d - a * c), 2 * (c * d + a * b), a * a + d * d -\n b * b - c * c]])\n', (19701, 19940), True, 'import numpy as np\n'), ((4422, 4447), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (4430, 4447), True, 'import numpy as np\n'), ((9529, 9547), 'numpy.ceil', 'np.ceil', (['(L / esize)'], {}), '(L / esize)\n', (9536, 9547), True, 'import numpy as np\n'), ((11063, 11103), 'numpy.linspace', 'np.linspace', (['(0)', 'theta', 'n'], {'endpoint': 
'(False)'}), '(0, theta, n, endpoint=False)\n', (11074, 11103), True, 'import numpy as np\n'), ((11200, 11211), 'numpy.array', 'np.array', (['R'], {}), '(R)\n', (11208, 11211), True, 'import numpy as np\n'), ((11386, 11398), 'numpy.array', 'np.array', (['dl'], {}), '(dl)\n', (11394, 11398), True, 'import numpy as np\n'), ((12314, 12354), 'numpy.linspace', 'np.linspace', (['(0)', 'theta', 'n'], {'endpoint': '(False)'}), '(0, theta, n, endpoint=False)\n', (12325, 12354), True, 'import numpy as np\n'), ((12531, 12551), 'numpy.linspace', 'np.linspace', (['(0)', 'h', 'n'], {}), '(0, h, n)\n', (12542, 12551), True, 'import numpy as np\n'), ((12560, 12579), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (12568, 12579), True, 'import numpy as np\n'), ((13974, 13997), 'numpy.hstack', 'np.hstack', (['(b1, b2, b3)'], {}), '((b1, b2, b3))\n', (13983, 13997), True, 'import numpy as np\n'), ((15353, 15371), 'numpy.linalg.norm', 'np.linalg.norm', (['v1'], {}), '(v1)\n', (15367, 15371), True, 'import numpy as np\n'), ((15384, 15402), 'numpy.linalg.norm', 'np.linalg.norm', (['v2'], {}), '(v2)\n', (15398, 15402), True, 'import numpy as np\n'), ((15415, 15433), 'numpy.linalg.norm', 'np.linalg.norm', (['v3'], {}), '(v3)\n', (15429, 15433), True, 'import numpy as np\n'), ((15494, 15508), 'numpy.dot', 'np.dot', (['V1', 'V2'], {}), '(V1, V2)\n', (15500, 15508), True, 'import numpy as np\n'), ((15532, 15546), 'numpy.dot', 'np.dot', (['V1', 'V3'], {}), '(V1, V3)\n', (15538, 15546), True, 'import numpy as np\n'), ((18383, 18404), 'numpy.linalg.norm', 'np.linalg.norm', (['new_z'], {}), '(new_z)\n', (18397, 18404), True, 'import numpy as np\n'), ((18425, 18445), 'numpy.dot', 'np.dot', (['old_z', 'new_z'], {}), '(old_z, new_z)\n', (18431, 18445), True, 'import numpy as np\n'), ((19603, 19623), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (19617, 19623), True, 'import numpy as np\n'), ((19667, 19684), 'numpy.sin', 'np.sin', (['(theta / 2)'], {}), '(theta 
/ 2)\n', (19673, 19684), True, 'import numpy as np\n'), ((2055, 2079), 'numpy.array', 'np.array', (['[radius, 0, 0]'], {}), '([radius, 0, 0])\n', (2063, 2079), True, 'import numpy as np\n'), ((2104, 2128), 'numpy.array', 'np.array', (['[0, radius, 0]'], {}), '([0, radius, 0])\n', (2112, 2128), True, 'import numpy as np\n'), ((2153, 2177), 'numpy.array', 'np.array', (['[radius, 0, 0]'], {}), '([radius, 0, 0])\n', (2161, 2177), True, 'import numpy as np\n'), ((5372, 5409), 'numpy.array', 'np.array', (['[-length / 2, width / 2, 0]'], {}), '([-length / 2, width / 2, 0])\n', (5380, 5409), True, 'import numpy as np\n'), ((5430, 5466), 'numpy.array', 'np.array', (['[length / 2, width / 2, 0]'], {}), '([length / 2, width / 2, 0])\n', (5438, 5466), True, 'import numpy as np\n'), ((5487, 5527), 'numpy.array', 'np.array', (['[length / 2 + width / 2, 0, 0]'], {}), '([length / 2 + width / 2, 0, 0])\n', (5495, 5527), True, 'import numpy as np\n'), ((5548, 5585), 'numpy.array', 'np.array', (['[length / 2, -width / 2, 0]'], {}), '([length / 2, -width / 2, 0])\n', (5556, 5585), True, 'import numpy as np\n'), ((5606, 5644), 'numpy.array', 'np.array', (['[-length / 2, -width / 2, 0]'], {}), '([-length / 2, -width / 2, 0])\n', (5614, 5644), True, 'import numpy as np\n'), ((5665, 5706), 'numpy.array', 'np.array', (['[-length / 2 - width / 2, 0, 0]'], {}), '([-length / 2 - width / 2, 0, 0])\n', (5673, 5706), True, 'import numpy as np\n'), ((8118, 8136), 'numpy.vstack', 'np.vstack', (['(R, dR)'], {}), '((R, dR))\n', (8127, 8136), True, 'import numpy as np\n'), ((8154, 8174), 'numpy.vstack', 'np.vstack', (['(dl, ddl)'], {}), '((dl, ddl))\n', (8163, 8174), True, 'import numpy as np\n'), ((8457, 8475), 'numpy.vstack', 'np.vstack', (['(R, dR)'], {}), '((R, dR))\n', (8466, 8475), True, 'import numpy as np\n'), ((8493, 8513), 'numpy.vstack', 'np.vstack', (['(dl, ddl)'], {}), '((dl, ddl))\n', (8502, 8513), True, 'import numpy as np\n'), ((8782, 8800), 'numpy.vstack', 'np.vstack', (['(R, dR)'], 
{}), '((R, dR))\n', (8791, 8800), True, 'import numpy as np\n'), ((8818, 8838), 'numpy.vstack', 'np.vstack', (['(dl, ddl)'], {}), '((dl, ddl))\n', (8827, 8838), True, 'import numpy as np\n'), ((12457, 12470), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (12463, 12470), True, 'import numpy as np\n'), ((12497, 12510), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (12503, 12510), True, 'import numpy as np\n'), ((12659, 12672), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (12665, 12672), True, 'import numpy as np\n'), ((12677, 12690), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (12683, 12690), True, 'import numpy as np\n'), ((12717, 12730), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (12723, 12730), True, 'import numpy as np\n'), ((12735, 12748), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (12741, 12748), True, 'import numpy as np\n'), ((12820, 12842), 'numpy.array', 'np.array', (['[dx, dy, dz]'], {}), '([dx, dy, dz])\n', (12828, 12842), True, 'import numpy as np\n'), ((13940, 13969), 'numpy.column_stack', 'np.column_stack', (['(p1, p2, p3)'], {}), '((p1, p2, p3))\n', (13955, 13969), True, 'import numpy as np\n'), ((15750, 15766), 'numpy.cross', 'np.cross', (['V1', 'V3'], {}), '(V1, V3)\n', (15758, 15766), True, 'import numpy as np\n'), ((15915, 15931), 'numpy.cross', 'np.cross', (['V1', 'V2'], {}), '(V1, V2)\n', (15923, 15931), True, 'import numpy as np\n'), ((18671, 18692), 'numpy.array', 'np.array', (['[1.0, 0, 0]'], {}), '([1.0, 0, 0])\n', (18679, 18692), True, 'import numpy as np\n'), ((18826, 18848), 'numpy.cross', 'np.cross', (['old_z', 'new_z'], {}), '(old_z, new_z)\n', (18834, 18848), True, 'import numpy as np\n'), ((19056, 19070), 'numpy.dot', 'np.dot', (['rot', 'p'], {}), '(rot, p)\n', (19062, 19070), True, 'import numpy as np\n'), ((2226, 2250), 'numpy.array', 'np.array', (['[0, radius, 0]'], {}), '([0, radius, 0])\n', (2234, 2250), True, 'import numpy as np\n'), ((2275, 2299), 'numpy.array', 'np.array', 
(['[0, 0, radius]'], {}), '([0, 0, radius])\n', (2283, 2299), True, 'import numpy as np\n'), ((2324, 2348), 'numpy.array', 'np.array', (['[0, radius, 0]'], {}), '([0, radius, 0])\n', (2332, 2348), True, 'import numpy as np\n'), ((5751, 5788), 'numpy.array', 'np.array', (['[0, -length / 2, width / 2]'], {}), '([0, -length / 2, width / 2])\n', (5759, 5788), True, 'import numpy as np\n'), ((5809, 5845), 'numpy.array', 'np.array', (['[0, length / 2, width / 2]'], {}), '([0, length / 2, width / 2])\n', (5817, 5845), True, 'import numpy as np\n'), ((5866, 5906), 'numpy.array', 'np.array', (['[0, length / 2 + width / 2, 0]'], {}), '([0, length / 2 + width / 2, 0])\n', (5874, 5906), True, 'import numpy as np\n'), ((5927, 5964), 'numpy.array', 'np.array', (['[0, length / 2, -width / 2]'], {}), '([0, length / 2, -width / 2])\n', (5935, 5964), True, 'import numpy as np\n'), ((5985, 6023), 'numpy.array', 'np.array', (['[0, -length / 2, -width / 2]'], {}), '([0, -length / 2, -width / 2])\n', (5993, 6023), True, 'import numpy as np\n'), ((6044, 6085), 'numpy.array', 'np.array', (['[0, -length / 2 - width / 2, 0]'], {}), '([0, -length / 2 - width / 2, 0])\n', (6052, 6085), True, 'import numpy as np\n'), ((16034, 16050), 'numpy.cross', 'np.cross', (['V1', 'V2'], {}), '(V1, V2)\n', (16042, 16050), True, 'import numpy as np\n'), ((16098, 16114), 'numpy.cross', 'np.cross', (['V1', 'V3'], {}), '(V1, V3)\n', (16106, 16114), True, 'import numpy as np\n'), ((16159, 16184), 'numpy.linalg.norm', 'np.linalg.norm', (['(N12 - N13)'], {}), '(N12 - N13)\n', (16173, 16184), True, 'import numpy as np\n'), ((16578, 16594), 'numpy.cross', 'np.cross', (['V1', 'V2'], {}), '(V1, V2)\n', (16586, 16594), True, 'import numpy as np\n'), ((16066, 16082), 'numpy.cross', 'np.cross', (['V1', 'V2'], {}), '(V1, V2)\n', (16074, 16082), True, 'import numpy as np\n'), ((16130, 16146), 'numpy.cross', 'np.cross', (['V1', 'V3'], {}), '(V1, V3)\n', (16138, 16146), True, 'import numpy as np\n'), ((18759, 18782), 
'numpy.dot', 'np.dot', (['rot', '(p - origin)'], {}), '(rot, p - origin)\n', (18765, 18782), True, 'import numpy as np\n'), ((18915, 18938), 'numpy.dot', 'np.dot', (['rot', '(p - origin)'], {}), '(rot, p - origin)\n', (18921, 18938), True, 'import numpy as np\n'), ((16377, 16393), 'numpy.cross', 'np.cross', (['V1', 'V2'], {}), '(V1, V2)\n', (16385, 16393), True, 'import numpy as np\n'), ((16450, 16466), 'numpy.cross', 'np.cross', (['V1', 'V2'], {}), '(V1, V2)\n', (16458, 16466), True, 'import numpy as np\n')] |
from setuptools import setup, find_packages, Extension
#######################################
# Prepare list of compiled extensions #
#######################################
extensions = []
# C extension called via ctypes
extensions.append(
Extension(
# "name" defines the location of the compiled module
# within the package tree:
name='pypkgexample.mymodule_c_with_ctypes.hellofcctyp',
# "sources" are the source files to be compiled
sources=[('pypkgexample/mymodule_c_with_ctypes/'
+ '/src/hellofunctions.c')],
include_dirs=[('pypkgexample/mymodule_c_with_ctypes'
+ '/include')],
# Here one can add compilation flags, libraries,
# macro declarations, etc. See setuptools documentation.
)
)
# C extension called via cython
from Cython.Build import cythonize
cython_extensions = [
Extension(
name='pypkgexample.mymodule_c_with_cython.hellofccyth',
sources=[('pypkgexample/mymodule_c_with_cython/'
+ 'hellocython.pyx'),
('pypkgexample/mymodule_c_with_cython/'
+ '/src/hellofunctions.c')],
include_dirs=[('pypkgexample/mymodule_c_with_cython'
+ '/include')],
),
# Other cython extensions can be added here
]
# Cython extensions need to be cythonized before being added to the main
# extension list:
extensions += cythonize(cython_extensions)
# f2py extension
# (to handle f2py extensions we need to replace the setup function and
# the Extension class with their extended version from the numpy package)
from numpy.distutils.core import Extension
from numpy.distutils.core import setup
extensions.append(
Extension(
name='pypkgexample.mymodule_fortran.helloffort',
sources=['pypkgexample/mymodule_fortran/hello_subr.f90'])
)
#########
# Setup #
#########
setup(
name='pypkgexample',
version='1.0.0',
description='Example python package with compiled extensions',
url='https://github.com/giadarol/pypkgexample',
author='<NAME>',
packages=find_packages(),
ext_modules = extensions,
install_requires=[
'numpy>=1.0',
'pytest', # In principle could be made optional
]
)
| [
"Cython.Build.cythonize",
"numpy.distutils.core.Extension",
"setuptools.find_packages"
] | [((1549, 1577), 'Cython.Build.cythonize', 'cythonize', (['cython_extensions'], {}), '(cython_extensions)\n', (1558, 1577), False, 'from Cython.Build import cythonize\n'), ((253, 470), 'numpy.distutils.core.Extension', 'Extension', ([], {'name': '"""pypkgexample.mymodule_c_with_ctypes.hellofcctyp"""', 'sources': "['pypkgexample/mymodule_c_with_ctypes/' + '/src/hellofunctions.c']", 'include_dirs': "['pypkgexample/mymodule_c_with_ctypes' + '/include']"}), "(name='pypkgexample.mymodule_c_with_ctypes.hellofcctyp', sources=[\n 'pypkgexample/mymodule_c_with_ctypes/' + '/src/hellofunctions.c'],\n include_dirs=['pypkgexample/mymodule_c_with_ctypes' + '/include'])\n", (262, 470), False, 'from numpy.distutils.core import Extension\n'), ((969, 1251), 'numpy.distutils.core.Extension', 'Extension', ([], {'name': '"""pypkgexample.mymodule_c_with_cython.hellofccyth"""', 'sources': "['pypkgexample/mymodule_c_with_cython/' + 'hellocython.pyx', \n 'pypkgexample/mymodule_c_with_cython/' + '/src/hellofunctions.c']", 'include_dirs': "['pypkgexample/mymodule_c_with_cython' + '/include']"}), "(name='pypkgexample.mymodule_c_with_cython.hellofccyth', sources=[\n 'pypkgexample/mymodule_c_with_cython/' + 'hellocython.pyx', \n 'pypkgexample/mymodule_c_with_cython/' + '/src/hellofunctions.c'],\n include_dirs=['pypkgexample/mymodule_c_with_cython' + '/include'])\n", (978, 1251), False, 'from numpy.distutils.core import Extension\n'), ((1854, 1975), 'numpy.distutils.core.Extension', 'Extension', ([], {'name': '"""pypkgexample.mymodule_fortran.helloffort"""', 'sources': "['pypkgexample/mymodule_fortran/hello_subr.f90']"}), "(name='pypkgexample.mymodule_fortran.helloffort', sources=[\n 'pypkgexample/mymodule_fortran/hello_subr.f90'])\n", (1863, 1975), False, 'from numpy.distutils.core import Extension\n'), ((2244, 2259), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2257, 2259), False, 'from setuptools import setup, find_packages, Extension\n')] |
'Test of label scope and label exporter'
import numpy as np
from videocore.assembler import qpu, get_label_positions
from videocore.driver import Driver
@qpu
def given_jmp(asm):
mov(ra0, uniform)
mov(r0, 0)
L.entry
jmp(reg=ra0)
nop()
nop()
nop()
iadd(r0, r0, 1)
L.test
iadd(r0, r0, 4)
setup_vpm_write()
mov(vpm, r0)
setup_dma_store(nrows=1)
start_dma_store(uniform)
wait_dma_store()
exit()
def test_given_jump():
lbls = get_label_positions(given_jmp)
entry_pc = 0
test_pc = 0
for lbl, pc in lbls:
if lbl.name == 'entry':
entry_pc = pc
if lbl.name == 'test':
test_pc = pc
with Driver() as drv:
X = drv.alloc((1, 16), 'int32')
X[:] = 1234
drv.execute(
n_threads=1,
program=drv.program(given_jmp),
uniforms=[test_pc-entry_pc-32, X.address]
)
assert np.all(X == 4)
@qpu
def with_namespace(asm):
mov(r0, 0)
with namespace('ns1'):
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
with namespace('nested'):
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
with namespace('ns2'):
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
jmp(L.test)
nop()
nop()
nop()
iadd(r0, r0, 10)
L.test
iadd(r0, r0, 1)
setup_vpm_write()
mov(vpm, r0)
setup_dma_store(nrows=1)
start_dma_store(uniform)
wait_dma_store()
exit()
def test_with_namespace():
with Driver() as drv:
X = drv.alloc((1, 16), 'int32')
X[:] = 1234
drv.execute(
n_threads=1,
program=drv.program(with_namespace),
uniforms=[X.address]
)
assert np.all(X == 4)
| [
"videocore.driver.Driver",
"videocore.assembler.get_label_positions",
"numpy.all"
] | [((497, 527), 'videocore.assembler.get_label_positions', 'get_label_positions', (['given_jmp'], {}), '(given_jmp)\n', (516, 527), False, 'from videocore.assembler import qpu, get_label_positions\n'), ((709, 717), 'videocore.driver.Driver', 'Driver', ([], {}), '()\n', (715, 717), False, 'from videocore.driver import Driver\n'), ((955, 969), 'numpy.all', 'np.all', (['(X == 4)'], {}), '(X == 4)\n', (961, 969), True, 'import numpy as np\n'), ((1781, 1789), 'videocore.driver.Driver', 'Driver', ([], {}), '()\n', (1787, 1789), False, 'from videocore.driver import Driver\n'), ((2011, 2025), 'numpy.all', 'np.all', (['(X == 4)'], {}), '(X == 4)\n', (2017, 2025), True, 'import numpy as np\n')] |
from __future__ import print_function,division
import os,sys,re
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import h5py
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage import median_filter
from scipy.optimize import leastsq, curve_fit
from scipy.signal import lombscargle
from pkg_resources import resource_filename
import logging
import acor
from .utils import peaks_and_lphs
from .findpeaks import peakdetect
class TimeSeries(object):
def __init__(self, t, f, mask=None, cadence=None,
default_maxlag_days=200,
flatten_order=2):
self.t = np.atleast_1d(t)
self.f = np.atleast_1d(f)
if mask is None:
mask = np.isnan(self.f)
self.mask = mask
if cadence is None:
cadence = np.median(self.t[1:]-self.t[:-1])
self.cadence = cadence
self.default_maxlag_days = default_maxlag_days
self.default_maxlag = default_maxlag_days//cadence
self.flatten_order = flatten_order
#set private variables for cached acorr calculation
self._lag = None #always should be in cadences
self._ac = None
self._ac_poly_coeffs = None #polynomial coefficients subtracted out to flatten acorr
#private variables for caching pgram calculation
self._pers = None
self._pgram = None
def acorr(self, maxlag=None, smooth=18, days=True,
recalc=False):
if maxlag is None:
maxlag = self.default_maxlag
#don't recalculate the same thing if not necessary
if self._ac is not None and not recalc:
lag = self._lag
ac = self._ac
else:
x = self.f.copy()
x[self.mask] = 0
#logging.debug('{} nans in x'.format((np.isnan(x)).sum()))
ac = acor.function(x, maxlag)
lag = np.arange(maxlag)
#fit and subtract out quadratic
if self.flatten_order is not None:
c = np.polyfit(lag, ac, self.flatten_order)
ac -= np.polyval(c, lag)
self._ac_poly_coeffs = c
#smooth AC function
ac = gaussian_filter(ac, smooth)
#set private variables for cached calculation
self._ac = ac
self._lag = lag
self._maxlag = maxlag
self._smooth = smooth
if days:
return lag*self.cadence,ac
else:
return lag,ac
def acorr_peaks(self, lookahead=5, days=True,
return_heights=False, **kwargs):
lag, ac = self.acorr(days=days, **kwargs)
return peaks_and_lphs(ac, lag, return_heights=return_heights,
lookahead=lookahead)
def plot_acorr(self, days=True, smooth=18, maxlag=None,
mark_period=False, lookahead=5, fit_npeaks=4,
tol=0.2,
**kwargs):
lag, ac = self.acorr(days=days, smooth=smooth, maxlag=maxlag)
plt.plot(lag, ac, **kwargs)
pks, lphs = self.acorr_peaks(smooth=smooth,
maxlag=maxlag,
lookahead=lookahead)
#plt.ylim(ymax=1)
if mark_period:
if mark_period is True:
mark_period = None
p,e_p,pks,lphs,hts = self.acorr_period_fit(period=mark_period, return_peaks=True,
fit_npeaks=fit_npeaks, tol=tol,
smooth=smooth,
maxlag=maxlag,
lookahead=lookahead)
plt.xlim(xmax=min((fit_npeaks+1)*p, lag.max()))
for pk in pks:
plt.axvline(pk, ls=':')
def acorr_period_fit(self, period=None, fit_npeaks=4,
smooth=18, maxlag=None, lookahead=5,
tol=0.2, return_peaks=False):
peaks, lphs, hts = self.acorr_peaks(smooth=smooth, maxlag=maxlag,
lookahead=lookahead, return_heights=True)
if lphs[0] >= lphs[1]:
firstpeak = peaks[0]
else:
firstpeak = peaks[1]
if lphs[1] < 1.2*lphs[0]:
logging.warning('Second peak (selected) less than 1.2x height of first peak.')
if period is None:
period = firstpeak
if fit_npeaks > len(peaks):
fit_npeaks = len(peaks)
#peaks = peaks[:fit_npeaks]
#identify peaks to use in fit: first 'fit_npeaks' peaks closest to integer
# multiples of period guess
fit_peaks = []
fit_lphs = []
fit_hts = []
last = 0.
#used = np.zeros_like(peaks).astype(bool)
for n in np.arange(fit_npeaks)+1:
#find highest peak within 'tol' of integer multiple (that hasn't been used)
close = (np.absolute(peaks - n*period) < (tol*n*period)) & ((peaks-last) > 0.3*period)
if close.sum()==0:
fit_npeaks = n-1
break
#raise NoPeakError('No peak found near {}*{:.2f}={:.2f} (tol={})'.format(n,period,n*period,tol))
ind = np.argmax(hts[close])
last = peaks[close][ind]
fit_peaks.append(peaks[close][ind])
fit_lphs.append(lphs[close][ind])
fit_hts.append(hts[close][ind])
#used[close][ind] = True
logging.debug('{}: {}, {}'.format(n*period,peaks[close],peaks[close][ind]))
#logging.debug(used)
#ind = np.argmin(np.absolute(peaks - n*period)) #closest peak
#fit_peaks.append(peaks[ind])
#fit_lphs.append(lphs[ind])
logging.debug('fitting peaks: {}'.format(fit_peaks))
if fit_npeaks < 3:
return peaks,-1, fit_peaks, fit_lphs, fit_hts
x = np.arange(fit_npeaks + 1)
y = np.concatenate([np.array([0]),fit_peaks])
#x = np.arange(fit_npeaks) + 1
#y = fit_peaks
def fn(x,a,b):
return a*x + b
fit,cov = curve_fit(fn, x, y, p0=(period,0))
if return_peaks:
return fit[0],cov[0][0],fit_peaks,fit_lphs,fit_hts
else:
return fit[0],cov[0][0]
def periodogram(self,pmin=0.5,pmax=60,npts=2000,
recalc=False):
pers = np.logspace(np.log10(pmin),np.log10(pmax),npts)
if np.array_equal(pers,self._pers) and not recalc:
pgram = self._pgram
else:
freqs = (2*np.pi)/(pers)
t = self.t[~self.mask]
f = self.f[~self.mask]
pgram = lombscargle(t.astype('float64'),
f.astype('float64'),
freqs.astype('float64'))
self._pgram = pgram
self._pers = pers
return pers,pgram
def pgram_peaks(self, npeaks=10, lookahead=5, **kwargs):
pers,pgram = self.periodogram(**kwargs)
maxes,mins = peakdetect(pgram,pers,lookahead=lookahead)
maxes = np.array(maxes)
inds = np.argsort(maxes[:,1])
pks,hts = maxes[inds,0][-npeaks:][::-1],maxes[inds,1][-npeaks:][::-1]
return pks,hts
def save_hdf(self, filename, path=''):
"""Writes data to file, along with acorr and pgram info.
"""
data = pd.DataFrame({'t':self.t,
'f':self.f,
'mask':self.mask})
lag, ac = self.acorr(days=False)
acorr = pd.DataFrame({'lag':lag,
'ac':ac})
pks, lphs = self.acorr_peaks()
acorr_peaks = pd.DataFrame({'lag':pks,
'lph':lphs})
pers,pg = self.periodogram()
pgram = pd.DataFrame({'period':pers,
'pgram':pg})
pks, hts = self.pgram_peaks()
pgram_peaks = pd.DataFrame({'P':pks,
'height':hts})
data.to_hdf(filename,'{}/data'.format(path))
acorr.to_hdf(filename,'{}/acorr'.format(path))
acorr_peaks.to_hdf(filename,'{}/acorr_peaks'.format(path))
pgram.to_hdf(filename,'{}/pgram'.format(path))
pgram_peaks.to_hdf(filename,'{}/pgram_peaks'.format(path))
if hasattr(self,'subseries'):
for name in self.subseries:
self.subseries[name].save_hdf(filename, path=name)
def make_chunks(self, nchunks, chunksize=300, step=100):
tmin, tmax = (self.t.min(), self.t.max())
tlist = [(t, t+chunksize) for t in np.arange(tmin, tmax+step, step)]
logging.debug('(start, stop) tlist: {}'.format(tlist))
self.make_subseries(tlist)
def make_subseries(self, tlist, names=None):
"""Splits up timeseries into chunks, according to tlist
tlist is a list of (tstart,tstop) tuples. If names is provided,
those names will be used; otherwise 'sub1', 'sub2', etc.
"""
if names is None:
names = ['sub{}'.format(i) for i in 1+np.arange(len(tlist))]
self.subseries = {}
for (tlo,thi),name in zip(tlist,names):
tok = (self.t > tlo) & (self.t < thi)
t = self.t[tok]
f = self.f[tok]
mask = self.mask[tok]
self.subseries[name] = TimeSeries(t, f, mask=mask,
default_maxlag_days=self.default_maxlag_days)
@classmethod
def load_hdf(cls, filename, path=''):
data = pd.read_hdf(filename, '{}/data'.format(path))
t = np.array(data['t'])
f = np.array(data['f'])
mask = np.array(data['mask'])
new = cls(t,f,mask=mask)
acorr = pd.read_hdf(filename, '{}/acorr'.format(path))
new._lag = np.array(acorr['lag'])
new._ac = np.array(acorr['ac'])
pgram = pd.read_hdf(filename, '{}/pgram'.format(path))
new._pers = np.array(pgram['period'])
new._pgram = np.array(pgram['pgram'])
#store.close()
i=1
has_sub = True
new.subseries = {}
while has_sub:
try:
name = 'sub{}'.format(i)
new.subseries[name] = cls.load_hdf(filename, path='{}/{}'.format(path,name))
except KeyError:
has_sub = False
i += 1
return new
class NoPeakError(Exception):
pass
| [
"numpy.log10",
"scipy.ndimage.filters.gaussian_filter",
"numpy.polyfit",
"acor.function",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.polyval",
"pandas.DataFrame",
"logging.warning",
"numpy.argmax",
"numpy.isnan",
"numpy.atleast_1d",
"scipy.optimize.c... | [((641, 657), 'numpy.atleast_1d', 'np.atleast_1d', (['t'], {}), '(t)\n', (654, 657), True, 'import numpy as np\n'), ((675, 691), 'numpy.atleast_1d', 'np.atleast_1d', (['f'], {}), '(f)\n', (688, 691), True, 'import numpy as np\n'), ((3081, 3108), 'matplotlib.pyplot.plot', 'plt.plot', (['lag', 'ac'], {}), '(lag, ac, **kwargs)\n', (3089, 3108), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6040), 'numpy.arange', 'np.arange', (['(fit_npeaks + 1)'], {}), '(fit_npeaks + 1)\n', (6024, 6040), True, 'import numpy as np\n'), ((6228, 6263), 'scipy.optimize.curve_fit', 'curve_fit', (['fn', 'x', 'y'], {'p0': '(period, 0)'}), '(fn, x, y, p0=(period, 0))\n', (6237, 6263), False, 'from scipy.optimize import leastsq, curve_fit\n'), ((7231, 7246), 'numpy.array', 'np.array', (['maxes'], {}), '(maxes)\n', (7239, 7246), True, 'import numpy as np\n'), ((7262, 7285), 'numpy.argsort', 'np.argsort', (['maxes[:, 1]'], {}), '(maxes[:, 1])\n', (7272, 7285), True, 'import numpy as np\n'), ((7523, 7582), 'pandas.DataFrame', 'pd.DataFrame', (["{'t': self.t, 'f': self.f, 'mask': self.mask}"], {}), "({'t': self.t, 'f': self.f, 'mask': self.mask})\n", (7535, 7582), True, 'import pandas as pd\n'), ((7695, 7731), 'pandas.DataFrame', 'pd.DataFrame', (["{'lag': lag, 'ac': ac}"], {}), "({'lag': lag, 'ac': ac})\n", (7707, 7731), True, 'import pandas as pd\n'), ((7822, 7861), 'pandas.DataFrame', 'pd.DataFrame', (["{'lag': pks, 'lph': lphs}"], {}), "({'lag': pks, 'lph': lphs})\n", (7834, 7861), True, 'import pandas as pd\n'), ((7950, 7993), 'pandas.DataFrame', 'pd.DataFrame', (["{'period': pers, 'pgram': pg}"], {}), "({'period': pers, 'pgram': pg})\n", (7962, 7993), True, 'import pandas as pd\n'), ((8088, 8127), 'pandas.DataFrame', 'pd.DataFrame', (["{'P': pks, 'height': hts}"], {}), "({'P': pks, 'height': hts})\n", (8100, 8127), True, 'import pandas as pd\n'), ((9796, 9815), 'numpy.array', 'np.array', (["data['t']"], {}), "(data['t'])\n", (9804, 9815), True, 'import numpy as 
np\n'), ((9828, 9847), 'numpy.array', 'np.array', (["data['f']"], {}), "(data['f'])\n", (9836, 9847), True, 'import numpy as np\n'), ((9863, 9885), 'numpy.array', 'np.array', (["data['mask']"], {}), "(data['mask'])\n", (9871, 9885), True, 'import numpy as np\n'), ((10003, 10025), 'numpy.array', 'np.array', (["acorr['lag']"], {}), "(acorr['lag'])\n", (10011, 10025), True, 'import numpy as np\n'), ((10044, 10065), 'numpy.array', 'np.array', (["acorr['ac']"], {}), "(acorr['ac'])\n", (10052, 10065), True, 'import numpy as np\n'), ((10150, 10175), 'numpy.array', 'np.array', (["pgram['period']"], {}), "(pgram['period'])\n", (10158, 10175), True, 'import numpy as np\n'), ((10197, 10221), 'numpy.array', 'np.array', (["pgram['pgram']"], {}), "(pgram['pgram'])\n", (10205, 10221), True, 'import numpy as np\n'), ((736, 752), 'numpy.isnan', 'np.isnan', (['self.f'], {}), '(self.f)\n', (744, 752), True, 'import numpy as np\n'), ((829, 864), 'numpy.median', 'np.median', (['(self.t[1:] - self.t[:-1])'], {}), '(self.t[1:] - self.t[:-1])\n', (838, 864), True, 'import numpy as np\n'), ((1882, 1906), 'acor.function', 'acor.function', (['x', 'maxlag'], {}), '(x, maxlag)\n', (1895, 1906), False, 'import acor\n'), ((1925, 1942), 'numpy.arange', 'np.arange', (['maxlag'], {}), '(maxlag)\n', (1934, 1942), True, 'import numpy as np\n'), ((2227, 2254), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['ac', 'smooth'], {}), '(ac, smooth)\n', (2242, 2254), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((4912, 4933), 'numpy.arange', 'np.arange', (['fit_npeaks'], {}), '(fit_npeaks)\n', (4921, 4933), True, 'import numpy as np\n'), ((5341, 5362), 'numpy.argmax', 'np.argmax', (['hts[close]'], {}), '(hts[close])\n', (5350, 5362), True, 'import numpy as np\n'), ((6520, 6534), 'numpy.log10', 'np.log10', (['pmin'], {}), '(pmin)\n', (6528, 6534), True, 'import numpy as np\n'), ((6535, 6549), 'numpy.log10', 'np.log10', (['pmax'], {}), '(pmax)\n', (6543, 6549), True, 'import 
numpy as np\n'), ((6567, 6599), 'numpy.array_equal', 'np.array_equal', (['pers', 'self._pers'], {}), '(pers, self._pers)\n', (6581, 6599), True, 'import numpy as np\n'), ((2055, 2094), 'numpy.polyfit', 'np.polyfit', (['lag', 'ac', 'self.flatten_order'], {}), '(lag, ac, self.flatten_order)\n', (2065, 2094), True, 'import numpy as np\n'), ((2117, 2135), 'numpy.polyval', 'np.polyval', (['c', 'lag'], {}), '(c, lag)\n', (2127, 2135), True, 'import numpy as np\n'), ((3871, 3894), 'matplotlib.pyplot.axvline', 'plt.axvline', (['pk'], {'ls': '""":"""'}), "(pk, ls=':')\n", (3882, 3894), True, 'import matplotlib.pyplot as plt\n'), ((4385, 4463), 'logging.warning', 'logging.warning', (['"""Second peak (selected) less than 1.2x height of first peak."""'], {}), "('Second peak (selected) less than 1.2x height of first peak.')\n", (4400, 4463), False, 'import logging\n'), ((6069, 6082), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (6077, 6082), True, 'import numpy as np\n'), ((8769, 8803), 'numpy.arange', 'np.arange', (['tmin', '(tmax + step)', 'step'], {}), '(tmin, tmax + step, step)\n', (8778, 8803), True, 'import numpy as np\n'), ((5046, 5077), 'numpy.absolute', 'np.absolute', (['(peaks - n * period)'], {}), '(peaks - n * period)\n', (5057, 5077), True, 'import numpy as np\n')] |
import time
import logging
import numpy as np
from supervised.callbacks.callback import Callback
from supervised.exceptions import AutoMLException
from supervised.utils.config import LOG_LEVEL
log = logging.getLogger(__name__)
log.setLevel(LOG_LEVEL)
class TotalTimeConstraint(Callback):
def __init__(self, params={}):
super(TotalTimeConstraint, self).__init__(params)
self.name = params.get("name", "total_time_constraint")
self.total_time_limit = params.get("total_time_limit")
self.total_time_start = params.get("total_time_start")
self.expected_learners_cnt = params.get("expected_learners_cnt", 1)
def on_learner_train_start(self, logs):
self.train_start_time = time.time()
def on_learner_train_end(self, logs):
if (
self.total_time_limit is not None
and len(self.learners) == 1
and self.expected_learners_cnt > 1
# just check for the first learner
# need to have more than 1 learner
# otherwise it is a finish of the training
):
one_fold_time = time.time() - self.train_start_time
estimate_all_folds = one_fold_time * self.expected_learners_cnt
total_elapsed_time = np.round(time.time() - self.total_time_start, 2)
# we need to add time for the rest of learners (assuming that all folds training time is the same)
estimate_elapsed_time = total_elapsed_time + one_fold_time * (
self.expected_learners_cnt - 1
)
if estimate_elapsed_time >= self.total_time_limit:
raise AutoMLException(
"Stop training after the first fold. "
f"Time needed to train on the first fold {np.round(one_fold_time)} seconds. "
"The time estimate for training on all folds is larger than total_time_limit."
)
if (
self.total_time_limit is not None
and len(self.learners) < self.expected_learners_cnt
# dont stop for last learner, we are finishing anyway
):
total_elapsed_time = np.round(time.time() - self.total_time_start, 2)
if total_elapsed_time > self.total_time_limit + 600:
# add 10 minutes of margin
# margin is added because of unexpected time changes
# if training on each fold will be the same
# then the training will be stopped after first fold (above condition)
raise AutoMLException(
"Force to stop the training. "
"Total time for AutoML training already exceeded."
)
def on_iteration_end(self, logs, predictions):
total_elapsed_time = np.round(time.time() - self.total_time_start, 2)
if self.total_time_limit is not None:
log.debug(
f"Total elapsed time {total_elapsed_time} seconds. "
+ f"Time left {np.round(self.total_time_limit - total_elapsed_time, 2)} seconds."
)
# not time left, stop now
if total_elapsed_time >= self.total_time_limit:
self.learner.stop_training = True
else:
log.debug(f"Total elapsed time {total_elapsed_time} seconds")
| [
"logging.getLogger",
"time.time",
"numpy.round",
"supervised.exceptions.AutoMLException"
] | [((200, 227), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import logging\n'), ((727, 738), 'time.time', 'time.time', ([], {}), '()\n', (736, 738), False, 'import time\n'), ((1116, 1127), 'time.time', 'time.time', ([], {}), '()\n', (1125, 1127), False, 'import time\n'), ((2565, 2670), 'supervised.exceptions.AutoMLException', 'AutoMLException', (['"""Force to stop the training. Total time for AutoML training already exceeded."""'], {}), "(\n 'Force to stop the training. Total time for AutoML training already exceeded.'\n )\n", (2580, 2670), False, 'from supervised.exceptions import AutoMLException\n'), ((2813, 2824), 'time.time', 'time.time', ([], {}), '()\n', (2822, 2824), False, 'import time\n'), ((1271, 1282), 'time.time', 'time.time', ([], {}), '()\n', (1280, 1282), False, 'import time\n'), ((2178, 2189), 'time.time', 'time.time', ([], {}), '()\n', (2187, 2189), False, 'import time\n'), ((1783, 1806), 'numpy.round', 'np.round', (['one_fold_time'], {}), '(one_fold_time)\n', (1791, 1806), True, 'import numpy as np\n'), ((3023, 3078), 'numpy.round', 'np.round', (['(self.total_time_limit - total_elapsed_time)', '(2)'], {}), '(self.total_time_limit - total_elapsed_time, 2)\n', (3031, 3078), True, 'import numpy as np\n')] |
import numbers
import warnings
from typing import Union
from copy import deepcopy
from abc import ABC, abstractmethod
import torch
import numpy as np
import SimpleITK as sitk
from .. import TypeData, INTENSITY, DATA
from ..data.image import Image
from ..data.subject import Subject
from ..data.dataset import ImagesDataset
from ..utils import nib_to_sitk, sitk_to_nib
from .interpolation import Interpolation
class Transform(ABC):
"""Abstract class for all TorchIO transforms.
All classes used to transform a sample from an
:py:class:`~torchio.ImagesDataset` should subclass it.
All subclasses should overwrite
:py:meth:`torchio.tranforms.Transform.apply_transform`,
which takes a sample, applies some transformation and returns the result.
Args:
p: Probability that this transform will be applied.
"""
def __init__(self, p: float = 1):
self.probability = self.parse_probability(p)
def __call__(self, data: Union[Subject, torch.Tensor, np.ndarray]):
"""Transform a sample and return the result.
Args:
data: Instance of :py:class:`~torchio.Subject`, 4D
:py:class:`torch.Tensor` or 4D NumPy array with dimensions
:math:`(C, D, H, W)`, where :math:`C` is the number of channels
and :math:`D, H, W` are the spatial dimensions. If the input is
a tensor, the affine matrix is an identity and a tensor will be
also returned.
"""
if torch.rand(1).item() > self.probability:
return data
if isinstance(data, (np.ndarray, torch.Tensor)):
is_array = isinstance(data, np.ndarray)
is_tensor = True
sample = self.parse_tensor(data)
else:
is_tensor = is_array = False
sample = data
self.parse_sample(sample)
# If the input is a tensor, it will be deepcopied when calling
# ImagesDataset.__getitem__
if not is_tensor:
sample = deepcopy(sample)
with np.errstate(all='raise'):
transformed = self.apply_transform(sample)
if is_tensor:
num_channels = len(data)
images = [
transformed[f'channel_{i}'][DATA]
for i in range(num_channels)
]
transformed = torch.cat(images)
if is_array:
transformed = transformed.numpy()
return transformed
@abstractmethod
def apply_transform(self, sample: Subject):
raise NotImplementedError
@staticmethod
def parse_probability(probability: float) -> float:
is_number = isinstance(probability, numbers.Number)
if not (is_number and 0 <= probability <= 1):
message = (
'Probability must be a number in [0, 1],'
f' not {probability}'
)
raise ValueError(message)
return probability
@staticmethod
def parse_sample(sample: Subject) -> None:
if not isinstance(sample, Subject):
message = (
'Input to a transform must be a PyTorch tensor or an instance'
' of torchio.Subject generated by a torchio.ImagesDataset,'
f' not "{type(sample)}"'
)
raise RuntimeError(message)
def parse_tensor(self, data: TypeData) -> Subject:
if isinstance(data, np.ndarray):
tensor = torch.from_numpy(data)
else:
tensor = data
tensor = tensor.float() # does nothing if already float
num_dimensions = tensor.dim()
if num_dimensions != 4:
message = (
'The input tensor must have 4 dimensions (channels, i, j, k),'
f' but has {num_dimensions}: {tensor.shape}'
)
raise RuntimeError(message)
return self._get_subject_from_tensor(tensor)
@staticmethod
def parse_interpolation(interpolation: str) -> Interpolation:
if isinstance(interpolation, Interpolation):
message = (
'Interpolation of type torchio.Interpolation'
' is deprecated, please use a string instead'
)
warnings.warn(message, FutureWarning)
elif isinstance(interpolation, str):
interpolation = interpolation.lower()
supported_values = [key.name.lower() for key in Interpolation]
if interpolation in supported_values:
interpolation = getattr(Interpolation, interpolation.upper())
else:
message = (
f'Interpolation "{interpolation}" is not among'
f' the supported values: {supported_values}'
)
raise AttributeError(message)
else:
message = (
'image_interpolation must be a string,'
f' not {type(interpolation)}'
)
raise TypeError(message)
return interpolation
@staticmethod
def _get_subject_from_tensor(tensor: torch.Tensor) -> Subject:
subject_dict = {}
for channel_index, channel_tensor in enumerate(tensor):
name = f'channel_{channel_index}'
image = Image(tensor=channel_tensor, type=INTENSITY)
subject_dict[name] = image
subject = Subject(subject_dict)
dataset = ImagesDataset([subject])
sample = dataset[0]
return sample
    @staticmethod
    def nib_to_sitk(data: TypeData, affine: TypeData):
        """Convert a (data, affine) pair to a SimpleITK image.

        Thin wrapper; inside the staticmethod the name resolves to the
        module-level ``nib_to_sitk`` helper, not to this method.
        """
        return nib_to_sitk(data, affine)
    @staticmethod
    def sitk_to_nib(image: sitk.Image):
        """Convert a SimpleITK image via the module-level ``sitk_to_nib`` helper."""
        return sitk_to_nib(image)
@property
def name(self):
return self.__class__.__name__
| [
"torch.rand",
"torch.from_numpy",
"numpy.errstate",
"copy.deepcopy",
"warnings.warn",
"torch.cat"
] | [((2031, 2047), 'copy.deepcopy', 'deepcopy', (['sample'], {}), '(sample)\n', (2039, 2047), False, 'from copy import deepcopy\n'), ((2062, 2086), 'numpy.errstate', 'np.errstate', ([], {'all': '"""raise"""'}), "(all='raise')\n", (2073, 2086), True, 'import numpy as np\n'), ((2361, 2378), 'torch.cat', 'torch.cat', (['images'], {}), '(images)\n', (2370, 2378), False, 'import torch\n'), ((3466, 3488), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (3482, 3488), False, 'import torch\n'), ((4247, 4284), 'warnings.warn', 'warnings.warn', (['message', 'FutureWarning'], {}), '(message, FutureWarning)\n', (4260, 4284), False, 'import warnings\n'), ((1513, 1526), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (1523, 1526), False, 'import torch\n')] |
""" Implements DEIMOS-specific functions, including reading in slitmask design files.
"""
import glob
import re
import os
import numpy as np
import warnings
from scipy import interpolate
from astropy.io import fits
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import parse
from pypeit.core import framematch
from pypeit.par import pypeitpar
from pypeit.spectrographs import spectrograph
from pypeit.utils import index_of_x_eq_y
from pypeit.spectrographs.slitmask import SlitMask
from pypeit.spectrographs.opticalmodel import ReflectionGrating, OpticalModel, DetectorMap
from IPython import embed
class KeckDEIMOSSpectrograph(spectrograph.Spectrograph):
"""
Child to handle Keck/DEIMOS specific code
"""
def __init__(self):
# Get it started
super(KeckDEIMOSSpectrograph, self).__init__()
self.spectrograph = 'keck_deimos'
self.telescope = telescopes.KeckTelescopePar()
self.camera = 'DEIMOS'
self.detector = [
# Detector 1
pypeitpar.DetectorPar(
dataext = 1,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.19,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.226,
ronoise = 2.570,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_01'
),
# Detector 2
pypeitpar.DetectorPar(
dataext = 2,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.46,
saturation = 65535.,
nonlinear = 0.95,
numamplifiers = 1,
gain = 1.188,
ronoise = 2.491,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_02'
),
# Detector 3
pypeitpar.DetectorPar(
dataext = 3,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.03,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.248,
ronoise = 2.618,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_03'
),
# Detector 4
pypeitpar.DetectorPar(
dataext = 4,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.80,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.220,
ronoise = 2.557,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_04'
),
# Detector 5
pypeitpar.DetectorPar(
dataext = 5,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.71,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.184,
ronoise = 2.482,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_05'
),
# Detector 6
pypeitpar.DetectorPar(
dataext = 6,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 4.28,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.177,
ronoise = 2.469,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_06'
),
# Detector 7
pypeitpar.DetectorPar(
dataext = 7,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.33,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.201,
ronoise = 2.518,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_07'),
# Detector 8
pypeitpar.DetectorPar(
dataext = 8,
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
platescale = 0.1185,
darkcurr = 3.69,
saturation = 65535.,
nonlinear = 0.95, # Changed by JFH from 0.86 to 0.95
numamplifiers = 1,
gain = 1.230,
ronoise = 2.580,
datasec = '', # These are provided by read_deimos
oscansec = '',
suffix = '_08'
)]
self.numhead = 9
# Uses default timeunit
# Uses default primary_hdrext
# self.sky_file ?
# Don't instantiate these until they're needed
self.grating = None
self.optical_model = None
self.detector_map = None
# TODO: I think all of the default_pypeit_par methods should be
# static. nonlinear_counts shouldn't need to be a parameter because
# it's held by the spectrograph class, right?
    def default_pypeit_par(self):
        """
        Set default parameters for Keck DEIMOS reductions.

        Returns:
            :class:`pypeit.par.pypeitpar.PypeItPar`: Default parameter
            set with the DEIMOS-specific overrides applied.
        """
        par = pypeitpar.PypeItPar()
        par['rdx']['spectrograph'] = 'keck_deimos'
        # Spectral flexure is measured with the boxcar method
        par['flexure']['method'] = 'boxcar'
        # Set wave tilts order
        par['calibrations']['slitedges']['edge_thresh'] = 50.
        par['calibrations']['slitedges']['fit_order'] = 3
        par['calibrations']['slitedges']['minimum_slit_gap'] = 0.25
        par['calibrations']['slitedges']['minimum_slit_length'] = 4.
        par['calibrations']['slitedges']['sync_clip'] = False
        # 1D wavelength solution
        par['calibrations']['wavelengths']['lamps'] = ['ArI','NeI','KrI','XeI']
        # Arc lines brighter than this fraction of saturation are rejected
        par['calibrations']['wavelengths']['nonlinear_counts'] \
                = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
        par['calibrations']['wavelengths']['n_first'] = 3
        par['calibrations']['wavelengths']['match_toler'] = 2.5
        # Alter the method used to combine pixel flats
        par['calibrations']['pixelflatframe']['process']['combine'] = 'median'
        par['calibrations']['pixelflatframe']['process']['sig_lohi'] = [10.,10.]
        # Set the default exposure time ranges for the frame typing
        par['calibrations']['biasframe']['exprng'] = [None, 2]
        par['calibrations']['darkframe']['exprng'] = [999999, None]     # No dark frames
        par['calibrations']['pinholeframe']['exprng'] = [999999, None]  # No pinhole frames
        par['calibrations']['pixelflatframe']['exprng'] = [None, 30]
        par['calibrations']['traceframe']['exprng'] = [None, 30]
        par['scienceframe']['exprng'] = [30, None]
        # LACosmics parameters
        par['scienceframe']['process']['sigclip'] = 4.0
        par['scienceframe']['process']['objlim'] = 1.5
        return par
def config_specific_par(self, scifile, inp_par=None):
"""
Modify the PypeIt parameters to hard-wired values used for
specific instrument configurations.
.. todo::
Document the changes made!
Args:
scifile (str):
File to use when determining the configuration and how
to adjust the input parameters.
inp_par (:class:`pypeit.par.parset.ParSet`, optional):
Parameter set used for the full run of PypeIt. If None,
use :func:`default_pypeit_par`.
Returns:
:class:`pypeit.par.parset.ParSet`: The PypeIt paramter set
adjusted for configuration specific parameter values.
"""
par = self.default_pypeit_par() if inp_par is None else inp_par
headarr = self.get_headarr(scifile)
# Turn PCA off for long slits
# TODO: I'm a bit worried that this won't catch all
# long-slits...
if ('Long' in self.get_meta_value(headarr, 'decker')) or (
'LVMslit' in self.get_meta_value(headarr, 'decker')):
par['calibrations']['slitedges']['sync_predict'] = 'nearest'
# Templates
if self.get_meta_value(headarr, 'dispname') == '600ZD':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_deimos_600.fits'
par['calibrations']['wavelengths']['lamps'] += ['CdI', 'ZnI', 'HgI']
elif self.get_meta_value(headarr, 'dispname') == '830G':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_deimos_830G.fits'
elif self.get_meta_value(headarr, 'dispname') == '1200G':
par['calibrations']['wavelengths']['method'] = 'full_template'
par['calibrations']['wavelengths']['reid_arxiv'] = 'keck_deimos_1200G.fits'
# FWHM
binning = parse.parse_binning(self.get_meta_value(headarr, 'binning'))
par['calibrations']['wavelengths']['fwhm'] = 6.0 / binning[1]
# Return
return par
    def init_meta(self):
        """
        Generate the meta data dict

        Note that the children can add to this

        Returns:
            self.meta: dict (generated in place)
        """
        meta = {}
        # Required (core)
        meta['ra'] = dict(ext=0, card='RA')
        meta['dec'] = dict(ext=0, card='DEC')
        meta['target'] = dict(ext=0, card='TARGNAME')
        meta['decker'] = dict(ext=0, card='SLMSKNAM')
        # Assembled from the header by compound_meta
        meta['binning'] = dict(card=None, compound=True)
        meta['mjd'] = dict(ext=0, card='MJD-OBS')
        meta['exptime'] = dict(ext=0, card='ELAPTIME')
        meta['airmass'] = dict(ext=0, card='AIRMASS')
        meta['dispname'] = dict(ext=0, card='GRATENAM')
        # Extras for config and frametyping
        meta['hatch'] = dict(ext=0, card='HATCHPOS')
        # Grating tilt wavelength; computed by compound_meta from GRATEPOS
        meta['dispangle'] = dict(card=None, compound=True, rtol=1e-5)
        # Image type
        meta['idname'] = dict(ext=0, card='OBSTYPE')
        # Lamps
        meta['lampstat01'] = dict(ext=0, card='LAMPS')
        # Ingest
        self.meta = meta
    def compound_meta(self, headarr, meta_key):
        """
        Compute metadata that is not a simple single-card header lookup.

        Args:
            headarr (list): List of fits headers for the frame.
            meta_key (str): Either 'binning' or 'dispangle'; any other
                key triggers msgs.error.

        Returns:
            The binning string or the grating tilt wavelength.
            NOTE(review): for 'dispangle' with a non-standard GRATEPOS
            this warns and implicitly returns None — confirm that is the
            intended behavior.
        """
        if meta_key == 'binning':
            binspatial, binspec = parse.parse_binning(headarr[0]['BINNING'])
            binning = parse.binning2string(binspec, binspatial)
            return binning
        elif meta_key == 'dispangle':
            # Which tilt-wavelength card to read depends on the slider
            # holding the grating
            if headarr[0]['GRATEPOS'] == 3:
                return headarr[0]['G3TLTWAV']
            elif headarr[0]['GRATEPOS'] == 4:
                return headarr[0]['G4TLTWAV']
            else:
                msgs.warn('This is probably a problem. Non-standard DEIMOS GRATEPOS={0}.'.format(headarr[0]['GRATEPOS']))
        else:
            msgs.error("Not ready for this compound meta")
    def configuration_keys(self):
        """
        Return the metadata keys that defines a unique instrument
        configuration.

        This list is used by :class:`pypeit.metadata.PypeItMetaData` to
        identify the unique configurations among the list of frames read
        for a given reduction.

        Returns:
            list: List of keywords of data pulled from meta
        """
        # Grating, slitmask, binning, and grating tilt define a configuration
        return ['dispname', 'decker', 'binning', 'dispangle']
    def check_frame_type(self, ftype, fitstbl, exprng=None):
        """
        Check for frames of the provided type.

        Args:
            ftype (str): Frame type to check for.
            fitstbl: Table of frame metadata; assumed to provide the
                'exptime', 'lampstat01', 'hatch', and 'idname' columns.
            exprng (list, optional): Allowed range of exposure times.

        Returns:
            `numpy.ndarray`: Boolean flag, one per row of ``fitstbl``.
        """
        good_exp = framematch.check_frame_exptime(fitstbl['exptime'], exprng)
        if ftype == 'science':
            return good_exp & (fitstbl['lampstat01'] == 'Off') & (fitstbl['hatch'] == 'open')
        if ftype == 'bias':
            return good_exp & (fitstbl['lampstat01'] == 'Off') & (fitstbl['hatch'] == 'closed')
        if ftype in ['pixelflat', 'trace']:
            # Flats and trace frames are typed together
            return good_exp & (fitstbl['idname'] == 'IntFlat') & (fitstbl['hatch'] == 'closed')
        if ftype in ['pinhole', 'dark']:
            # Don't type pinhole or dark frames
            return np.zeros(len(fitstbl), dtype=bool)
        if ftype in ['arc', 'tilt']:
            return good_exp & (fitstbl['idname'] == 'Line') & (fitstbl['hatch'] == 'closed')
        msgs.warn('Cannot determine if frames are of type {0}.'.format(ftype))
        return np.zeros(len(fitstbl), dtype=bool)
# TODO: We should aim to get rid of this...
def idname(self, ftype):
"""
Return the `idname` for the selected frame type for this instrument.
Args:
ftype (str):
File type, which should be one of the keys in
:class:`pypeit.core.framematch.FrameTypeBitMask`.
Returns:
str: The value of `idname` that should be available in the
`PypeItMetaData` instance that identifies frames of this
type.
"""
# TODO: Fill in the rest of these.
name = { 'arc': 'Line',
'tilt': None,
'bias': None,
'dark': None,
'pinhole': None,
'pixelflat': 'IntFlat',
'science': 'Object',
'standard': None,
'trace': 'IntFlat' }
return name[ftype]
    def get_rawimage(self, raw_file, det):
        """
        Read a raw DEIMOS data frame (one or more detectors).

        Data are unpacked from the multi-extension HDU.  Function is
        based :func:`pypeit.spectrographs.keck_lris.read_lris`, which
        was based on the IDL procedure ``readmhdufits.pro``.

        Parameters
        ----------
        raw_file : str
            Filename
        det : int or None
            1-indexed detector to read; if None, the full 8-chip mosaic
            is assembled.

        Returns
        -------
        image : ndarray
            Combined image (single chip or full mosaic).
        hdu : HDUList
            The opened fits file.
        exptime : float
            Exposure time read from the header.
        rawdatasec_img : ndarray
            Integer mask flagging data pixels (1 = data).
        oscansec_img : ndarray
            Integer mask flagging overscan pixels (1 = overscan).
        """
        # Check for file; allow for extra .gz, etc. suffix
        fil = glob.glob(raw_file + '*')
        if len(fil) != 1:
            msgs.error('Found {0} files matching {1}'.format(len(fil), raw_file + '*'))
        # Read
        try:
            msgs.info("Reading DEIMOS file: {:s}".format(fil[0]))
        except AttributeError:
            print("Reading DEIMOS file: {:s}".format(fil[0]))
        hdu = fits.open(fil[0])
        head0 = hdu[0].header
        # Get post, pre-pix values
        postpix = head0['POSTPIX']
        detlsize = head0['DETLSIZE']
        x0, x_npix, y0, y_npix = np.array(parse.load_sections(detlsize)).flatten()
        # Create final image: mosaic rows hold the data, plus 4 overscan
        # regions of postpix columns appended per mosaic row
        if det is None:
            image = np.zeros((x_npix, y_npix + 4 * postpix))
            rawdatasec_img = np.zeros_like(image, dtype=int)
            oscansec_img = np.zeros_like(image, dtype=int)
        # get the x and y binning factors...
        binning = head0['BINNING']
        if binning != '1,1':
            msgs.error("This binning for DEIMOS might not work. But it might..")
        # DEIMOS detectors
        nchip = 8
        if det is None:
            chips = range(nchip)
        else:
            chips = [det - 1] # Indexing starts at 0 here
        # Loop
        for tt in chips:
            data, oscan = deimos_read_1chip(hdu, tt + 1)
            # One detector??
            if det is not None:
                image = np.zeros((data.shape[0], data.shape[1] + oscan.shape[1]))
                rawdatasec_img = np.zeros_like(image, dtype=int)
                oscansec_img = np.zeros_like(image, dtype=int)
            # Indexing: where this chip's data and overscan land
            x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det)
            # Fill
            image[y1:y2, x1:x2] = data
            rawdatasec_img[y1:y2, x1:x2] = 1 # Amp
            image[o_y1:o_y2, o_x1:o_x2] = oscan
            oscansec_img[o_y1:o_y2, o_x1:o_x2] = 1 # Amp
        # Return
        exptime = hdu[self.meta['exptime']['ext']].header[self.meta['exptime']['card']]
        return image, hdu, exptime, rawdatasec_img, oscansec_img
        #return image, hdu, (dsec, osec)
#return image, hdu, (dsec, osec)
'''
def load_raw_frame(self, raw_file, det=None):
"""
Wrapper to the raw image reader for DEIMOS
Args:
raw_file: str, filename
det: int, REQUIRED
Desired detector
**null_kwargs:
Captured and never used
Returns:
raw_img: ndarray
Raw image; likely unsigned int
head0: Header
"""
raw_img, hdu, _ = read_deimos(raw_file, det=det)
return raw_img, hdu
'''
'''
def get_image_section(self, inp=None, det=1, section='datasec'):
"""
Return a string representation of a slice defining a section of
the detector image.
Overwrites base class function
Args:
inp (:obj:`str`, `astropy.io.fits.Header`_, optional):
String providing the file name to read, or the relevant
header object. Default is None, meaning that the
detector attribute must provide the image section
itself, not the header keyword.
det (:obj:`int`, optional):
1-indexed detector number.
section (:obj:`str`, optional):
The section to return. Should be either 'datasec' or
'oscansec', according to the
:class:`pypeitpar.DetectorPar` keywords.
Returns:
tuple: Returns three objects: (1) A list of string
representations for the image sections, one string per
amplifier. The sections are *always* returned in PypeIt
order: spectral then spatial. (2) Boolean indicating if the
slices are one indexed. (3) Boolean indicating if the
slices should include the last pixel. The latter two are
always returned as True following the FITS convention.
"""
# Read the file
if inp is None:
msgs.error('Must provide Keck DEIMOS file or hdulist to get image section.')
# Read em
shape, datasec, oscansec, _ = deimos_image_sections(inp, det)
if section == 'datasec':
return datasec, False, False
elif section == 'oscansec':
return oscansec, False, False
else:
raise ValueError('Unrecognized keyword: {0}'.format(section))
def get_raw_image_shape(self, hdulist, det=None, **null_kwargs):
"""
Overrides :class:`Spectrograph.get_image_shape` for LRIS images.
Must always provide a file.
"""
# Do it
self._check_detector()
shape, datasec, oscansec, _ = deimos_image_sections(hdulist, det)
self.naxis = shape
return self.naxis
'''
    def bpm(self, filename, det, shape=None):
        """
        Override parent bpm function with BPM specific to DEIMOS.

        Bad columns are hard-wired per detector (column indices appear
        to assume unbinned frames).

        .. todo::
            Allow for binning changes.

        Parameters
        ----------
        filename : str
            Frame used by :func:`empty_bpm` to set the image shape/binning.
        det : int, REQUIRED
            1-indexed detector number.
        shape : tuple, optional
            Image shape passed through to :func:`empty_bpm`.

        Returns
        -------
        bpix : ndarray
            0 = ok; 1 = Mask
        """
        bpm_img = self.empty_bpm(filename, det, shape=shape)
        # Hard-wired bad columns for each detector
        if det == 1:
            bpm_img[:,1052:1054] = 1
        elif det == 2:
            bpm_img[:,0:4] = 1
            bpm_img[:,376:381] = 1
            bpm_img[:,489] = 1
            bpm_img[:,1333:1335] = 1
            bpm_img[:,2047] = 1
        elif det == 3:
            bpm_img[:,0:4] = 1
            bpm_img[:,221] = 1
            bpm_img[:,260] = 1
            bpm_img[:,366] = 1
            bpm_img[:,816:819] = 1
            bpm_img[:,851] = 1
            bpm_img[:,940] = 1
            bpm_img[:,1167] = 1
            bpm_img[:,1280] = 1
            bpm_img[:,1301:1303] = 1
            bpm_img[:,1744:1747] = 1
            bpm_img[:,-4:] = 1
        elif det == 4:
            bpm_img[:,0:4] = 1
            bpm_img[:,47] = 1
            bpm_img[:,744] = 1
            bpm_img[:,790:792] = 1
            bpm_img[:,997:999] = 1
        elif det == 5:
            bpm_img[:,25:27] = 1
            bpm_img[:,128:130] = 1
            bpm_img[:,1535:1539] = 1
        elif det == 7:
            bpm_img[:,426:428] = 1
            bpm_img[:,676] = 1
            bpm_img[:,1176:1178] = 1
        elif det == 8:
            bpm_img[:,440] = 1
            bpm_img[:,509:513] = 1
            bpm_img[:,806] = 1
            bpm_img[:,931:934] = 1
        # NOTE: detector 6 has no extra masked columns
        return bpm_img
    def get_slitmask(self, filename):
        """
        Parse the slitmask data from a DEIMOS file into a
        :class:`pypeit.spectrographs.slitmask.SlitMask` object.

        The result is stored in :attr:`slitmask` and also returned.

        Args:
            filename (:obj:`str`):
                Name of the file to read.

        Returns:
            :class:`pypeit.spectrographs.slitmask.SlitMask`: The parsed
            slit-mask data.
        """
        # Open the file
        hdu = fits.open(filename)
        # Build the object data
        # - Find the index of the object IDs in the slit-object
        #   mapping that match the object catalog
        mapid = hdu['SlitObjMap'].data['ObjectID']
        catid = hdu['ObjectCat'].data['ObjectID']
        indx = index_of_x_eq_y(mapid, catid)
        # - Pull out the slit ID, object ID, and object coordinates
        objects = np.array([hdu['SlitObjMap'].data['dSlitId'][indx].astype(float),
                            catid.astype(float), hdu['ObjectCat'].data['RA_OBJ'],
                            hdu['ObjectCat'].data['DEC_OBJ']]).T
        # - Only keep the objects that are in the slit-object mapping
        objects = objects[mapid[indx] == catid]
        # Match the slit IDs in DesiSlits to those in BluSlits
        indx = index_of_x_eq_y(hdu['DesiSlits'].data['dSlitId'], hdu['BluSlits'].data['dSlitId'],
                               strict=True)
        # Instantiate the slit mask object and return it
        # The corner coordinates are packed as (nslit, 4 corners, 2 coords)
        self.slitmask = SlitMask(np.array([hdu['BluSlits'].data['slitX1'],
                                           hdu['BluSlits'].data['slitY1'],
                                           hdu['BluSlits'].data['slitX2'],
                                           hdu['BluSlits'].data['slitY2'],
                                           hdu['BluSlits'].data['slitX3'],
                                           hdu['BluSlits'].data['slitY3'],
                                           hdu['BluSlits'].data['slitX4'],
                                           hdu['BluSlits'].data['slitY4']]).T.reshape(-1,4,2),
                                 slitid=hdu['BluSlits'].data['dSlitId'],
                                 align=hdu['DesiSlits'].data['slitTyp'][indx] == 'A',
                                 science=hdu['DesiSlits'].data['slitTyp'][indx] == 'P',
                                 onsky=np.array([hdu['DesiSlits'].data['slitRA'][indx],
                                                 hdu['DesiSlits'].data['slitDec'][indx],
                                                 hdu['DesiSlits'].data['slitLen'][indx],
                                                 hdu['DesiSlits'].data['slitWid'][indx],
                                                 hdu['DesiSlits'].data['slitLPA'][indx]]).T,
                                 objects=objects)
        return self.slitmask
    def get_grating(self, filename):
        """
        Build the :class:`ReflectionGrating` for the setup in *filename*
        and store it in :attr:`grating` (None when the mirror is in place).

        Taken from xidl/DEEP2/spec2d/pro/deimos_omodel.pro and
        xidl/DEEP2/spec2d/pro/deimos_grating.pro

        Args:
            filename (:obj:`str`): Raw DEIMOS file to read the setup from.

        Returns:
            :class:`ReflectionGrating` or None: The instantiated grating.
        """
        hdu = fits.open(filename)
        # Grating slider
        slider = hdu[0].header['GRATEPOS']
        # TODO: Add test for slider
        # Central wavelength, grating angle, and tilt position
        if slider == 3:
            central_wave = hdu[0].header['G3TLTWAV']
            # Not used
            #angle = (hdu[0].header['G3TLTRAW'] + 29094)/2500
            tilt = hdu[0].header['G3TLTVAL']
        elif slider in [2,4]:
            # Slider is 2 or 4
            central_wave = hdu[0].header['G4TLTWAV']
            # Not used
            #angle = (hdu[0].header['G4TLTRAW'] + 40934)/2500
            tilt = hdu[0].header['G4TLTVAL']
        else:
            raise ValueError('Slider has unknown value: {0}'.format(slider))
        # Ruling
        name = hdu[0].header['GRATENAM']
        if 'Mirror' in name:
            ruling = 0
        else:
            # Remove all non-numeric characters from the name and
            # convert to a floating point number
            ruling = float(re.sub('[^0-9]', '', name))
            # Adjust to the calibrated ruling densities
            if abs(ruling-1200) < 0.5:
                ruling = 1200.06
            elif abs(ruling-831) < 2:
                ruling = 831.90
        # Get the orientation of the grating
        roll, yaw, tilt = KeckDEIMOSSpectrograph._grating_orientation(slider, ruling, tilt)
        self.grating = None if ruling == 0 else ReflectionGrating(ruling, tilt, roll, yaw,
                                                                  central_wave=central_wave)
        return self.grating
    def get_detector_map(self):
        """Return the DEIMOSDetectorMap, lazily instantiating and caching it."""
        if self.detector_map is None:
            self.detector_map = DEIMOSDetectorMap()
        return self.detector_map
@staticmethod
def _grating_orientation(slider, ruling, tilt):
"""
Return the roll, yaw, and tilt of the grating.
Numbers are hardwired.
From xidl/DEEP2/spec2d/pro/omodel_params.pro
"""
if slider == 2 and int(ruling) == 0:
# Mirror in place of the grating
return 0., 0., -19.423
if slider == 2:
raise ValueError('Ruling should be 0 if slider in position 2.')
# Use the calibrated coefficients
_ruling = int(ruling) if int(ruling) in [600, 831, 900, 1200] else 'other'
orientation_coeffs = {3: { 600: [ 0.145, -0.008, 5.6e-4, -0.182],
831: [ 0.143, 0.000, 5.6e-4, -0.182],
900: [ 0.141, 0.000, 5.6e-4, -0.134],
1200: [ 0.145, 0.055, 5.6e-4, -0.181],
'other': [ 0.145, 0.000, 5.6e-4, -0.182] },
4: { 600: [-0.065, 0.063, 6.9e-4, -0.298],
831: [-0.034, 0.060, 6.9e-4, -0.196],
900: [-0.064, 0.083, 6.9e-4, -0.277],
1200: [-0.052, 0.122, 6.9e-4, -0.294],
'other': [-0.050, 0.080, 6.9e-4, -0.250] } }
# Return calbirated roll, yaw, and tilt
return orientation_coeffs[slider][_ruling][0], \
orientation_coeffs[slider][_ruling][1], \
tilt*(1-orientation_coeffs[slider][_ruling][2]) \
+ orientation_coeffs[slider][_ruling][3]
    def mask_to_pixel_coordinates(self, x=None, y=None, wave=None, order=1, filename=None,
                                  corners=False):
        r"""
        Convert the mask coordinates in mm to pixel coordinates on the
        DEIMOS detector.

        If not already instantiated, the :attr:`slitmask`,
        :attr:`grating`, :attr:`optical_model`, and :attr:`detector_map`
        attributes are instantiated.  If these are not instantiated, a
        file must be provided.  If no arguments are provided, the
        function expects these attributes to be set and will output the
        pixel coordinates for the centers of the slits in the
        :attr:`slitmask` at the central wavelength of the
        :attr:`grating`.

        Method generally expected to be executed in one of two modes:
            - Use the `filename` to read the slit mask and determine the
              detector positions at the central wavelength.
            - Specifically map the provided x, y, and wave values to the
              detector.

        If arrays are provided for both `x`, `y`, and `wave`, the
        returned objects have the shape :math:`N_\lambda\times S_x`,
        where :math:`S_x` is the shape of the x and y arrays.

        Args:
            x (array-like, optional):
                The x coordinates in the slit mask in mm.  Default is to
                use the center of the slits in the :attr:`slitmask`.
            y (array-like, optional):
                The y coordinates in the slit mask in mm.  Default is to
                use the center of the slits in the :attr:`slitmask`.
            wave (array-like, optional):
                The wavelengths in angstroms for the propagated
                coordinates.  Default is to use the central wavelength
                of the :attr:`grating`.
            order (:obj:`int`, optional):
                The grating order.  Default is 1.
            filename (:obj:`str`, optional):
                The filename to use to (re)instantiate the
                :attr:`slitmask` and :attr:`grating`.  Default is to use
                previously instantiated attributes.
            corners (:obj:`bool`, optional):
                Instead of using the centers of the slits in the
                :attr:`slitmask`, return the detector pixel coordinates
                for the corners of all slits.

        Returns:
            numpy.ndarray: Returns 5 arrays: (1-2) the x and y
            coordinates in the image plane in mm, (3) the detector
            (1-indexed) where the slit should land at the provided
            wavelength(s), and (4-5) the pixel coordinates (1-indexed)
            in the relevant detector.

        Raises:
            ValueError:
                Raised if the user provides one but not both of the x
                and y coordinates, if no coordinates are provided or
                available within the :attr:`slitmask`, or if the
                :attr:`grating` hasn't been defined and not file is
                provided.
        """
        # Cannot provide just one of x or y
        if x is None and y is not None or x is not None and y is None:
            raise ValueError('Must provide both x and y or neither to use slit mask.')
        # Use the file to update the slitmask (if no x coordinates are
        # provided) and the grating
        if filename is not None:
            if x is None and y is None:
                # Reset the slit mask
                self.get_slitmask(filename)
            # Reset the grating
            self.get_grating(filename)
        # Check that any coordinates are available
        if x is None and y is None and self.slitmask is None:
            raise ValueError('No coordinates; Provide them directly or instantiate slit mask.')
        # Make sure the coordinates are numpy arrays
        _x = None if x is None else np.atleast_1d(x)
        _y = None if y is None else np.atleast_1d(y)
        if _x is None:
            # Use all the slit centers or corners
            _x = self.slitmask.corners[...,0].ravel() if corners else self.slitmask.center[:,0]
            _y = self.slitmask.corners[...,1].ravel() if corners else self.slitmask.center[:,1]
        # Check that the grating is defined
        if self.grating is None:
            raise ValueError('Must define a grating first; provide a file or use get_grating()')
        # Instantiate the optical model or reset it grating
        if self.optical_model is None:
            self.optical_model = DEIMOSOpticalModel(self.grating)
        else:
            self.optical_model.reset_grating(self.grating)
        # Instantiate the detector map, if necessary
        self.get_detector_map()
        # Compute the detector image plane coordinates (mm)
        x_img, y_img = self.optical_model.mask_to_imaging_coordinates(_x, _y, wave=wave,
                                                                      order=order)
        # Reshape if computing the corner positions
        if corners:
            x_img = x_img.reshape(self.slitmask.corners.shape[:2])
            y_img = y_img.reshape(self.slitmask.corners.shape[:2])
        # Use the detector map to convert to the detector coordinates
        return (x_img, y_img) + self.detector_map.ccd_coordinates(x_img, y_img)
class DEIMOSOpticalModel(OpticalModel):
    """
    Optical model for DEIMOS: the generic :class:`OpticalModel` with the
    DEIMOS hard-wired optical parameters and an added tent mirror.
    """
    # TODO: Are focal_r_surface (!R_IMSURF) and focal_r_curvature
    # (!R_CURV) supposed to be the same? If so, consolodate these into
    # a single number.
    def __init__(self, grating):
        """Instantiate the parent with the DEIMOS constants and set up the
        tent mirror reflection."""
        super(DEIMOSOpticalModel, self).__init__(
                    20018.4,             # Pupil distance in mm (!PPLDIST, !D_1)
                    2133.6,              # Radius of the image surface in mm (!R_IMSURF)
                    2124.71,             # Focal-plane radius of curvature in mm (!R_CURV)
                    2120.9,              # Mask radius of curvature in mm (!M_RCURV)
                    np.radians(6.),      # Mask tilt angle in radians (!M_ANGLE)
                    128.803,             # Mask y zero point in mm (!ZPT_YM)
                    3.378,               # Mask z zero-point in mm (!MASK_HT0)
                    2197.1,              # Collimator distance in mm (sys.COL_DST)
                    4394.2,              # Collimator radius of curvature in mm (!R_COLL)
                    -0.75,               # Collimator curvature constant (!K_COLL)
                    np.radians(0.002),   # Collimator tilt error in radians (sys.COL_ERR)
                    0.0,                 # Collimator tilt phi angle in radians (sys.COL_PHI)
                    grating,             # DEIMOS grating object
                    np.radians(2.752),   # Camera angle in radians (sys.CAM_ANG)
                    np.pi/2,             # Camera tilt phi angle in radians (sys.CAM_PHI)
                    382.0,               # Camera focal length in mm (sys.CAM_FOC)
                    DEIMOSCameraDistortion(),    # Object used to apply/remove camera distortions
                    np.radians(0.021),   # ICS rotation in radians (sys.MOS_ROT)
                    [-0.234, -3.822])    # Camera optical axis center in mm (sys.X_OPT,sys.Y_OPT)
        # Include tent mirror
        self.tent_theta = np.radians(71.5-0.5)  # Tent mirror theta angle (sys.TNT_ANG)
        self.tent_phi = np.radians(90.+0.081)   # Tent mirror phi angle (sys.TNT_PHI)
        #TENT MIRROR: this mirror is OK to leave in del-theta,phi
        self.tent_reflection \
                = OpticalModel.get_reflection_transform(self.tent_theta, self.tent_phi)
    def reset_grating(self, grating):
        """Replace the grating used by the optical model."""
        self.grating = grating
    def mask_coo_to_grating_input_vectors(self, x, y):
        """
        Propagate rays from the mask plane to the grating.

        Taken from xidl/DEEP2/spec2d/pro/model/pre_grating.pro

        Need to override parent class to add tent mirror reflection.
        """
        r = super(DEIMOSOpticalModel, self).mask_coo_to_grating_input_vectors(x, y)
        # Reflect off the tent mirror and return
        return OpticalModel.reflect(r, self.tent_reflection)
class DEIMOSCameraDistortion:
    """Apply or remove the DEIMOS camera optical distortion."""

    def __init__(self):
        # Even-order polynomial distortion coefficients
        self.c0 = 1.
        self.c2 = 0.0457563
        self.c4 = -0.3088123
        self.c6 = -14.917
        # Tabulate the forward (remove) transform over the valid angle
        # range and build its inverse by interpolation.
        angles = np.linspace(-0.6, 0.6, 1000)
        distorted = self.remove_distortion(angles)
        self.interpolator = interpolate.interp1d(distorted, angles)

    def remove_distortion(self, x):
        """Remove the camera distortion from angle(s) *x*."""
        xsq = np.square(x)
        denom = self.c0 + xsq * (self.c2 + xsq * (self.c4 + xsq * self.c6))
        return x / denom

    def apply_distortion(self, y):
        """Apply the camera distortion to angle(s) *y*.

        Values outside the tabulated interval are returned as 0 and a
        warning is issued.
        """
        valid = (y > self.interpolator.x[0]) & (y < self.interpolator.x[-1])
        if not np.all(valid):
            warnings.warn('Some input angles outside of valid distortion interval!')
        result = np.zeros_like(y)
        result[valid] = self.interpolator(y[valid])
        return result
class DEIMOSDetectorMap(DetectorMap):
    """
    A map of the center coordinates and rotation of each CCD in DEIMOS.

    !! PIXEL COORDINATES ARE 1-INDEXED !!
    """
    def __init__(self):
        """Hard-wire the DEIMOS mosaic geometry (sizes, gaps, centers,
        and per-CCD rotation)."""
        # Number of chips
        self.nccd = 8
        # Number of pixels for each chip in each dimension
        self.npix = np.array([2048, 4096])
        # The size of the CCD pixels in mm
        self.pixel_size = 0.015
        # Nominal gap between each CCD in each dimension in mm
        self.ccd_gap = np.array([1, 0.1])
        # Width of the CCD edge in each dimension in mm
        self.ccd_edge = np.array([0.154, 0.070])
        # Effective size of each chip in each dimension in pixels
        self.ccd_size = self.npix + (2*self.ccd_edge + self.ccd_gap)/self.pixel_size
        # Center coordinates: mosaic is a 4x2 grid of chips; origin gives
        # the grid position and offset the measured misalignment in mm
        origin = np.array([[-1.5,-0.5], [-0.5,-0.5], [ 0.5,-0.5], [ 1.5,-0.5],
                           [-1.5, 0.5], [-0.5, 0.5], [ 0.5, 0.5], [ 1.5, 0.5]])
        offset = np.array([[-20.05, 14.12], [-12.64, 7.25], [0.00, 0.00], [-1.34, -19.92],
                           [-19.02, 16.46], [ -9.65, 8.95], [1.88, 1.02], [ 4.81, -24.01]])
        self.ccd_center = origin * self.ccd_size[None,:] + offset
        # Construct the rotation matrix (per-CCD rotation in degrees)
        self.rotation = np.radians([-0.082, 0.030, 0.0, -0.1206, 0.136, -0.06, -0.019, -0.082])
        cosa = np.cos(self.rotation)
        sina = np.sin(self.rotation)
        self.rot_matrix = np.array([cosa, -sina, sina, cosa]).T.reshape(self.nccd,2,2)
        # ccd_geom.pro has offsets by sys.CN_XERR, but these are all 0.
'''
def deimos_image_sections(inp, det):
"""
Parse the image for the raw image shape and data sections
Args:
inp (str or `astropy.io.fits.HDUList`_ object):
det (int):
Returns:
tuple:
shape, dsec, osec, ext_items
ext_items is a large tuple of bits and pieces for other methods
ext_items = hdu, chips, postpix, image
"""
# Check for file; allow for extra .gz, etc. suffix
if isinstance(inp, str):
fil = glob.glob(inp + '*')
if len(fil) != 1:
msgs.error('Found {0} files matching {1}'.format(len(fil), inp + '*'))
# Read
try:
msgs.info("Reading DEIMOS file: {:s}".format(fil[0]))
except AttributeError:
print("Reading DEIMOS file: {:s}".format(fil[0]))
# Open
hdu = fits.open(fil[0])
else:
hdu = inp
head0 = hdu[0].header
# Get post, pre-pix values
precol = head0['PRECOL']
postpix = head0['POSTPIX']
preline = head0['PRELINE']
postline = head0['POSTLINE']
detlsize = head0['DETLSIZE']
x0, x_npix, y0, y_npix = np.array(parse.load_sections(detlsize)).flatten()
# Setup for datasec, oscansec
dsec = []
osec = []
# get the x and y binning factors...
binning = head0['BINNING']
if binning != '1,1':
msgs.error("This binning for DEIMOS might not work. But it might..")
xbin, ybin = [int(ibin) for ibin in binning.split(',')]
# DEIMOS detectors
nchip = 8
if det is None:
chips = range(nchip)
else:
chips = [det-1] # Indexing starts at 0 here
for tt in chips:
x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det)
# Sections
idsec = '[{:d}:{:d},{:d}:{:d}]'.format(y1, y2, x1, x2)
iosec = '[{:d}:{:d},{:d}:{:d}]'.format(o_y1, o_y2, o_x1, o_x2)
dsec.append(idsec)
osec.append(iosec)
# Create final image (if the full image is requested)
if det is None:
image = np.zeros((x_npix,y_npix+4*postpix))
shape = image.shape
else:
image = None
head = hdu[chips[0]+1].header
shape = (head['NAXIS2'], head['NAXIS1']-precol) # We don't load up the precol
# Pack up a few items for use elsewhere
ext_items = hdu, chips, postpix, image
# Return
return shape, dsec, osec, ext_items
def read_deimos(raw_file, det=None):
"""
Read a raw DEIMOS data frame (one or more detectors)
Packed in a multi-extension HDU
Based on pypeit.arlris.read_lris...
Based on readmhdufits.pro
Parameters
----------
raw_file : str
Filename
Returns
-------
array : ndarray
Combined image
hdu: HDUList
sections : tuple
List of datasec, oscansec sections
"""
# Parse the header
shape, dsec, osec, ext_items = deimos_image_sections(raw_file, det)
# Unpack
hdu, chips, postpix, image = ext_items
# Loop
for tt in chips:
data, oscan = deimos_read_1chip(hdu, tt+1)
#if n_elements(nobias) eq 0 then nobias = 0
# One detector??
if det is not None:
image = np.zeros((data.shape[0],data.shape[1]+oscan.shape[1]))
# Indexing
x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2 = indexing(tt, postpix, det=det)
# Fill
image[y1:y2, x1:x2] = data
image[o_y1:o_y2, o_x1:o_x2] = oscan
# Return
return image, hdu, (dsec,osec)
'''
def indexing(itt, postpix, det=None):
"""
Some annoying book-keeping for instrument placement.
Parameters
----------
itt : int
postpix : int
det : int, optional
Returns
-------
"""
# Deal with single chip
if det is not None:
tt = 0
else:
tt = itt
ii = 2048
jj = 4096
# y indices
if tt < 4:
y1, y2 = 0, jj
else:
y1, y2 = jj, 2*jj
o_y1, o_y2 = y1, y2
# x
x1, x2 = (tt%4)*ii, (tt%4 + 1)*ii
if det is None:
o_x1 = 4*ii + (tt%4)*postpix
else:
o_x1 = ii + (tt%4)*postpix
o_x2 = o_x1 + postpix
# Return
return x1, x2, y1, y2, o_x1, o_x2, o_y1, o_y2
def deimos_read_1chip(hdu,chipno):
""" Read one of the DEIMOS detectors
Args:
hdu (astropy.io.fits.HDUList):
chipno (int):
Returns:
np.ndarray, np.ndarray:
data, oscan
"""
# Extract datasec from header
datsec = hdu[chipno].header['DATASEC']
detsec = hdu[chipno].header['DETSEC']
postpix = hdu[0].header['POSTPIX']
precol = hdu[0].header['PRECOL']
x1_dat, x2_dat, y1_dat, y2_dat = np.array(parse.load_sections(datsec)).flatten()
x1_det, x2_det, y1_det, y2_det = np.array(parse.load_sections(detsec)).flatten()
# This rotates the image to be increasing wavelength to the top
#data = np.rot90((hdu[chipno].data).T, k=2)
#nx=data.shape[0]
#ny=data.shape[1]
# Science data
fullimage = hdu[chipno].data
data = fullimage[x1_dat:x2_dat,y1_dat:y2_dat]
# Overscan
oscan = fullimage[:,y2_dat:]
# Flip as needed
if x1_det > x2_det:
data = np.flipud(data)
oscan = np.flipud(oscan)
if y1_det > y2_det:
data = np.fliplr(data)
oscan = np.fliplr(oscan)
# Return
return data, oscan
| [
"numpy.radians",
"pypeit.par.pypeitpar.PypeItPar",
"pypeit.par.pypeitpar.DetectorPar",
"scipy.interpolate.interp1d",
"numpy.array",
"astropy.io.fits.open",
"numpy.sin",
"pypeit.core.framematch.check_frame_exptime",
"pypeit.core.parse.load_sections",
"pypeit.msgs.error",
"pypeit.spectrographs.opt... | [((919, 948), 'pypeit.telescopes.KeckTelescopePar', 'telescopes.KeckTelescopePar', ([], {}), '()\n', (946, 948), False, 'from pypeit import telescopes\n'), ((9511, 9532), 'pypeit.par.pypeitpar.PypeItPar', 'pypeitpar.PypeItPar', ([], {}), '()\n', (9530, 9532), False, 'from pypeit.par import pypeitpar\n'), ((15945, 16003), 'pypeit.core.framematch.check_frame_exptime', 'framematch.check_frame_exptime', (["fitstbl['exptime']", 'exprng'], {}), "(fitstbl['exptime'], exprng)\n", (15975, 16003), False, 'from pypeit.core import framematch\n'), ((18516, 18541), 'glob.glob', 'glob.glob', (["(raw_file + '*')"], {}), "(raw_file + '*')\n", (18525, 18541), False, 'import glob\n'), ((18858, 18875), 'astropy.io.fits.open', 'fits.open', (['fil[0]'], {}), '(fil[0])\n', (18867, 18875), False, 'from astropy.io import fits\n'), ((25410, 25429), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (25419, 25429), False, 'from astropy.io import fits\n'), ((25697, 25726), 'pypeit.utils.index_of_x_eq_y', 'index_of_x_eq_y', (['mapid', 'catid'], {}), '(mapid, catid)\n', (25712, 25726), False, 'from pypeit.utils import index_of_x_eq_y\n'), ((26226, 26326), 'pypeit.utils.index_of_x_eq_y', 'index_of_x_eq_y', (["hdu['DesiSlits'].data['dSlitId']", "hdu['BluSlits'].data['dSlitId']"], {'strict': '(True)'}), "(hdu['DesiSlits'].data['dSlitId'], hdu['BluSlits'].data[\n 'dSlitId'], strict=True)\n", (26241, 26326), False, 'from pypeit.utils import index_of_x_eq_y\n'), ((27993, 28012), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (28002, 28012), False, 'from astropy.io import fits\n'), ((38672, 38694), 'numpy.radians', 'np.radians', (['(71.5 - 0.5)'], {}), '(71.5 - 0.5)\n', (38682, 38694), True, 'import numpy as np\n'), ((38758, 38782), 'numpy.radians', 'np.radians', (['(90.0 + 0.081)'], {}), '(90.0 + 0.081)\n', (38768, 38782), True, 'import numpy as np\n'), ((38936, 39005), 
'pypeit.spectrographs.opticalmodel.OpticalModel.get_reflection_transform', 'OpticalModel.get_reflection_transform', (['self.tent_theta', 'self.tent_phi'], {}), '(self.tent_theta, self.tent_phi)\n', (38973, 39005), False, 'from pypeit.spectrographs.opticalmodel import ReflectionGrating, OpticalModel, DetectorMap\n'), ((39497, 39542), 'pypeit.spectrographs.opticalmodel.OpticalModel.reflect', 'OpticalModel.reflect', (['r', 'self.tent_reflection'], {}), '(r, self.tent_reflection)\n', (39517, 39542), False, 'from pypeit.spectrographs.opticalmodel import ReflectionGrating, OpticalModel, DetectorMap\n'), ((39781, 39809), 'numpy.linspace', 'np.linspace', (['(-0.6)', '(0.6)', '(1000)'], {}), '(-0.6, 0.6, 1000)\n', (39792, 39809), True, 'import numpy as np\n'), ((39876, 39902), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['y', 'x'], {}), '(y, x)\n', (39896, 39902), False, 'from scipy import interpolate\n'), ((39953, 39965), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (39962, 39965), True, 'import numpy as np\n'), ((40282, 40298), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (40295, 40298), True, 'import numpy as np\n'), ((40684, 40706), 'numpy.array', 'np.array', (['[2048, 4096]'], {}), '([2048, 4096])\n', (40692, 40706), True, 'import numpy as np\n'), ((40870, 40888), 'numpy.array', 'np.array', (['[1, 0.1]'], {}), '([1, 0.1])\n', (40878, 40888), True, 'import numpy as np\n'), ((40970, 40993), 'numpy.array', 'np.array', (['[0.154, 0.07]'], {}), '([0.154, 0.07])\n', (40978, 40993), True, 'import numpy as np\n'), ((41194, 41312), 'numpy.array', 'np.array', (['[[-1.5, -0.5], [-0.5, -0.5], [0.5, -0.5], [1.5, -0.5], [-1.5, 0.5], [-0.5, \n 0.5], [0.5, 0.5], [1.5, 0.5]]'], {}), '([[-1.5, -0.5], [-0.5, -0.5], [0.5, -0.5], [1.5, -0.5], [-1.5, 0.5],\n [-0.5, 0.5], [0.5, 0.5], [1.5, 0.5]])\n', (41202, 41312), True, 'import numpy as np\n'), ((41353, 41492), 'numpy.array', 'np.array', (['[[-20.05, 14.12], [-12.64, 7.25], [0.0, 0.0], [-1.34, -19.92], 
[-19.02, \n 16.46], [-9.65, 8.95], [1.88, 1.02], [4.81, -24.01]]'], {}), '([[-20.05, 14.12], [-12.64, 7.25], [0.0, 0.0], [-1.34, -19.92], [-\n 19.02, 16.46], [-9.65, 8.95], [1.88, 1.02], [4.81, -24.01]])\n', (41361, 41492), True, 'import numpy as np\n'), ((41658, 41728), 'numpy.radians', 'np.radians', (['[-0.082, 0.03, 0.0, -0.1206, 0.136, -0.06, -0.019, -0.082]'], {}), '([-0.082, 0.03, 0.0, -0.1206, 0.136, -0.06, -0.019, -0.082])\n', (41668, 41728), True, 'import numpy as np\n'), ((41745, 41766), 'numpy.cos', 'np.cos', (['self.rotation'], {}), '(self.rotation)\n', (41751, 41766), True, 'import numpy as np\n'), ((41782, 41803), 'numpy.sin', 'np.sin', (['self.rotation'], {}), '(self.rotation)\n', (41788, 41803), True, 'import numpy as np\n'), ((47138, 47153), 'numpy.flipud', 'np.flipud', (['data'], {}), '(data)\n', (47147, 47153), True, 'import numpy as np\n'), ((47170, 47186), 'numpy.flipud', 'np.flipud', (['oscan'], {}), '(oscan)\n', (47179, 47186), True, 'import numpy as np\n'), ((47226, 47241), 'numpy.fliplr', 'np.fliplr', (['data'], {}), '(data)\n', (47235, 47241), True, 'import numpy as np\n'), ((47258, 47274), 'numpy.fliplr', 'np.fliplr', (['oscan'], {}), '(oscan)\n', (47267, 47274), True, 'import numpy as np\n'), ((1051, 1307), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(1)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(4.19)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.226)', 'ronoise': '(2.57)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': '"""_01"""'}), "(dataext=1, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=4.19, saturation=65535.0,\n nonlinear=0.95, numamplifiers=1, gain=1.226, ronoise=2.57, datasec='',\n oscansec='', suffix='_01')\n", (1072, 1307), False, 'from pypeit.par import pypeitpar\n'), ((2048, 2305), 
'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(2)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(3.46)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.188)', 'ronoise': '(2.491)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': '"""_02"""'}), "(dataext=2, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=3.46, saturation=65535.0,\n nonlinear=0.95, numamplifiers=1, gain=1.188, ronoise=2.491, datasec='',\n oscansec='', suffix='_02')\n", (2069, 2305), False, 'from pypeit.par import pypeitpar\n'), ((3009, 3266), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(3)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(4.03)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.248)', 'ronoise': '(2.618)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': '"""_03"""'}), "(dataext=3, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=4.03, saturation=65535.0,\n nonlinear=0.95, numamplifiers=1, gain=1.248, ronoise=2.618, datasec='',\n oscansec='', suffix='_03')\n", (3030, 3266), False, 'from pypeit.par import pypeitpar\n'), ((4006, 4261), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(4)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(3.8)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.22)', 'ronoise': '(2.557)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': '"""_04"""'}), "(dataext=4, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=3.8, saturation=65535.0,\n nonlinear=0.95, 
numamplifiers=1, gain=1.22, ronoise=2.557, datasec='',\n oscansec='', suffix='_04')\n", (4027, 4261), False, 'from pypeit.par import pypeitpar\n'), ((5003, 5260), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(5)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(4.71)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.184)', 'ronoise': '(2.482)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': '"""_05"""'}), "(dataext=5, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=4.71, saturation=65535.0,\n nonlinear=0.95, numamplifiers=1, gain=1.184, ronoise=2.482, datasec='',\n oscansec='', suffix='_05')\n", (5024, 5260), False, 'from pypeit.par import pypeitpar\n'), ((6000, 6257), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(6)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(4.28)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.177)', 'ronoise': '(2.469)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': '"""_06"""'}), "(dataext=6, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=4.28, saturation=65535.0,\n nonlinear=0.95, numamplifiers=1, gain=1.177, ronoise=2.469, datasec='',\n oscansec='', suffix='_06')\n", (6021, 6257), False, 'from pypeit.par import pypeitpar\n'), ((6996, 7253), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(7)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(3.33)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.201)', 'ronoise': '(2.518)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': 
'"""_07"""'}), "(dataext=7, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=3.33, saturation=65535.0,\n nonlinear=0.95, numamplifiers=1, gain=1.201, ronoise=2.518, datasec='',\n oscansec='', suffix='_07')\n", (7017, 7253), False, 'from pypeit.par import pypeitpar\n'), ((7963, 8218), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'dataext': '(8)', 'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.1185)', 'darkcurr': '(3.69)', 'saturation': '(65535.0)', 'nonlinear': '(0.95)', 'numamplifiers': '(1)', 'gain': '(1.23)', 'ronoise': '(2.58)', 'datasec': '""""""', 'oscansec': '""""""', 'suffix': '"""_08"""'}), "(dataext=8, specaxis=0, specflip=False, xgap=0.0, ygap\n =0.0, ysize=1.0, platescale=0.1185, darkcurr=3.69, saturation=65535.0,\n nonlinear=0.95, numamplifiers=1, gain=1.23, ronoise=2.58, datasec='',\n oscansec='', suffix='_08')\n", (7984, 8218), False, 'from pypeit.par import pypeitpar\n'), ((14760, 14802), 'pypeit.core.parse.parse_binning', 'parse.parse_binning', (["headarr[0]['BINNING']"], {}), "(headarr[0]['BINNING'])\n", (14779, 14802), False, 'from pypeit.core import parse\n'), ((14825, 14866), 'pypeit.core.parse.binning2string', 'parse.binning2string', (['binspec', 'binspatial'], {}), '(binspec, binspatial)\n', (14845, 14866), False, 'from pypeit.core import parse\n'), ((19171, 19211), 'numpy.zeros', 'np.zeros', (['(x_npix, y_npix + 4 * postpix)'], {}), '((x_npix, y_npix + 4 * postpix))\n', (19179, 19211), True, 'import numpy as np\n'), ((19241, 19272), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'int'}), '(image, dtype=int)\n', (19254, 19272), True, 'import numpy as np\n'), ((19300, 19331), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'int'}), '(image, dtype=int)\n', (19313, 19331), True, 'import numpy as np\n'), ((19454, 19523), 'pypeit.msgs.error', 'msgs.error', (['"""This binning for DEIMOS might not 
work. But it might.."""'], {}), "('This binning for DEIMOS might not work. But it might..')\n", (19464, 19523), False, 'from pypeit import msgs\n'), ((29370, 29439), 'pypeit.spectrographs.opticalmodel.ReflectionGrating', 'ReflectionGrating', (['ruling', 'tilt', 'roll', 'yaw'], {'central_wave': 'central_wave'}), '(ruling, tilt, roll, yaw, central_wave=central_wave)\n', (29387, 29439), False, 'from pypeit.spectrographs.opticalmodel import ReflectionGrating, OpticalModel, DetectorMap\n'), ((35232, 35248), 'numpy.atleast_1d', 'np.atleast_1d', (['x'], {}), '(x)\n', (35245, 35248), True, 'import numpy as np\n'), ((35285, 35301), 'numpy.atleast_1d', 'np.atleast_1d', (['y'], {}), '(y)\n', (35298, 35301), True, 'import numpy as np\n'), ((37321, 37336), 'numpy.radians', 'np.radians', (['(6.0)'], {}), '(6.0)\n', (37331, 37336), True, 'import numpy as np\n'), ((37832, 37849), 'numpy.radians', 'np.radians', (['(0.002)'], {}), '(0.002)\n', (37842, 37849), True, 'import numpy as np\n'), ((38090, 38107), 'numpy.radians', 'np.radians', (['(2.752)'], {}), '(2.752)\n', (38100, 38107), True, 'import numpy as np\n'), ((38450, 38467), 'numpy.radians', 'np.radians', (['(0.021)'], {}), '(0.021)\n', (38460, 38467), True, 'import numpy as np\n'), ((40171, 40183), 'numpy.all', 'np.all', (['indx'], {}), '(indx)\n', (40177, 40183), True, 'import numpy as np\n'), ((40197, 40269), 'warnings.warn', 'warnings.warn', (['"""Some input angles outside of valid distortion interval!"""'], {}), "('Some input angles outside of valid distortion interval!')\n", (40210, 40269), False, 'import warnings\n'), ((15280, 15326), 'pypeit.msgs.error', 'msgs.error', (['"""Not ready for this compound meta"""'], {}), "('Not ready for this compound meta')\n", (15290, 15326), False, 'from pypeit import msgs\n'), ((19884, 19941), 'numpy.zeros', 'np.zeros', (['(data.shape[0], data.shape[1] + oscan.shape[1])'], {}), '((data.shape[0], data.shape[1] + oscan.shape[1]))\n', (19892, 19941), True, 'import numpy as np\n'), 
((19975, 20006), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'int'}), '(image, dtype=int)\n', (19988, 20006), True, 'import numpy as np\n'), ((20038, 20069), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'int'}), '(image, dtype=int)\n', (20051, 20069), True, 'import numpy as np\n'), ((28991, 29017), 're.sub', 're.sub', (['"""[^0-9]"""', '""""""', 'name'], {}), "('[^0-9]', '', name)\n", (28997, 29017), False, 'import re\n'), ((46639, 46666), 'pypeit.core.parse.load_sections', 'parse.load_sections', (['datsec'], {}), '(datsec)\n', (46658, 46666), False, 'from pypeit.core import parse\n'), ((46724, 46751), 'pypeit.core.parse.load_sections', 'parse.load_sections', (['detsec'], {}), '(detsec)\n', (46743, 46751), False, 'from pypeit.core import parse\n'), ((19056, 19085), 'pypeit.core.parse.load_sections', 'parse.load_sections', (['detlsize'], {}), '(detlsize)\n', (19075, 19085), False, 'from pypeit.core import parse\n'), ((27317, 27541), 'numpy.array', 'np.array', (["[hdu['DesiSlits'].data['slitRA'][indx], hdu['DesiSlits'].data['slitDec'][\n indx], hdu['DesiSlits'].data['slitLen'][indx], hdu['DesiSlits'].data[\n 'slitWid'][indx], hdu['DesiSlits'].data['slitLPA'][indx]]"], {}), "([hdu['DesiSlits'].data['slitRA'][indx], hdu['DesiSlits'].data[\n 'slitDec'][indx], hdu['DesiSlits'].data['slitLen'][indx], hdu[\n 'DesiSlits'].data['slitWid'][indx], hdu['DesiSlits'].data['slitLPA'][indx]]\n )\n", (27325, 27541), True, 'import numpy as np\n'), ((41830, 41865), 'numpy.array', 'np.array', (['[cosa, -sina, sina, cosa]'], {}), '([cosa, -sina, sina, cosa])\n', (41838, 41865), True, 'import numpy as np\n'), ((26444, 26724), 'numpy.array', 'np.array', (["[hdu['BluSlits'].data['slitX1'], hdu['BluSlits'].data['slitY1'], hdu[\n 'BluSlits'].data['slitX2'], hdu['BluSlits'].data['slitY2'], hdu[\n 'BluSlits'].data['slitX3'], hdu['BluSlits'].data['slitY3'], hdu[\n 'BluSlits'].data['slitX4'], hdu['BluSlits'].data['slitY4']]"], {}), "([hdu['BluSlits'].data['slitX1'], 
hdu['BluSlits'].data['slitY1'],\n hdu['BluSlits'].data['slitX2'], hdu['BluSlits'].data['slitY2'], hdu[\n 'BluSlits'].data['slitX3'], hdu['BluSlits'].data['slitY3'], hdu[\n 'BluSlits'].data['slitX4'], hdu['BluSlits'].data['slitY4']])\n", (26452, 26724), True, 'import numpy as np\n')] |
# tests/artclass/test_train.py
# Test artclass/train.py unit components.
import numpy as np
from artclass import train
def test_find_best_threshold():
y_true = np.array([[1, 0], [0, 1], [0, 0], [1, 1]])
y_prob = np.array([[0.75, 0.25], [0.25, 0.75], [0.25, 0.25], [0.75, 0.75]])
assert train.find_best_threshold(y_true=y_true, y_prob=y_prob) == 0.75
| [
"numpy.array",
"artclass.train.find_best_threshold"
] | [((168, 210), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [0, 0], [1, 1]]'], {}), '([[1, 0], [0, 1], [0, 0], [1, 1]])\n', (176, 210), True, 'import numpy as np\n'), ((224, 290), 'numpy.array', 'np.array', (['[[0.75, 0.25], [0.25, 0.75], [0.25, 0.25], [0.75, 0.75]]'], {}), '([[0.75, 0.25], [0.25, 0.75], [0.25, 0.25], [0.75, 0.75]])\n', (232, 290), True, 'import numpy as np\n'), ((302, 357), 'artclass.train.find_best_threshold', 'train.find_best_threshold', ([], {'y_true': 'y_true', 'y_prob': 'y_prob'}), '(y_true=y_true, y_prob=y_prob)\n', (327, 357), False, 'from artclass import train\n')] |
import pickle
import numpy as np
import pytest
from sklearn.neighbors._quad_tree import _QuadTree
from sklearn.utils import check_random_state
def test_quadtree_boundary_computation():
# Introduce a point into a quad tree with boundaries not easy to compute.
Xs = []
# check a random case
Xs.append(np.array([[-1, 1], [-4, -1]], dtype=np.float32))
# check the case where only 0 are inserted
Xs.append(np.array([[0, 0], [0, 0]], dtype=np.float32))
# check the case where only negative are inserted
Xs.append(np.array([[-1, -2], [-4, 0]], dtype=np.float32))
# check the case where only small numbers are inserted
Xs.append(np.array([[-1e-6, 1e-6], [-4e-6, -1e-6]], dtype=np.float32))
for X in Xs:
tree = _QuadTree(n_dimensions=2, verbose=0)
tree.build_tree(X)
tree._check_coherence()
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
tree = _QuadTree(n_dimensions=2, verbose=0)
tree.build_tree(X)
tree._check_coherence()
@pytest.mark.parametrize('n_dimensions', (2, 3))
@pytest.mark.parametrize('protocol', (0, 1, 2))
def test_quad_tree_pickle(n_dimensions, protocol):
rng = check_random_state(0)
X = rng.random_sample((10, n_dimensions))
tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
tree.build_tree(X)
s = pickle.dumps(tree, protocol=protocol)
bt2 = pickle.loads(s)
for x in X:
cell_x_tree = tree.get_cell(x)
cell_x_bt2 = bt2.get_cell(x)
assert cell_x_tree == cell_x_bt2
@pytest.mark.parametrize('n_dimensions', (2, 3))
def test_qt_insert_duplicate(n_dimensions):
rng = check_random_state(0)
X = rng.random_sample((10, n_dimensions))
Xd = np.r_[X, X[:5]]
tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
tree.build_tree(Xd)
cumulative_size = tree.cumulative_size
leafs = tree.leafs
# Assert that the first 5 are indeed duplicated and that the next
# ones are single point leaf
for i, x in enumerate(X):
cell_id = tree.get_cell(x)
assert leafs[cell_id]
assert cumulative_size[cell_id] == 1 + (i < 5)
def test_summarize():
# Simple check for quad tree's summarize
angle = 0.9
X = np.array([[-10., -10.], [9., 10.], [10., 9.], [10., 10.]],
dtype=np.float32)
query_pt = X[0, :]
n_dimensions = X.shape[1]
offset = n_dimensions + 2
qt = _QuadTree(n_dimensions, verbose=0)
qt.build_tree(X)
idx, summary = qt._py_summarize(query_pt, X, angle)
node_dist = summary[n_dimensions]
node_size = summary[n_dimensions + 1]
# Summary should contain only 1 node with size 3 and distance to
# X[1:] barycenter
barycenter = X[1:].mean(axis=0)
ds2c = ((X[0] - barycenter) ** 2).sum()
assert idx == offset
assert node_size == 3, "summary size = {}".format(node_size)
assert np.isclose(node_dist, ds2c)
# Summary should contain all 3 node with size 1 and distance to
# each point in X[1:] for ``angle=0``
idx, summary = qt._py_summarize(query_pt, X, 0.)
barycenter = X[1:].mean(axis=0)
ds2c = ((X[0] - barycenter) ** 2).sum()
assert idx == 3 * (offset)
for i in range(3):
node_dist = summary[i * offset + n_dimensions]
node_size = summary[i * offset + n_dimensions + 1]
ds2c = ((X[0] - X[i + 1]) ** 2).sum()
assert node_size == 1, "summary size = {}".format(node_size)
assert np.isclose(node_dist, ds2c)
| [
"sklearn.utils.check_random_state",
"numpy.isclose",
"pickle.dumps",
"pytest.mark.parametrize",
"numpy.array",
"pickle.loads",
"sklearn.neighbors._quad_tree._QuadTree"
] | [((2471, 2518), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_dimensions"""', '(2, 3)'], {}), "('n_dimensions', (2, 3))\n", (2494, 2518), False, 'import pytest\n'), ((2521, 2567), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""protocol"""', '(0, 1, 2)'], {}), "('protocol', (0, 1, 2))\n", (2544, 2567), False, 'import pytest\n'), ((3009, 3056), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_dimensions"""', '(2, 3)'], {}), "('n_dimensions', (2, 3))\n", (3032, 3056), False, 'import pytest\n'), ((2631, 2652), 'sklearn.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (2649, 2652), False, 'from sklearn.utils import check_random_state\n'), ((2716, 2763), 'sklearn.neighbors._quad_tree._QuadTree', '_QuadTree', ([], {'n_dimensions': 'n_dimensions', 'verbose': '(0)'}), '(n_dimensions=n_dimensions, verbose=0)\n', (2725, 2763), False, 'from sklearn.neighbors._quad_tree import _QuadTree\n'), ((2799, 2836), 'pickle.dumps', 'pickle.dumps', (['tree'], {'protocol': 'protocol'}), '(tree, protocol=protocol)\n', (2811, 2836), False, 'import pickle\n'), ((2848, 2863), 'pickle.loads', 'pickle.loads', (['s'], {}), '(s)\n', (2860, 2863), False, 'import pickle\n'), ((3113, 3134), 'sklearn.utils.check_random_state', 'check_random_state', (['(0)'], {}), '(0)\n', (3131, 3134), False, 'from sklearn.utils import check_random_state\n'), ((3222, 3269), 'sklearn.neighbors._quad_tree._QuadTree', '_QuadTree', ([], {'n_dimensions': 'n_dimensions', 'verbose': '(0)'}), '(n_dimensions=n_dimensions, verbose=0)\n', (3231, 3269), False, 'from sklearn.neighbors._quad_tree import _QuadTree\n'), ((3727, 3816), 'numpy.array', 'np.array', (['[[-10.0, -10.0], [9.0, 10.0], [10.0, 9.0], [10.0, 10.0]]'], {'dtype': 'np.float32'}), '([[-10.0, -10.0], [9.0, 10.0], [10.0, 9.0], [10.0, 10.0]], dtype=np\n .float32)\n', (3735, 3816), True, 'import numpy as np\n'), ((3920, 3954), 'sklearn.neighbors._quad_tree._QuadTree', '_QuadTree', (['n_dimensions'], 
{'verbose': '(0)'}), '(n_dimensions, verbose=0)\n', (3929, 3954), False, 'from sklearn.neighbors._quad_tree import _QuadTree\n'), ((4404, 4431), 'numpy.isclose', 'np.isclose', (['node_dist', 'ds2c'], {}), '(node_dist, ds2c)\n', (4414, 4431), True, 'import numpy as np\n'), ((334, 381), 'numpy.array', 'np.array', (['[[-1, 1], [-4, -1]]'], {'dtype': 'np.float32'}), '([[-1, 1], [-4, -1]], dtype=np.float32)\n', (342, 381), True, 'import numpy as np\n'), ((446, 490), 'numpy.array', 'np.array', (['[[0, 0], [0, 0]]'], {'dtype': 'np.float32'}), '([[0, 0], [0, 0]], dtype=np.float32)\n', (454, 490), True, 'import numpy as np\n'), ((562, 609), 'numpy.array', 'np.array', (['[[-1, -2], [-4, 0]]'], {'dtype': 'np.float32'}), '([[-1, -2], [-4, 0]], dtype=np.float32)\n', (570, 609), True, 'import numpy as np\n'), ((686, 749), 'numpy.array', 'np.array', (['[[-1e-06, 1e-06], [-4e-06, -1e-06]]'], {'dtype': 'np.float32'}), '([[-1e-06, 1e-06], [-4e-06, -1e-06]], dtype=np.float32)\n', (694, 749), True, 'import numpy as np\n'), ((783, 819), 'sklearn.neighbors._quad_tree._QuadTree', '_QuadTree', ([], {'n_dimensions': '(2)', 'verbose': '(0)'}), '(n_dimensions=2, verbose=0)\n', (792, 819), False, 'from sklearn.neighbors._quad_tree import _QuadTree\n'), ((1135, 1179), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {'dtype': 'np.float32'}), '([[1, 2], [3, 4]], dtype=np.float32)\n', (1143, 1179), True, 'import numpy as np\n'), ((1254, 1306), 'numpy.array', 'np.array', (['[[1.0, 2.0], [1.0, 3.0]]'], {'dtype': 'np.float32'}), '([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32)\n', (1262, 1306), True, 'import numpy as np\n'), ((1390, 1450), 'numpy.array', 'np.array', (['[[1.00001, 2.0], [1.00002, 3.0]]'], {'dtype': 'np.float32'}), '([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32)\n', (1398, 1450), True, 'import numpy as np\n'), ((1525, 1577), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 2.0]]'], {'dtype': 'np.float32'}), '([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32)\n', (1533, 1577), True, 
'import numpy as np\n'), ((1661, 1721), 'numpy.array', 'np.array', (['[[1.0, 2.00001], [3.0, 2.00002]]'], {'dtype': 'np.float32'}), '([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32)\n', (1669, 1721), True, 'import numpy as np\n'), ((1808, 1876), 'numpy.array', 'np.array', (['[[1.00001, 2.00001], [1.00002, 2.00002]]'], {'dtype': 'np.float32'}), '([[1.00001, 2.00001], [1.00002, 2.00002]], dtype=np.float32)\n', (1816, 1876), True, 'import numpy as np\n'), ((2021, 2092), 'numpy.array', 'np.array', (['[[1, 0.0003817754041], [2, 0.000381775375]]'], {'dtype': 'np.float32'}), '([[1, 0.0003817754041], [2, 0.000381775375]], dtype=np.float32)\n', (2029, 2092), True, 'import numpy as np\n'), ((2238, 2313), 'numpy.array', 'np.array', (['[[0.0003817754041, 1.0], [0.000381775375, 2.0]]'], {'dtype': 'np.float32'}), '([[0.0003817754041, 1.0], [0.000381775375, 2.0]], dtype=np.float32)\n', (2246, 2313), True, 'import numpy as np\n'), ((2367, 2403), 'sklearn.neighbors._quad_tree._QuadTree', '_QuadTree', ([], {'n_dimensions': '(2)', 'verbose': '(0)'}), '(n_dimensions=2, verbose=0)\n', (2376, 2403), False, 'from sklearn.neighbors._quad_tree import _QuadTree\n'), ((4993, 5020), 'numpy.isclose', 'np.isclose', (['node_dist', 'ds2c'], {}), '(node_dist, ds2c)\n', (5003, 5020), True, 'import numpy as np\n')] |
# Copyright (c) 2017 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, absolute_import
import typing
from functools import partial
import numpy as np
import six
from nnef_tools.conversion.tensorflow import tf_pb_to_tf_py
from nnef_tools.core import utils
from nnef_tools.io.tensorflow.tf_graph import *
from nnef_tools.shape_inference import shape_inference as infer
# noinspection PyProtectedMember
_tf_py_dtype_to_tf_pb_dtype = utils.key_value_swapped(tf_pb_to_tf_py._tf_py_dtype_by_tf_pb_dtype)
def convert(tf_graph):
# type: (TFGraph)->None
for tensor in tf_graph.tensors:
if tensor.is_variable:
if tensor.data.dtype == np.int64:
tensor.data = tensor.data.astype(np.int32)
tensor.dtype = "int32"
if tensor.data.dtype == np.float64:
tensor.data = tensor.data.astype(np.float32)
tensor.dtype = "float32"
for op in list(tf_graph.operations):
if op.name == "tf.nn.softmax":
expand_softmax(tf_graph, op)
for op in list(tf_graph.operations):
assert op.name in _DefaultConverters, "No tf_py_to_tf_pb converter for {}".format(op.name)
_DefaultConverters[op.name](op)
for tensor in tf_graph.tensors:
tensor.dtype = _tf_py_dtype_to_tf_pb_dtype[tensor.dtype]
for op in tf_graph.operations:
if op.name not in ['LogicalAnd', 'LogicalNot', 'LogicalOr']:
if op.name in ['Select', 'Conv2DBackpropInput', 'Conv3DBackpropInputV2']:
op.attribs['T'] = op.inputs[1].dtype
else:
op.attribs['T'] = op.inputs[0].dtype
if op.name == 'MaxPoolWithArgmax':
op.attribs['Targmax'] = 'DT_INT64'
tf_graph.generate_missing_names()
def expand_softmax(tf_graph, tf_op):
    """Rewrite tf.nn.softmax so it operates on the last axis of a 2D input.

    The pb-level Softmax only supports axis -1 of a rank-2 tensor, so other
    configurations are bracketed with transpose and/or reshape operations.
    Mutates tf_graph in place.
    """
    assert tf_op.input.rank != 0
    # Normalize the axis attribute to a non-negative index; the op itself
    # is always left operating on axis -1 after expansion.
    axis = tf_op.attribs.get('axis')
    if axis is None:
        axis = -1
    if axis < 0:
        axis += tf_op.input.rank
    tf_op.attribs['axis'] = -1
    # Already in the supported form: rank 2, softmax over the last axis.
    if tf_op.input.rank == 2 and axis == 1:
        return
    if axis != tf_op.input.rank - 1:
        # Move the softmax axis to the end, and transpose back afterwards.
        perm = utils.without(range(tf_op.input.rank), axis) + [axis]
        perm_inv = utils.inverse_permutation(perm)
        transpose = TFOperation(graph=tf_graph,
                                name="tf.transpose",
                                inputs=tf_op.input,
                                attribs=dict(perm=perm),
                                outputs=TFTensor(graph=tf_graph,
                                                 name=None,
                                                 shape=infer.transpose(input=tf_op.input.shape, axes=perm),
                                                 dtype=tf_op.input.dtype))
        tf_op.inputs = transpose.output
        # Re-wire: softmax writes a fresh tensor; a trailing transpose
        # restores the original axis order into the old output tensor.
        old_output = tf_op.output
        tf_op.outputs = TFTensor(graph=tf_graph,
                                 name=None,
                                 shape=tf_op.input.shape,
                                 dtype=tf_op.input.dtype)
        TFOperation(graph=tf_graph,
                    name="tf.transpose",
                    inputs=tf_op.output,
                    attribs=dict(perm=perm_inv),
                    outputs=old_output)
    if tf_op.input.rank != 2:
        # Flatten leading dims into the batch dim, then reshape back.
        shape = [-1, tf_op.input.shape[-1]]
        reshape = TFOperation(graph=tf_graph,
                              name="tf.reshape",
                              inputs=tf_op.input,
                              attribs=dict(shape=shape),
                              outputs=TFTensor(graph=tf_graph,
                                               name=None,
                                               shape=infer.reshape(input=tf_op.input.shape, shape=shape),
                                               dtype=tf_op.input.dtype))
        tf_op.inputs = reshape.output
        old_output = tf_op.output
        tf_op.outputs = TFTensor(graph=tf_graph, name=None, shape=list(tf_op.input.shape), dtype=tf_op.input.dtype)
        TFOperation(graph=tf_graph,
                    name="tf.reshape",
                    inputs=tf_op.output,
                    attribs=dict(shape=old_output.shape),
                    outputs=old_output)
def create_constant_tensor(graph, value, np_dtype=None):
    """Wrap *value* in a constant TFTensor attached to *graph*.

    When no explicit dtype is requested, 64-bit defaults are narrowed
    (float64 -> float32, int64 -> int32), since constants must be int32
    at most places in the graph.
    """
    if np_dtype is not None:
        arr = np.array(value, dtype=np_dtype)
    else:
        arr = np.array(value)
        for wide, narrow in ((np.float64, np.float32), (np.int64, np.int32)):
            if arr.dtype == wide:
                arr = arr.astype(narrow)
                break
    return TFTensor(graph=graph,
                    name=None,
                    shape=list(arr.shape),
                    dtype=str(arr.dtype),
                    data=arr.flatten().tolist())
def generic_converter(op,  # type: TFOperation
                      target_name,  # type: str
                      revert_inputs=False,  # type: bool
                      attrib_name_dict=None,  # type: typing.Optional[typing.Dict[str, str]]
                      attrib_to_input_dict=None,  # type: typing.Optional[typing.Dict[str, int]]
                      attribs_to_remove=None  # type: typing.Optional[typing.List[str]]
                      ):
    """Generic tf_py -> tf_pb op rewrite: rename the op, optionally reverse
    its inputs, rename attributes, turn attributes into constant inputs,
    and drop unwanted attributes.  Mutates *op* in place.
    """
    op.name = target_name
    if revert_inputs:
        op.inputs = tuple(reversed(op.inputs))
    if attrib_name_dict:
        # Rename attribute keys; unmapped keys pass through untouched.
        op.attribs = {attrib_name_dict.get(key, key): value
                      for key, value in six.iteritems(op.attribs)}
    if attrib_to_input_dict:
        # Insert constant-tensor inputs in ascending position order so each
        # insertion index is valid at the time it is applied.
        new_inputs = list(op.inputs)
        for name, position in sorted(six.iteritems(attrib_to_input_dict),
                                     key=lambda pair: pair[1]):
            if position < 0:
                position += len(new_inputs) + 1
            new_inputs.insert(position, create_constant_tensor(op.graph, op.attribs.pop(name)))
        op.inputs = new_inputs
    if attribs_to_remove:
        for name in attribs_to_remove:
            del op.attribs[name]
def generate_back_converters(converters):
    """Invert a forward (tf_pb -> tf_py) converter table.

    Every entry that is a partial over tf_pb_to_tf_py.generic_converter is
    mirrored into a partial over our generic_converter with the keyword
    arguments swapped into the reverse direction.
    """
    back_converters = {}
    for target_name, converter in six.iteritems(converters):
        if not (isinstance(converter, partial)
                and converter.func == tf_pb_to_tf_py.generic_converter):
            continue
        kw = converter.keywords
        back_converters[kw['target_name']] = partial(
            generic_converter,
            target_name=target_name,
            revert_inputs=kw.get('revert_inputs', False),
            attrib_name_dict=utils.key_value_swapped(kw.get('attrib_name_dict', {})),
            attrib_to_input_dict=utils.key_value_swapped(kw.get('input_to_attrib_dict', {})),
            attribs_to_remove=list(six.iterkeys(kw.get('new_attribs', {}))),
        )
    return back_converters
def convert_batch_normalization(op):
    # type: (TFOperation)->None
    """Rewrite tf.nn.batch_normalization as a pb-level FusedBatchNorm.

    Reorders the inputs from (input, mean, variance, offset, scale) to
    FusedBatchNorm's (input, scale, offset, mean, variance), reshapes the
    statistics to rank 1, and converts the epsilon/data-format attributes.
    """
    op.name = "FusedBatchNorm"
    input, mean, variance, offset, scale = op.inputs
    # Heuristic: statistics stored with a non-trivial second dimension
    # indicate channels-first (NCHW) layout; otherwise assume NHWC.
    is_nhwc = not (mean.rank >= 2 and mean.shape[1] > 1)
    def make_1d(tensor):
        # type: (TFTensor)->TFTensor
        # FusedBatchNorm requires rank-1 statistics; flatten via Reshape.
        if tensor.rank == 1:
            return tensor
        return TFOperation(name='Reshape',
                           graph=tensor.graph,
                           inputs=(tensor, create_constant_tensor(graph=tensor.graph, value=[tensor.count])),
                           outputs=TFTensor(graph=tensor.graph, shape=[tensor.count], dtype=tensor.dtype)).output
    op.inputs = (input, make_1d(scale), make_1d(offset), make_1d(mean), make_1d(variance))
    op.attribs['is_training'] = False
    op.attribs['epsilon'] = op.attribs['variance_epsilon']
    op.attribs['data_format'] = 'NHWC' if is_nhwc else 'NCHW'
    del op.attribs['variance_epsilon']
def convert_flatten(op):
    # type: (TFOperation)->None
    """Rewrite tf.layers.flatten as a Reshape to the known output shape."""
    shape_const = create_constant_tensor(op.graph, list(op.output.shape))
    op.name = "Reshape"
    op.inputs = tuple(op.inputs) + (shape_const,)
def convert_split(op):  # TODO what if split has -1, and fix split
    # type: (TFOperation)->None
    """Rewrite tf.split as pb-level SplitV (explicit sizes) or Split (count).

    The 'num_or_size_splits' and 'axis' attributes become constant-tensor
    inputs; a 'num_split' attribute records the number of output pieces.
    """
    sizes = op.attribs['num_or_size_splits']
    axis = op.attribs['axis']
    if isinstance(sizes, (list, tuple)):
        # Explicit per-piece sizes -> SplitV(input, size_splits, axis).
        op.name = 'SplitV'
        op.inputs = (op.input,
                     create_constant_tensor(op.graph, sizes, np_dtype=np.int64),
                     create_constant_tensor(op.graph, axis))
        op.attribs['num_split'] = len(sizes)
    else:
        # Even split count -> Split(axis, input).
        op.name = 'Split'
        op.inputs = (create_constant_tensor(op.graph, axis), op.input)
        op.attribs['num_split'] = sizes
    del op.attribs['num_or_size_splits']
    del op.attribs['axis']
def postconvert_concat(op):
    # type: (TFOperation)->None
    """Record 'N', the number of concatenated tensors (inputs minus the
    trailing axis tensor), as ConcatV2 requires."""
    input_count = len(op.inputs)
    op.attribs['N'] = input_count - 1
def postconvert_slice(op):
    # type: (TFOperation)->None
    """Pin the Slice op's index dtype attribute to int32."""
    op.attribs.update(Index='DT_INT32')
def converter_sequence(fun1, fun2):
    """Compose two converters: the returned callable applies *fun1* to the
    op, then *fun2*."""
    def chained(op):
        for step in (fun1, fun2):
            step(op)
    return chained
# noinspection PyProtectedMember
# Build the back-converter table by inverting the forward (pb -> py) table,
# then patch in the handful of conversions that need hand-written logic.
_DefaultConverters = generate_back_converters(
    tf_pb_to_tf_py._DefaultConverters
)  # type: typing.Dict[str, typing.Callable[[TFOperation], None]]
_DefaultConverters.update({
    "tf.nn.batch_normalization": convert_batch_normalization,
    "tf.layers.flatten": convert_flatten,
    # concat/slice reuse the generated converter, then fix up attributes.
    'tf.concat': converter_sequence(_DefaultConverters['tf.concat'], postconvert_concat),
    'tf.nn.depthwise_conv2d': _DefaultConverters['tf.nn.depthwise_conv2d_native'],
    'tf.split': convert_split,
    'tf.slice': converter_sequence(_DefaultConverters['tf.slice'], postconvert_slice)
})
| [
"nnef_tools.shape_inference.shape_inference.reshape",
"numpy.array",
"nnef_tools.core.utils.inverse_permutation",
"functools.partial",
"nnef_tools.core.utils.key_value_swapped",
"six.iteritems",
"nnef_tools.shape_inference.shape_inference.transpose"
] | [((1003, 1070), 'nnef_tools.core.utils.key_value_swapped', 'utils.key_value_swapped', (['tf_pb_to_tf_py._tf_py_dtype_by_tf_pb_dtype'], {}), '(tf_pb_to_tf_py._tf_py_dtype_by_tf_pb_dtype)\n', (1026, 1070), False, 'from nnef_tools.core import utils\n'), ((6749, 6774), 'six.iteritems', 'six.iteritems', (['converters'], {}), '(converters)\n', (6762, 6774), False, 'import six\n'), ((2754, 2785), 'nnef_tools.core.utils.inverse_permutation', 'utils.inverse_permutation', (['perm'], {}), '(perm)\n', (2779, 2785), False, 'from nnef_tools.core import utils\n'), ((4875, 4906), 'numpy.array', 'np.array', (['value'], {'dtype': 'np_dtype'}), '(value, dtype=np_dtype)\n', (4883, 4906), True, 'import numpy as np\n'), ((4931, 4946), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (4939, 4946), True, 'import numpy as np\n'), ((5880, 5905), 'six.iteritems', 'six.iteritems', (['op.attribs'], {}), '(op.attribs)\n', (5893, 5905), False, 'import six\n'), ((7356, 7561), 'functools.partial', 'partial', (['generic_converter'], {'target_name': 'target_name', 'revert_inputs': 'revert_inputs', 'attrib_name_dict': 'attrib_name_dict', 'attrib_to_input_dict': 'attrib_to_input_dict', 'attribs_to_remove': 'attribs_to_remove'}), '(generic_converter, target_name=target_name, revert_inputs=\n revert_inputs, attrib_name_dict=attrib_name_dict, attrib_to_input_dict=\n attrib_to_input_dict, attribs_to_remove=attribs_to_remove)\n', (7363, 7561), False, 'from functools import partial\n'), ((6187, 6222), 'six.iteritems', 'six.iteritems', (['attrib_to_input_dict'], {}), '(attrib_to_input_dict)\n', (6200, 6222), False, 'import six\n'), ((3176, 3227), 'nnef_tools.shape_inference.shape_inference.transpose', 'infer.transpose', ([], {'input': 'tf_op.input.shape', 'axes': 'perm'}), '(input=tf_op.input.shape, axes=perm)\n', (3191, 3227), True, 'from nnef_tools.shape_inference import shape_inference as infer\n'), ((4245, 4296), 'nnef_tools.shape_inference.shape_inference.reshape', 'infer.reshape', ([], 
{'input': 'tf_op.input.shape', 'shape': 'shape'}), '(input=tf_op.input.shape, shape=shape)\n', (4258, 4296), True, 'from nnef_tools.shape_inference import shape_inference as infer\n')] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# This script is written to investigate what (if any) mathematical
# relationship exists among the orders that could be used to simplify
# fitting and reduce jitter.
#
# <NAME>
# Created: 2018-05-08
# Last modified: 2018-12-24
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Current version:
__version__ = "0.0.2"
## Python version-agnostic module reloading:
try:
reload # Python 2.7
except NameError:
try:
from importlib import reload # Python 3.4+
except ImportError:
from imp import reload # Python 3.0 - 3.3
## Modules:
#import argparse
#import mimetypes
#import linecache
#import getopt
#import shutil
import resource
import signal
#import glob
import gc
import os
import sys
import time
import copy
import numpy as np
#from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
#import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#import matplotlib.ticker as mt
#import matplotlib._pylab_helpers as hlp
#from matplotlib.colors import LogNorm
#from matplotlib import colors
#import matplotlib.colors as mplcolors
#import matplotlib.gridspec as gridspec
#from functools import partial
#from collections import OrderedDict
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
#import PIL.Image as pli
#import seaborn as sns
#import cmocean
import theil_sen as ts
#import window_filter as wf
#import itertools as itt
import nres_extraction
reload(nres_extraction)
trio = nres_extraction.TraceIO()
##--------------------------------------------------------------------------##
## Fast FITS I/O:
#try:
# import fitsio
#except ImportError:
# sys.stderr.write("\nError: fitsio module not found!\n")
# sys.exit(1)
## FITS I/O:
#try:
# import astropy.io.fits as pf
#except ImportError:
# try:
# import pyfits as pf
# except ImportError:
# sys.stderr.write("\nError! No FITS I/O module found!\n"
# "Install either astropy.io.fits or pyfits and try again!\n\n")
# sys.exit(1)
## ASCII I/O:
#try:
# import astropy.io.ascii as aia
#except ImportError:
# sys.stderr.write("\nError: astropy module not found!\n")
# sys.exit(1)
##--------------------------------------------------------------------------##
## Colors for fancy terminal output:
## (ANSI escape codes: N* = normal weight, B* = bold; ENDC resets.)
NRED = '\033[0;31m' ; BRED = '\033[1;31m'
NGREEN = '\033[0;32m' ; BGREEN = '\033[1;32m'
NYELLOW = '\033[0;33m' ; BYELLOW = '\033[1;33m'
NBLUE = '\033[0;34m' ; BBLUE = '\033[1;34m'
NMAG = '\033[0;35m' ; BMAG = '\033[1;35m'
NCYAN = '\033[0;36m' ; BCYAN = '\033[1;36m'
NWHITE = '\033[0;37m' ; BWHITE = '\033[1;37m'
ENDC = '\033[0m'
## Suppress colors in cron jobs:
## (FUNCDEF='--nocolors' in the environment blanks every escape sequence.)
if (os.getenv('FUNCDEF') == '--nocolors'):
    NRED = '' ; BRED = ''
    NGREEN = '' ; BGREEN = ''
    NYELLOW = '' ; BYELLOW = ''
    NBLUE = '' ; BBLUE = ''
    NMAG = '' ; BMAG = ''
    NCYAN = '' ; BCYAN = ''
    NWHITE = '' ; BWHITE = ''
    ENDC = ''
## Fancy text:
degree_sign = u'\N{DEGREE SIGN}'
## Dividers:
halfdiv = "----------------------------------------"
fulldiv = halfdiv + halfdiv
##--------------------------------------------------------------------------##
## Save FITS image with clobber (astropy / pyfits):
#def qsave(iname, idata, header=None, padkeys=1000, **kwargs):
# this_func = sys._getframe().f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# if header:
# while (len(header) < padkeys):
# header.append() # pad header
# if os.path.isfile(iname):
# os.remove(iname)
# pf.writeto(iname, idata, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
## Save FITS image with clobber (fitsio):
#def qsave(iname, idata, header=None, **kwargs):
# this_func = sys._getframe().f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# #if os.path.isfile(iname):
# # os.remove(iname)
# fitsio.write(iname, idata, clobber=True, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
def ldmap(things):
    """Map each element of *things* to its position (label -> ordinal)."""
    return {label: index for index, label in enumerate(things)}
def argnear(vec, val):
    """Index of the element of *vec* nearest to *val*."""
    return np.argmin(np.abs(vec - val))
## Robust location/scale estimate using median/MAD:
def calc_ls_med_MAD(a, axis=None):
    """Robust location/scale: median and MAD of *a*, with the MAD scaled by
    1.4826 so it estimates the standard deviation of normal data."""
    center = np.median(a, axis=axis)
    spread = 1.482602218 * np.median(np.abs(a - center), axis=axis)
    return (center, spread)
## Robust location/scale estimate using median/IQR:
def calc_ls_med_IQR(a, axis=None):
    """Robust location/scale: median and IQR of *a*, with the IQR scaled by
    0.7413 so it estimates the standard deviation of normal data."""
    q25, q50, q75 = np.percentile(a, [25, 50, 75], axis=axis)
    return (q50, 0.741301109 * (q75 - q25))
## Select inliners given specified sigma threshold:
def pick_inliers(data, sig_thresh):
    """Boolean mask selecting elements within *sig_thresh* robust sigmas
    (median/IQR estimate) of the median of *data*."""
    center, spread = calc_ls_med_IQR(data)
    deviation = np.abs(data - center) / spread
    return deviation <= sig_thresh
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Get trace file from command line:
if (len(sys.argv) != 2):
    # FIX: the original used __FILE__, which is a C preprocessor idiom and
    # is undefined in Python (NameError).  The script's path is __file__.
    sys.stderr.write("Syntax: %s trace_file.fits\n"
            % os.path.basename(__file__))
    sys.exit(1)
trace_file = sys.argv[1]
if not os.path.isfile(trace_file):
    sys.stderr.write("File not found: %s\n" % trace_file)
    sys.exit(1)
##--------------------------------------------------------------------------##
## Load traces for analysis:
trdata = trio.load_traces(trace_file)
traces = trdata.get_trace_list()
# Stack the three per-trace fit parameters into an (n_traces, 3) array.
pars = np.array([x['params'].tolist() for x in traces])
c0, c1, c2 = pars.T
# Even/odd rows are sliced apart — presumably the instrument's two fibers
# alternate in the trace list; TODO confirm against nres_extraction.
fiber1 = slice(0, None, 2)
fiber2 = slice(1, None, 2)
##--------------------------------------------------------------------------##
## Theil-Sen line-fitting:
#model = ts.linefit(xvals, yvals)
#icept, slope = ts.linefit(xvals, yvals)
# Robust (Theil-Sen) linear fits testing whether sqrt(c0)*c1 and c0*c2
# vary linearly with c0 (the relationship this script investigates).
icept2, slope2 = ts.linefit(c0, np.sqrt(c0) * c1)
line2 = icept2 + c0 * slope2
icept3, slope3 = ts.linefit(c0, c0 * c2)
line3 = icept3 + c0 * slope3
# Per-fiber fits of c0*c2, their fitted lines, and residuals:
f1_icept3, f1_slope3 = ts.linefit(c0[fiber1], (c0 * c2)[fiber1])
f2_icept3, f2_slope3 = ts.linefit(c0[fiber2], (c0 * c2)[fiber2])
f1_line3 = f1_icept3 + c0[fiber1] * f1_slope3
f2_line3 = f2_icept3 + c0[fiber2] * f2_slope3
f1_resid3 = f1_line3 - (c0 * c2)[fiber1]
f2_resid3 = f2_line3 - (c0 * c2)[fiber2]
##--------------------------------------------------------------------------##
## Force c2 onto best-fit line:
linearized_pars = np.copy(pars)
linearized_pars[fiber1, 2] = f1_line3 / c0[fiber1]
linearized_pars[fiber2, 2] = f2_line3 / c0[fiber2]
# FIX: the original deep-copied an undefined name 'data' (NameError).  The
# per-trace parameter dicts loaded above are 'traces'; copy those before
# overwriting each 'params' entry with the linearized coefficients.
linearized_data = copy.deepcopy(traces)
for tfit, ldata in zip(linearized_data, linearized_pars):
    tfit['params'] = ldata
mod_save = "modified_trace.fits"
sys.stderr.write("Saving adjusted trace file '%s' ... " % mod_save)
trio.store_traces(mod_save, linearized_data)
sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
## Misc:
#def log_10_product(x, pos):
# """The two args are the value and tick position.
# Label ticks with the product of the exponentiation."""
# return '%.2f' % (x) # floating-point
#
#formatter = plt.FuncFormatter(log_10_product) # wrap function for use
## Convenient, percentile-based plot limits:
#def nice_limits(vec, pctiles=[1,99], pad=1.2):
# ends = np.percentile(vec, pctiles)
# middle = np.average(ends)
# return (middle + pad * (ends - middle))
## Convenient plot limits for datetime/astropy.Time content:
#def nice_time_limits(tvec, buffer=0.05):
# lower = tvec.min()
# upper = tvec.max()
# ndays = upper - lower
# return ((lower - 0.05*ndays).datetime, (upper + 0.05*ndays).datetime)
## Convenient limits for datetime objects:
#def dt_limits(vec, pad=0.1):
# tstart, tstop = vec.min(), vec.max()
# trange = (tstop - tstart).total_seconds()
# tpad = dt.timedelta(seconds=pad*trange)
# return (tstart - tpad, tstop + tpad)
##--------------------------------------------------------------------------##
## Plot config:
# gridspec examples:
# https://matplotlib.org/users/gridspec.html
#gs1 = gridspec.GridSpec(4, 4)
#gs1.update(wspace=0.025, hspace=0.05) # set axis spacing
##--------------------------------------------------------------------------##
## Plot the three trace parameters (rows) under three scalings (columns).
fig_dims = (12, 10)
fig = plt.figure(1, figsize=fig_dims)
plt.gcf().clf()
fig, axs = plt.subplots(3, 3, sharex='col', figsize=fig_dims, num=1)
pkwargs = {'ls':'', 'ms':2, 'marker':'.'}
ypos = pars[:, 0]
yrng = np.arange(ypos.size)
# Column scalings: raw parameters, parameters * sqrt(y), parameters * y.
# FIX: these arrays do not depend on the loop index, so they are computed
# once here instead of being rebuilt identically on every iteration.
ppar0 = np.copy(pars)
ppar1 = pars * np.sqrt(ypos[:, None])
ppar2 = pars * ypos[:, None]
for i in range(3):
    # Each panel shows the two fibers (even/odd traces) separately.
    axs[i, 0].plot(ypos[fiber1], ppar0[fiber1, i], label='p0', **pkwargs)
    axs[i, 0].plot(ypos[fiber2], ppar0[fiber2, i], **pkwargs)
    axs[i, 1].plot(ypos[fiber1], ppar1[fiber1, i], **pkwargs)
    axs[i, 1].plot(ypos[fiber2], ppar1[fiber2, i], **pkwargs)
    axs[i, 2].plot(ypos[fiber1], ppar2[fiber1, i], **pkwargs)
    axs[i, 2].plot(ypos[fiber2], ppar2[fiber2, i], **pkwargs)
# Overlay the Theil-Sen best-fit lines computed above.
axs[1, 1].plot(ypos, line2, c='r', lw=0.5)
axs[2, 2].plot(ypos, line3, c='r', lw=0.5)
## Disable axis offsets:
#ax1.xaxis.get_major_formatter().set_useOffset(False)
#ax1.yaxis.get_major_formatter().set_useOffset(False)
#ax1.plot(kde_pnts, kde_vals)
#blurb = "some text"
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes)
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes,
# va='top', ha='left', bbox=dict(facecolor='white', pad=10.0))
# fontdict={'family':'monospace'}) # fixed-width
#colors = cm.rainbow(np.linspace(0, 1, len(plot_list)))
#for camid, c in zip(plot_list, colors):
# cam_data = subsets[camid]
# xvalue = cam_data['CCDATEMP']
# yvalue = cam_data['PIX_MED']
# yvalue = cam_data['IMEAN']
# ax1.scatter(xvalue, yvalue, color=c, lw=0, label=camid)
#mtickpos = [2,5,7]
#ndecades = 1.0 # for symlog, set width of linear portion in units of dex
#nonposx='mask' | nonposx='clip' | nonposy='mask' | nonposy='clip'
#ax1.set_xscale('log', basex=10, nonposx='mask', subsx=mtickpos)
#ax1.set_xscale('log', nonposx='clip', subsx=[3])
#ax1.set_yscale('symlog', basey=10, linthreshy=0.1, linscaley=ndecades)
#ax1.xaxis.set_major_formatter(formatter) # re-format x ticks
#ax1.set_ylim(ax1.get_ylim()[::-1])
#ax1.set_xlabel('whatever', labelpad=30) # push X label down
#ax1.set_xticks([1.0, 3.0, 10.0, 30.0, 100.0])
#ax1.set_xticks([1, 2, 3], ['Jan', 'Feb', 'Mar'])
#for label in ax1.get_xticklabels():
# label.set_rotation(30)
#ax1.set_xlim(nice_limits(xvec, pctiles=[1,99], pad=1.2))
#ax1.set_ylim(nice_limits(yvec, pctiles=[1,99], pad=1.2))
#spts = ax1.scatter(x, y, lw=0, s=5)
#cbar = fig.colorbar(spts, orientation='vertical')
#cbar.formatter.set_useOffset(False)
#cbar.update_ticks()
fig.tight_layout()  # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()  # redraw the current figure (non-blocking)
# cyclical colormap ... cmocean.cm.phase
# cmocean: https://matplotlib.org/cmocean/
######################################################################
# CHANGELOG (plot_traces.py):
#---------------------------------------------------------------------
#
# 2018-05-08:
# -- Increased __version__ to 0.0.1.
# -- First created plot_traces.py.
#
| [
"numpy.sqrt",
"theil_sen.linefit",
"sys.exit",
"copy.deepcopy",
"nres_extraction.TraceIO",
"numpy.arange",
"numpy.abs",
"matplotlib.pyplot.gcf",
"imp.reload",
"os.path.isfile",
"sys.stderr.write",
"matplotlib.pyplot.draw",
"numpy.copy",
"numpy.median",
"os.getenv",
"matplotlib.pyplot.f... | [((2113, 2136), 'imp.reload', 'reload', (['nres_extraction'], {}), '(nres_extraction)\n', (2119, 2136), False, 'from imp import reload\n'), ((2144, 2169), 'nres_extraction.TraceIO', 'nres_extraction.TraceIO', ([], {}), '()\n', (2167, 2169), False, 'import nres_extraction\n'), ((6979, 7002), 'theil_sen.linefit', 'ts.linefit', (['c0', '(c0 * c2)'], {}), '(c0, c0 * c2)\n', (6989, 7002), True, 'import theil_sen as ts\n'), ((7056, 7097), 'theil_sen.linefit', 'ts.linefit', (['c0[fiber1]', '(c0 * c2)[fiber1]'], {}), '(c0[fiber1], (c0 * c2)[fiber1])\n', (7066, 7097), True, 'import theil_sen as ts\n'), ((7121, 7162), 'theil_sen.linefit', 'ts.linefit', (['c0[fiber2]', '(c0 * c2)[fiber2]'], {}), '(c0[fiber2], (c0 * c2)[fiber2])\n', (7131, 7162), True, 'import theil_sen as ts\n'), ((7469, 7482), 'numpy.copy', 'np.copy', (['pars'], {}), '(pars)\n', (7476, 7482), True, 'import numpy as np\n'), ((7604, 7623), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (7617, 7623), False, 'import copy\n'), ((7742, 7809), 'sys.stderr.write', 'sys.stderr.write', (['("Saving adjusted trace file \'%s\' ... " % mod_save)'], {}), '("Saving adjusted trace file \'%s\' ... 
" % mod_save)\n', (7758, 7809), False, 'import sys\n'), ((7855, 7882), 'sys.stderr.write', 'sys.stderr.write', (['"""done.\n"""'], {}), "('done.\\n')\n", (7871, 7882), False, 'import sys\n'), ((9302, 9333), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'fig_dims'}), '(1, figsize=fig_dims)\n', (9312, 9333), True, 'import matplotlib.pyplot as plt\n'), ((9430, 9487), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'sharex': '"""col"""', 'figsize': 'fig_dims', 'num': '(1)'}), "(3, 3, sharex='col', figsize=fig_dims, num=1)\n", (9442, 9487), True, 'import matplotlib.pyplot as plt\n'), ((9848, 9868), 'numpy.arange', 'np.arange', (['ypos.size'], {}), '(ypos.size)\n', (9857, 9868), True, 'import numpy as np\n'), ((12357, 12367), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (12365, 12367), True, 'import matplotlib.pyplot as plt\n'), ((3386, 3406), 'os.getenv', 'os.getenv', (['"""FUNCDEF"""'], {}), "('FUNCDEF')\n", (3395, 3406), False, 'import os\n'), ((5202, 5225), 'numpy.median', 'np.median', (['a'], {'axis': 'axis'}), '(a, axis=axis)\n', (5211, 5225), True, 'import numpy as np\n'), ((5506, 5547), 'numpy.percentile', 'np.percentile', (['a', '[25, 50, 75]'], {'axis': 'axis'}), '(a, [25, 50, 75], axis=axis)\n', (5519, 5547), True, 'import numpy as np\n'), ((6240, 6251), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6248, 6251), False, 'import sys\n'), ((6285, 6311), 'os.path.isfile', 'os.path.isfile', (['trace_file'], {}), '(trace_file)\n', (6299, 6311), False, 'import os\n'), ((6317, 6370), 'sys.stderr.write', 'sys.stderr.write', (["('File not found: %s\\n' % trace_file)"], {}), "('File not found: %s\\n' % trace_file)\n", (6333, 6370), False, 'import sys\n'), ((6375, 6386), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6383, 6386), False, 'import sys\n'), ((9920, 9933), 'numpy.copy', 'np.copy', (['pars'], {}), '(pars)\n', (9927, 9933), True, 'import numpy as np\n'), ((6914, 6925), 'numpy.sqrt', 'np.sqrt', (['c0'], {}), 
'(c0)\n', (6921, 6925), True, 'import numpy as np\n'), ((9334, 9343), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (9341, 9343), True, 'import matplotlib.pyplot as plt\n'), ((9953, 9975), 'numpy.sqrt', 'np.sqrt', (['ypos[:, None]'], {}), '(ypos[:, None])\n', (9960, 9975), True, 'import numpy as np\n'), ((4991, 5008), 'numpy.abs', 'np.abs', (['(vec - val)'], {}), '(vec - val)\n', (4997, 5008), True, 'import numpy as np\n'), ((5265, 5284), 'numpy.abs', 'np.abs', (['(a - med_val)'], {}), '(a - med_val)\n', (5271, 5284), True, 'import numpy as np\n'), ((5798, 5816), 'numpy.abs', 'np.abs', (['(data - med)'], {}), '(data - med)\n', (5804, 5816), True, 'import numpy as np\n'), ((6208, 6234), 'os.path.basename', 'os.path.basename', (['__FILE__'], {}), '(__FILE__)\n', (6224, 6234), False, 'import os\n')] |
"""
This is a wrapper for executing various parallel map routines in a consistent way.
The primary routines are:
work_orders(): Calls a function with args or kwargs.
def _do_work(i, foo=None):
print(i, foo)
results = zap.work_orders([
dict(fn=_do_work, args=(i,), foo=i*10)
for i in range(10)
])
arrays():
def _do_work_on_an_arrays(a, b, foo=None):
# a and b are np.ndarrays
print(a, b, foo)
array0 = np.zeros((10, 100))
array1 = np.zeros((10, 1000))
results = zap.arrays(_do_work_on_an_arrays, dict(a=array0, b=array1), foo=1)
df_rows(): Split along Dataframe rows
def _do_work_on_df_row(row, foo=None):
# row is a row of a dataframe
print(row, foo)
df = pd.DataFrame(dict(a=[1,2], b=[3,4]))
results = zap.df_rows(_do_work_on_df_row, df, foo=1)
df_groups(): Split along Dataframe groups
def _do_work_on_df_group(group, foo=None):
# group is a pandas groupby object
print(group, foo)
df = pd.DataFrame(dict(a=[1,1,2,2], b=[3,4,4,5]))
results = zap.df_groups(_do_work_on_df_group, df.groupby("a"), foo=1)
Contexts
A zap context establishes how parallelism will be allowed:
# Exmaple, run _do_work in, at most, 5 sub-processes
with zap.Context(cpu_limit=5, mode="process", progress=progress):
results = zap.work_orders([
dict(fn=_do_work, args=(i,), foo=i*10)
for i in range(10)
])
Debugging run-away processes.
Sometimes you can get into a situation where processes seem
to be stranded and are still running after a ^C.
This is complicated by running docker under OSX.
Docker under OSX is actually running under a Linux VM called
"com.docker.hyperkit". The OSX pid of that process
has nothing to do with the pid of the processes that are
running inside the VM and the pids running insider the
container (inside the VM).
You can drop into the hyperkit VM with the following command
from the an OSX shell using Erisyon's "p" helper.
$ OSX_VM=1 ./p
Once in there you can "top" or "htop" and see what processes are
running. Let's say that you see that pid 5517 taking 100% cpu.
You can then find the pid INSIDE the container with this:
$ cat /proc/5517/status | grep NSpid
> NSpid: 5517 832
The second number of which is the pid INSIDE the container (832).
"""
import gc
import os
import random
import signal
import sys
import time
import traceback
from concurrent.futures import (
ProcessPoolExecutor,
ThreadPoolExecutor,
as_completed,
thread,
)
from concurrent.futures.process import BrokenProcessPool
from contextlib import contextmanager
from multiprocessing import cpu_count
from plaster.tools.zlog.zlog import spy
import numpy as np
import pandas as pd
import psutil
from munch import Munch
import logging
log = logging.getLogger(__name__)
from plaster.tools.utils import utils
_context_depth = 0
_cpu_limit = None
_mode = "process"
_progress = None
_allow_inner_parallelism = False
_trap_exceptions = True
_thread_name_prefix = "zap_"
_zap_verbose = False
if os.environ.get("ZAP_DEBUG_MODE") == "True":
_mode = "debug"
@contextmanager
def Context(
cpu_limit=None,
mode=None,
progress=None,
allow_inner_parallelism=False,
trap_exceptions=True,
thread_name_prefix=None,
mem_per_workorder=None,
verbose=None,
_force_mode=False, # Used for testing purposes
):
"""
Arguments:
cpu_limit: int (default None==all). Maximum number of CPUs to use
None: all
positive numbers: that many cpus
negative numbers: all cpus except this many. eg: -2 = all cpus less two
default: all
mode: str. (Default "process")
"process": Run in sub-processes
"thread": Run as threads
"debug" : Run the work orders serially and blocking (ie no threads or processes)
This is useful both for debugging and to prevent inner contexts from
parallelizing. See allow_inner_parallelism.
allow_inner_parallelism: bool (default False)
If True, allow inner contexts to parallelize normally.
Usually this is a bad idea as it can lead to serious contention
wherein a group or parallel work order each tries to allocate
all cpus for themselves and causes CPU and/or Memory contention.
progress: function pointer
If non None will callback with args (work_order_i, n_total_work_orders, retry)
trap_exceptions: bool (default True)
If true, exceptions are trapped and returned as a result.
When false, any worker execption bubbles up immediately to
the caller and other workers will die when they die.
The default is True because there's nothing more annoying than
running a long-running parallel job only to find that
after hours of execution there was one rare exception stopped
the whole run!
thread_name_prefix: str (default "zap_")
Set the thread names for easier debugging
mem_per_workorder: int
If not None, use this to estimate the cpu_limit
verbose: bool
If True, emit debugging traces
"""
global _cpu_limit, _mode, _progress, _allow_inner_parallelism, _context_depth, _trap_exceptions, _thread_name_prefix, _zap_verbose
_context_depth += 1
orig_cpu_limit = _cpu_limit
orig_mode = _mode
orig_progress = _progress
orig_allow_inner_parallelism = _allow_inner_parallelism
orig_trap_exceptions = _trap_exceptions
orig_thread_name_prefix = _thread_name_prefix
orig_zap_verbose = _zap_verbose
if _context_depth > 1 and not _allow_inner_parallelism:
# In a nested context if inner_parallelism is not allowed then
# the zaps are kicked into mode = "debug" meaning that
# work_orders will execute serially in the current thread & process.
mode = "debug"
if cpu_limit is not None:
_cpu_limit = cpu_limit
elif mem_per_workorder is not None:
gb = 2 ** 30
vmm = psutil.virtual_memory().total - (8 * gb) # Reserve 8 GB for other factors
_cpu_limit = max(1, min(vmm // mem_per_workorder, _cpu_count()))
if mode is not None and _mode != "debug":
# debug mode will not allow any inner contexts to be anything other than debug
_mode = mode
if _force_mode:
_mode = mode
_progress = progress
_allow_inner_parallelism = allow_inner_parallelism
_trap_exceptions = trap_exceptions
_thread_name_prefix = thread_name_prefix
if verbose is not None:
_zap_verbose = verbose
try:
yield
finally:
_cpu_limit = orig_cpu_limit
_mode = orig_mode
_progress = orig_progress
_allow_inner_parallelism = orig_allow_inner_parallelism
_trap_exceptions = orig_trap_exceptions
_thread_name_prefix = orig_thread_name_prefix
_zap_verbose = orig_zap_verbose
_context_depth -= 1
def _cpu_count():
"""mock-point"""
return cpu_count()
def _show_work_order_exception(e):
"""Mock-point"""
log.exception(f"Exception raised by a work order {e.work_order}")
def _mock_BrokenProcessPool_exception():
"""mock_point"""
pass
def _set_zap(**kwargs):
"""
Creates a global variable with the zap information to bypass
the serialization that multiprocessing would otherwise do.
"""
zap_id = int(time.time() * 1000000)
zap = Munch(id=zap_id, **kwargs)
globals()[f"__zap_{zap_id}"] = zap
return zap
def _get_zap(zap_id):
"""
Fetches zap data from global. See _set_zap.
"""
return globals()[f"__zap_{zap_id}"]
def _del_zap(zap_id):
del globals()[f"__zap_{zap_id}"]
def _run_work_order_fn(zap_id, work_order_i):
"""
Wrap the function to handle args, kwargs, capture exceptions, and re-seed RNG.
Note: This may run in the sub-process or thread and therefore should not use stdio.
"""
start_time = time.time()
try:
work_order = _get_zap(zap_id).work_orders[work_order_i]
# RE-INITIALIZE the random seed because numpy resets the seed in sub-processes.
np.random.seed(seed=int(time.time() * 100_000) % int(2 ** 32))
random.seed()
args = work_order.pop("args", ())
fn = work_order.pop("fn")
assert callable(fn)
result = fn(*args, **work_order)
# GARBAGE collect.
# A future improvement might be allow this to be configurable
gc.collect()
except Exception as e:
formatted = traceback.format_exception(
etype=type(e), value=e, tb=e.__traceback__
)
result = e
result.exception_lines = formatted
return result, time.time() - start_time
def _call_progress(zap, i, retry=False):
if zap.progress is not None:
try:
zap.progress(i + 1, zap.n_work_orders, retry)
except Exception as e:
log.exception(e, "Warning: progress function exceptioned; ignoring.")
def _dump_exception(result):
    """Mock-point: report an un-trapped work-order exception to the log and stdout."""
    exc_desc = f"{result.__class__.__name__}({result})"
    log.error(
        f"Work order generated un-trapped exception: '{exc_desc}'. exception_lines were: {result.exception_lines}"
    )
    print(f"Work order generated un-trapped exception: '{exc_desc}'.")
    print("".join(result.exception_lines))
def _examine_result(zap, result, work_order):
if isinstance(result, Exception):
result.work_order = work_order
if not zap.trap_exceptions:
_dump_exception(result)
raise result
return result
def _warn_about_retries(n_retries, zap):
    """Mock-point: warn that killed worker processes will be retried serially."""
    message = (
        f"There were {n_retries} processes killed in a zap (id={zap.id} fn_name={zap.fn_name})."
        f"This was likely caused by running out of memory. The zap executor will "
        f"now try to re-run each zap serially to reduce memory pressure but this may be very slow.\n"
        f"If you are running under docker or a VM you might consider trying to raise memory limits."
    )
    log.warning(message)
def _do_zap_with_executor(executor, zap):
    """
    Execute work_orders through a thread or process pool executor.

    Returns (results, timings), both lists indexed like zap.work_orders.
    Work orders whose future failed with BrokenProcessPool are collected and
    re-run serially in this process after all other futures complete.
    """
    retry_iz = []
    wo_i_by_future = {}
    for i, work_order in enumerate(zap.work_orders):
        # Important: the executor submit must not be passed
        # the actual work_order to bypass serialization.
        # Workers fetch the work_order by (zap.id, i) via _get_zap.
        future = executor.submit(_run_work_order_fn, zap.id, i)
        wo_i_by_future[future] = i
    results = [None] * zap.n_work_orders
    timings = [None] * zap.n_work_orders
    n_done = 0
    for future in as_completed(wo_i_by_future):
        # Futures complete in arbitrary order; map back to the work_order index.
        i = wo_i_by_future[future]
        work_order = zap.work_orders[i]
        try:
            result, duration = future.result()
            _mock_BrokenProcessPool_exception()  # Used for testing
            _call_progress(zap, n_done)
            n_done += 1
            results[i] = _examine_result(zap, result, work_order)
            timings[i] = duration
        except BrokenProcessPool as e:
            # This can happen if the child process(es) run out
            # of memory. In that case, we need to retry those
            # work_orders.
            retry_iz += [i]
    n_retries = len(retry_iz)
    if n_retries > 0:
        _warn_about_retries(n_retries, zap)
    for i in retry_iz:
        # These retries are likely a result of running out memory
        # and we don't know how many of those processes we can support
        # so the only safe thing to do is to run them one at a time.
        # If this becomes a constant issue then we could try some
        # sort of exponential back-off on number of concurrent processes.
        # Sometimes this can create a 'queue.Full' exception in the babysitter
        # threads that are not handled gracefully by Python.
        # See:
        #   https://bugs.python.org/issue8426
        #   https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue
        #   https://github.com/python/cpython/pull/3895
        try:
            _call_progress(zap, i, retry=True)
            result, duration = _run_work_order_fn(zap.id, i)
            results[i] = _examine_result(zap, result, zap.work_orders[i])
            timings[i] = duration
        except Exception as e:
            # A retry that still fails records the exception as its result.
            results[i] = e
            timings[i] = None
    return results, timings
def _do_work_orders_process_mode(zap):
    """
    Run the zap's work orders in a ProcessPoolExecutor.

    Only supported on linux: the globals()-based zap hand-off (see _set_zap)
    relies on how globals are shared with child processes on linux.
    On KeyboardInterrupt the child processes are SIGKILLed so the zap stops
    instantly instead of gracefully waiting for children.
    """
    if sys.platform != "linux":
        raise Exception(
            "Process mode zap is not working under non-linux at the moment due to differences in globals() under non-linux."
        )
    with ProcessPoolExecutor(max_workers=zap.max_workers) as executor:
        try:
            return _do_zap_with_executor(executor, zap)
        except KeyboardInterrupt:
            # If I do not os.kill the processes then it seems
            # that will gracefully send kill signals and wait
            # for the children. I typically want to just abandon
            # anything that the process is doing and have it end instantly.
            # Thus, I just reach in to get the child pids and kill -9 them.
            # NOTE: executor._processes is a private concurrent.futures internal.
            for proc in executor._processes.values():
                try:
                    os.kill(proc.pid, signal.SIGKILL)
                except ProcessLookupError:
                    log.info(f"{proc.pid} had already died")
            raise
def _do_work_orders_thread_mode(zap):
    """
    Run the zap's work orders in a ThreadPoolExecutor.

    On any exception (including KeyboardInterrupt) the executor's internal
    thread bookkeeping is cleared so the interpreter can exit promptly
    instead of joining worker threads.
    """
    with ThreadPoolExecutor(
        max_workers=zap.max_workers, thread_name_prefix=zap.thread_name_prefix
    ) as executor:
        try:
            return _do_zap_with_executor(executor, zap)
        except BaseException as e:
            # Any sort of exception needs to clear all threads.
            # Note that KeyboardInterrupt inherits from BaseException not
            # Exception so using BaseException to include KeyboardInterrupts
            # Unlike above with os.kill(), the thread clears are not so destructive,
            # so we want to call them in any situation in which we're bubbling up the
            # exception.
            # NOTE: _threads / _threads_queues are private concurrent.futures internals.
            executor._threads.clear()
            thread._threads_queues.clear()
            raise e
def _do_work_orders_debug_mode(zap):
    """
    Run every work order serially in the current thread and process.
    Skips all multi-processing so that console-based debuggers are happy.
    """
    results = [None] * zap.n_work_orders
    timings = [None] * zap.n_work_orders
    for wo_i, wo in enumerate(zap.work_orders):
        wo_result, wo_duration = _run_work_order_fn(zap.id, wo_i)
        results[wo_i] = _examine_result(zap, wo_result, wo)
        timings[wo_i] = wo_duration
        _call_progress(zap, wo_i)
    return results, timings
def get_cpu_limit(_cpu_limit=None):
    """
    Resolve a cpu limit: None means all cpus; a negative value means
    all cpus minus that many.
    """
    limit = _cpu_count() if _cpu_limit is None else _cpu_limit
    if limit < 0:
        # eg: 4 cpu + (-1) is 3
        limit = _cpu_count() + limit
    assert limit > 0
    return limit
def work_orders(_work_orders, _return_timings=False, _fn_name=None):
    """
    Runs work_orders in parallel.
    work_orders: List[Dict]
        Each work_order should have a "fn" element that points to the fn to run
        If the work_order has an "args" element those will be passed as *args
        all other elements of the work_order will be passed as **kwargs
    _return_timings:
        If True, then returns a tuple of results, timings
        otherwise just returns results

    Note: the execution mode and limits come from the module-level Context
    globals (_mode, _progress, _trap_exceptions, _thread_name_prefix,
    _cpu_limit, _zap_verbose).
    """
    if _fn_name is None:
        # Best-effort name for logging only; anything un-nameable is "Unknown".
        try:
            _fn_name = _work_orders[0]["fn"].__name__
        except Exception:
            _fn_name = "Unknown"
    zap = _set_zap(
        work_orders=_work_orders,
        n_work_orders=len(_work_orders),
        progress=_progress,
        thread_name_prefix=_thread_name_prefix,
        trap_exceptions=_trap_exceptions,
        max_workers=get_cpu_limit(_cpu_limit),
        fn_name=_fn_name,
    )
    if _zap_verbose:
        log.info(
            (
                # Fixed: message previously had a doubled quote after the fn name.
                f"\nStarting zap.id {zap.id & 0xFFFF:4x} on worker function '{_fn_name}' "
                f"with {zap.n_work_orders} work_orders in mode '{_mode}'"
            )
            + (f" (using up to {zap.max_workers} workers)." if _mode != "debug" else "")
        )
    if _mode not in ("debug", "process", "thread"):
        raise ValueError(f"Unknown zap mode '{_mode}'")
    results, timings = None, None
    try:
        if _mode == "debug":
            # debug_mode takes precedence; ie over-rides any multi-processing
            results, timings = _do_work_orders_debug_mode(zap)
        elif _mode == "process":
            results, timings = _do_work_orders_process_mode(zap)
        elif _mode == "thread":
            results, timings = _do_work_orders_thread_mode(zap)
    except Exception as e:
        if hasattr(e, "exception_lines"):
            _show_work_order_exception(e)
        raise e
    finally:
        if _zap_verbose:
            log.info(f"\nDone zap.id {zap.id & 0xFFFF:4x} {_fn_name}.")
        # Always release the global stash, even on exception.
        _del_zap(zap.id)
    if _return_timings:
        return results, timings
    return results
def make_batch_slices(n_rows: int, _batch_size=None, _limit_slice=None):
    """
    Partition n_rows (optionally restricted by _limit_slice) into contiguous
    (start, stop) batch ranges.

    _limit_slice may be None (all rows), an int (first that many rows), or a
    step-1 slice. If _batch_size is None the batch count is derived from the
    cpu count; otherwise batches of _batch_size are made.
    """
    assert isinstance(n_rows, int)
    if _limit_slice is None:
        _limit_slice = slice(0, n_rows, 1)
    if isinstance(_limit_slice, int):
        _limit_slice = slice(0, _limit_slice, 1)
    lo = _limit_slice.start
    hi = _limit_slice.stop
    step = _limit_slice.step
    if step is None:
        step = 1
    if hi is None:
        hi = n_rows
    assert step == 1  # Until I have time to think this through
    n_rows = hi - lo
    if n_rows == 0:
        return []
    if _batch_size is None:
        # If not specified, base it on the number of cpus.
        # Note, if n_batches is only as big as the _cpu_count then there won't
        # be any output on the progress bar until it is done so it is scaled
        # by eight here to ensure the progress bar will at least move 8 times.
        n_batches = min(n_rows, 8 * _cpu_count())
        batch_size = max(1, (n_rows // n_batches) + 1)
    else:
        batch_size = _batch_size
        n_batches = max(
            1, (n_rows // batch_size) + (0 if n_rows % batch_size == 0 else 1)
        )
    if batch_size <= 0:
        raise ValueError(f"illegal batch_size {batch_size}")
    assert batch_size * n_batches >= n_rows
    slices = []
    for batch_i in range(n_batches):
        lo_i = lo + batch_i * batch_size
        hi_i = lo + min((batch_i + 1) * batch_size, n_rows)
        if hi_i > lo_i:
            slices.append((lo_i, hi_i))
    return slices
def _run_arrays(inner_fn, slice, arrays_dict, **kwargs):
"""
Assumes that the lengths of the value arrays are all the same.
"""
# SETUP the re-usable kwargs with parameters and arrays and then poke values one row at a time
res = []
for row_i in range(slice[0], slice[1]):
for field_i, (key, array) in enumerate(arrays_dict.items()):
kwargs[key] = array[row_i]
val = inner_fn(**kwargs)
if isinstance(val, tuple):
res += [val]
else:
res += [(val,)]
return res
def arrays(
    fn, arrays_dict, _batch_size=None, _stack=False, _limit_slice=None, **kwargs,
):
    """
    Split an array by its first dimension and send each row to fn.
    The array_dict is one or more parallel arrays that will
    be passed to fn(). **kwargs will end up as (constant) kwargs
    to fn().
    Example:
        def myfn(a, b, c):
            return a + b + c
        a = np.array([1, 2, 3])
        b = np.array([4, 5, 6])
        res = zap.arrays(
            myfn,
            dict(a=a, b=b),
            c=1
        )
        # This will call:
        #   myfn(1, 4, 1)
        #   myfn(2, 5, 1)
        #   myfn(3, 6, 1)
        # and res == [1+4+1, 2+5+1, 3+6+1]
    These calls are batched into parallel processes (or _process_mode is False)
    where the _batch_size is set or if None it will be chosen to use all cpus.
    When fn returns a tuple of fields, these return fields
    will be maintained.
    Example:
        def myfn(a, b, c):
            return a, b+c
        a = np.array([1, 2, 3])
        b = np.array([4, 5, 6])
        res = zap.arrays(
            myfn,
            dict(a=a, b=b),
            c=1
        )
        # This will call as before but now:
        # res == ([1, 2, 3], [4+1, 5+1, 6+1])
    If _stack is True then _each return field_ will be wrapped
    with a np.array() before it is returned. If _stack is a list
    then you can selective wrap the np.array only to the return
    fields of your choice.
    Example:
        def myfn(a, b, c):
            return a, b+c
        a = np.array([1, 2, 3])
        b = np.array([4, 5, 6])
        res = zap.arrays(
            myfn,
            dict(a=a, b=b),
            c=1,
            _stack=True
        )
        # This will call as before but now:
        # res == (np.array([1, 2, 3]), np.array([4+1, 5+1, 6+1]))
        # Of called with _stack=[True, False]
        # res == (np.array([1, 2, 3]), [4+1, 5+1, 6+1])
    """
    # All parallel arrays must share the same first-dimension length.
    n_rows = len(list(arrays_dict.values())[0])
    assert all([len(a) == n_rows for a in arrays_dict.values()])
    batch_slices = make_batch_slices(n_rows, _batch_size, _limit_slice)
    # One work_order per batch; _run_arrays pokes each row into kwargs.
    result_batches = work_orders(
        _work_orders=[
            Munch(
                fn=_run_arrays,
                inner_fn=fn,
                slice=batch_slice,
                arrays_dict=arrays_dict,
                **kwargs,
            )
            for batch_slice in batch_slices
        ],
        _fn_name=fn.__name__,
    )
    if len(result_batches) == 0:
        raise ValueError("No batches were returned")
    first_batch = result_batches[0]
    if isinstance(first_batch, Exception):
        raise first_batch
    if len(first_batch) == 0:
        raise ValueError("First batch had no elements")
    first_return = first_batch[0]
    if isinstance(first_return, Exception):
        raise first_return
    # _run_arrays wraps every per-row return in a tuple, even scalars.
    assert isinstance(first_return, tuple)
    n_fields = len(first_return)
    # Transpose from [batch][row] -> [field][row] across all batches.
    unbatched = []
    for field_i in range(n_fields):
        field_rows = []
        for batch in result_batches:
            field_rows += utils.listi(batch, field_i)
        unbatched += [field_rows]
    if _stack is not None:
        if isinstance(_stack, bool):
            # A single bool applies to every return field.
            _stack = [_stack] * n_fields
        if isinstance(_stack, (list, tuple)):
            assert all([isinstance(s, bool) for s in _stack])
            assert len(_stack) == n_fields
        # If requested, wrap the return field in np.array()
        for field_i in range(n_fields):
            if _stack[field_i]:
                unbatched[field_i] = np.array(unbatched[field_i])
    if n_fields == 1:
        return unbatched[0]
    else:
        return tuple(unbatched)
def _run_df_rows(inner_fn, slice, df, **kwargs):
"""
Assumes that the lengths of the value arrays are all the same.
"""
# SETUP the re-usable kwargs with parameters and arrays and then poke values one row at a time
res = []
for row_i in range(slice[0], slice[1]):
args = (df.iloc[row_i : row_i + 1],)
val = inner_fn(*args, **kwargs)
res += [val]
return res
def df_rows(
    fn, df, _batch_size=None, _limit_slice=None, **kwargs,
):
    """
    Split a dataframe along its rows and run fn on each single-row frame.
    The dataframe itself is not split up front; workers pick their rows from
    it, to minimize what gets serialized. fn must return a DataFrame; the
    returns are concatenated and the index reset.
    """
    batch_slices = make_batch_slices(len(df), _batch_size, _limit_slice)
    result_batches = work_orders(
        _work_orders=[
            Munch(fn=_run_df_rows, inner_fn=fn, slice=batch_slice, df=df, **kwargs,)
            for batch_slice in batch_slices
        ],
        _fn_name=fn.__name__,
    )
    unbatched = []
    for batch in result_batches:
        for ret in batch:
            if not isinstance(ret, pd.DataFrame):
                raise TypeError(
                    "return values from the fn of df_rows must be DataFrames"
                )
            unbatched.append(ret)
    return pd.concat(unbatched).reset_index(drop=True)
def df_groups(fn, df_group, **kwargs):
    """
    Run function on each group of groupby
    There is a lot of complexity to the way that groupby handles return
    values from the functions so I use the apply to accumulate the
    work orders and then use apply again to return the results and
    let the apply whatever magic it wants to to reformat the result
    """

    def _do_get_calls(group, **kwargs):
        return Munch(args=(group.copy(),), _index=tuple(group.index.values), **kwargs)

    # 3/15/2021 DHW: GroupBy.apply is doing something that makes the result of the above group.copy() get mangled when passed to zap workers.
    # _work_orders = df_group.apply(_do_get_calls)
    _work_orders = [_do_get_calls(group) for i, group in df_group]
    # Split kwargs: underscore-prefixed ones go to work_orders() itself,
    # the rest are forwarded to each work order.
    wo_kwargs = {k: v for k, v in kwargs.items() if not k.startswith("_")}
    non_wo_kwargs = {k: v for k, v in kwargs.items() if k.startswith("_")}
    prepared = []
    for wo in _work_orders:
        del wo["_index"]
        wo["fn"] = fn
        wo.update(wo_kwargs)
        prepared.append(wo)
    results = work_orders(prepared, _fn_name=fn.__name__, **non_wo_kwargs)
    # results is a list. One element per work order which in this case is
    # one work_order per group.
    #
    # Each WO result is the return value of the function; if multiple
    # return values then it is a tuple.
    return results
| [
"logging.getLogger",
"os.kill",
"concurrent.futures.thread._threads_queues.clear",
"concurrent.futures.ThreadPoolExecutor",
"os.environ.get",
"multiprocessing.cpu_count",
"random.seed",
"concurrent.futures.as_completed",
"plaster.tools.utils.utils.listi",
"numpy.array",
"psutil.virtual_memory",
... | [((3031, 3058), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3048, 3058), False, 'import logging\n'), ((3281, 3313), 'os.environ.get', 'os.environ.get', (['"""ZAP_DEBUG_MODE"""'], {}), "('ZAP_DEBUG_MODE')\n", (3295, 3313), False, 'import os\n'), ((7342, 7353), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (7351, 7353), False, 'from multiprocessing import cpu_count\n'), ((7775, 7801), 'munch.Munch', 'Munch', ([], {'id': 'zap_id'}), '(id=zap_id, **kwargs)\n', (7780, 7801), False, 'from munch import Munch\n'), ((8297, 8308), 'time.time', 'time.time', ([], {}), '()\n', (8306, 8308), False, 'import time\n'), ((10977, 11005), 'concurrent.futures.as_completed', 'as_completed', (['wo_i_by_future'], {}), '(wo_i_by_future)\n', (10989, 11005), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed, thread\n'), ((8550, 8563), 'random.seed', 'random.seed', ([], {}), '()\n', (8561, 8563), False, 'import random\n'), ((8816, 8828), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8826, 8828), False, 'import gc\n'), ((12990, 13038), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'zap.max_workers'}), '(max_workers=zap.max_workers)\n', (13009, 13038), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed, thread\n'), ((13789, 13884), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'zap.max_workers', 'thread_name_prefix': 'zap.thread_name_prefix'}), '(max_workers=zap.max_workers, thread_name_prefix=zap.\n thread_name_prefix)\n', (13807, 13884), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed, thread\n'), ((7742, 7753), 'time.time', 'time.time', ([], {}), '()\n', (7751, 7753), False, 'import time\n'), ((9051, 9062), 'time.time', 'time.time', ([], {}), '()\n', (9060, 9062), False, 'import time\n'), ((22680, 22707), 
'plaster.tools.utils.utils.listi', 'utils.listi', (['batch', 'field_i'], {}), '(batch, field_i)\n', (22691, 22707), False, 'from plaster.tools.utils import utils\n'), ((24549, 24569), 'pandas.concat', 'pd.concat', (['unbatched'], {}), '(unbatched)\n', (24558, 24569), True, 'import pandas as pd\n'), ((14472, 14502), 'concurrent.futures.thread._threads_queues.clear', 'thread._threads_queues.clear', ([], {}), '()\n', (14500, 14502), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed, thread\n'), ((21802, 21895), 'munch.Munch', 'Munch', ([], {'fn': '_run_arrays', 'inner_fn': 'fn', 'slice': 'batch_slice', 'arrays_dict': 'arrays_dict'}), '(fn=_run_arrays, inner_fn=fn, slice=batch_slice, arrays_dict=\n arrays_dict, **kwargs)\n', (21807, 21895), False, 'from munch import Munch\n'), ((23170, 23198), 'numpy.array', 'np.array', (['unbatched[field_i]'], {}), '(unbatched[field_i])\n', (23178, 23198), True, 'import numpy as np\n'), ((24085, 24156), 'munch.Munch', 'Munch', ([], {'fn': '_run_df_rows', 'inner_fn': 'fn', 'slice': 'batch_slice', 'df': 'df'}), '(fn=_run_df_rows, inner_fn=fn, slice=batch_slice, df=df, **kwargs)\n', (24090, 24156), False, 'from munch import Munch\n'), ((6353, 6376), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (6374, 6376), False, 'import psutil\n'), ((13590, 13620), 'os.kill', 'os.kill', (['v.pid', 'signal.SIGKILL'], {}), '(v.pid, signal.SIGKILL)\n', (13597, 13620), False, 'import os\n'), ((8503, 8514), 'time.time', 'time.time', ([], {}), '()\n', (8512, 8514), False, 'import time\n')] |
"""Spherical harmonic vector wind computations."""
# Copyright (c) 2012-2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from spharm import Spharmt, gaussian_lats_wts
class VectorWind(object):
"""Vector Wind computations (standard `numpy` interface)."""
def __init__(self, u, v, gridtype='regular', rsphere=6.3712e6):
"""Initialize a VectorWind instance.
**Arguments:**
*u*, *v*
Zonal and meridional wind components respectively. Their
types should be either `numpy.ndarray` or
`numpy.ma.MaskedArray`. *u* and *v* must have matching
shapes and contain no missing values. *u* and *v* may be 2
or 3-dimensional with shape (nlat, nlon) or
(nlat, nlon, nt), where nlat and nlon are the number of
latitudes and longitudes respectively and nt is the number
of fields. The latitude dimension must be oriented
north-to-south. The longitude dimension should be
oriented west-to-east.
**Optional arguments:**
*gridtype*
Type of the input grid, either 'regular' for evenly-spaced
grids, or 'gaussian' for Gaussian grids. Defaults to
'regular'.
*rsphere*
The radius in metres of the sphere used in the spherical
harmonic computations. Default is 6371200 m, the approximate
mean spherical Earth radius.
**See also:**
`~windspharm.tools.prep_data`,
`~windspharm.tools.recover_data`,
`~windspharm.tools.get_recovery`,
`~windspharm.tools.reverse_latdim`,
`~windspharm.tools.order_latdim`.
**Examples:**
Initialize a `VectorWind` instance with zonal and meridional
components of the vector wind on the default regular
(evenly-spaced) grid:
from windspharm.standard import VectorWind
w = VectorWind(u, v)
Initialize a `VectorWind` instance with zonal and meridional
components of the vector wind specified on a Gaussian grid:
from windspharm.standard import VectorWind
w = VectorWind(u, v, gridtype='gaussian')
"""
# For both the input components check if there are missing values by
# attempting to fill missing values with NaN and detect them. If the
# inputs are not masked arrays then take copies and check for NaN.
try:
self.u = u.filled(fill_value=np.nan)
except AttributeError:
self.u = u.copy()
try:
self.v = v.filled(fill_value=np.nan)
except AttributeError:
self.v = v.copy()
if np.isnan(self.u).any() or np.isnan(self.v).any():
raise ValueError('u and v cannot contain missing values')
# Make sure the shapes of the two components match.
if u.shape != v.shape:
raise ValueError('u and v must be the same shape')
if len(u.shape) not in (2, 3):
raise ValueError('u and v must be rank 2 or 3 arrays')
nlat = u.shape[0]
nlon = u.shape[1]
try:
# Create a Spharmt object to do the computations.
self.gridtype = gridtype.lower()
self.s = Spharmt(nlon, nlat, gridtype=self.gridtype,
rsphere=rsphere)
except ValueError:
if self.gridtype not in ('regular', 'gaussian'):
err = 'invalid grid type: {0:s}'.format(repr(gridtype))
else:
err = 'invalid input dimensions'
raise ValueError(err)
# Method aliases.
self.rotationalcomponent = self.nondivergentcomponent
self.divergentcomponent = self.irrotationalcomponent
def magnitude(self):
"""Wind speed (magnitude of vector wind).
**Returns:**
*speed*
The wind speed.
**Example:**
Magnitude of the vector wind::
spd = w.magnitude()
"""
return (self.u ** 2 + self.v ** 2) ** 0.5
def vrtdiv(self, truncation=None):
"""Relative vorticity and horizontal divergence.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*vrt*, *div*
The relative vorticity and divergence respectively.
**See also:**
`~VectorWind.vorticity`, `~VectorWind.divergence`.
**Examples:**
Compute the relative vorticity and divergence::
vrt, div = w.vrtdiv()
Compute the relative vorticity and divergence and apply spectral
truncation at triangular T13::
vrtT13, divT13 = w.vrtdiv(truncation=13)
"""
vrtspec, divspec = self.s.getvrtdivspec(self.u,
self.v,
ntrunc=truncation)
vrtgrid = self.s.spectogrd(vrtspec)
divgrid = self.s.spectogrd(divspec)
return vrtgrid, divgrid
def vorticity(self, truncation=None):
"""Relative vorticity.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*vrt*
The relative vorticity.
**See also:**
`~VectorWind.vrtdiv`, `~VectorWind.absolutevorticity`.
**Examples:**
Compute the relative vorticity::
vrt = w.vorticity()
Compute the relative vorticity and apply spectral truncation at
triangular T13::
vrtT13 = w.vorticity(truncation=13)
"""
vrtspec, divspec = self.s.getvrtdivspec(self.u,
self.v,
ntrunc=truncation)
vrtgrid = self.s.spectogrd(vrtspec)
return vrtgrid
def divergence(self, truncation=None):
"""Horizontal divergence.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*div*
The divergence.
**See also:**
`~VectorWind.vrtdiv`.
**Examples:**
Compute the divergence::
div = w.divergence()
Compute the divergence and apply spectral truncation at
triangular T13::
divT13 = w.divergence(truncation=13)
"""
vrtspec, divspec = self.s.getvrtdivspec(self.u,
self.v,
ntrunc=truncation)
divgrid = self.s.spectogrd(divspec)
return divgrid
def planetaryvorticity(self, omega=None):
"""Planetary vorticity (Coriolis parameter).
**Optional argument:**
*omega*
Earth's angular velocity. The default value if not specified
is 7.292x10**-5 s**-1.
**Returns:**
*pvorticity*
The planetary vorticity.
**See also:**
`~VectorWind.absolutevorticity`.
**Example:**
Compute planetary vorticity using default values::
pvrt = w.planetaryvorticity()
Override the default value for Earth's angular velocity::
pvrt = w.planetaryvorticity(omega=7.2921150)
"""
if omega is None:
# Define the Earth's angular velocity.
omega = 7.292e-05
nlat = self.s.nlat
if self.gridtype == 'gaussian':
lat, wts = gaussian_lats_wts(nlat)
else:
if nlat % 2:
lat = np.linspace(90, -90, nlat)
else:
dlat = 180. / nlat
lat = np.arange(90 - dlat / 2., -90, -dlat)
try:
cp = 2. * omega * np.sin(np.deg2rad(lat))
except (TypeError, ValueError):
raise ValueError('invalid value for omega: {!r}'.format(omega))
indices = [slice(0, None)] + [np.newaxis] * (len(self.u.shape) - 1)
f = cp[indices] * np.ones(self.u.shape, dtype=np.float32)
return f
def absolutevorticity(self, omega=None, truncation=None):
"""Absolute vorticity (sum of relative and planetary vorticity).
**Optional arguments:**
*omega*
Earth's angular velocity. The default value if not specified
is 7.292x10**-5 s**-1.
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*avorticity*
The absolute (relative + planetary) vorticity.
**See also:**
`~VectorWind.vorticity`, `~VectorWind.planetaryvorticity`.
**Examples:**
Compute absolute vorticity::
avrt = w.absolutevorticity()
Compute absolute vorticity and apply spectral truncation at
triangular T13, also override the default value for Earth's
angular velocity::
avrt = w.absolutevorticity(omega=7.2921150, truncation=13)
"""
pvrt = self.planetaryvorticity(omega=omega)
rvrt = self.vorticity(truncation=truncation)
return pvrt + rvrt
def sfvp(self, truncation=None):
"""Streamfunction and velocity potential.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*sf*, *vp*
The streamfunction and velocity potential respectively.
**See also:**
`~VectorWind.streamfunction`, `~VectorWind.velocitypotential`.
**Examples:**
Compute streamfunction and velocity potential::
sf, vp = w.sfvp()
Compute streamfunction and velocity potential and apply spectral
truncation at triangular T13::
sfT13, vpT13 = w.sfvp(truncation=13)
"""
psigrid, chigrid = self.s.getpsichi(self.u, self.v, ntrunc=truncation)
return psigrid, chigrid
def streamfunction(self, truncation=None):
"""Streamfunction.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*sf*
The streamfunction.
**See also:**
`~VectorWind.sfvp`.
**Examples:**
Compute streamfunction::
sf = w.streamfunction()
Compute streamfunction and apply spectral truncation at
triangular T13::
sfT13 = w.streamfunction(truncation=13)
"""
psigrid, chigrid = self.sfvp(truncation=truncation)
return psigrid
def velocitypotential(self, truncation=None):
"""Velocity potential.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*vp*
The velocity potential.
**See also:**
`~VectorWind.sfvp`.
**Examples:**
Compute velocity potential::
vp = w.velocity potential()
Compute velocity potential and apply spectral truncation at
triangular T13::
vpT13 = w.velocity potential(truncation=13)
"""
psigrid, chigrid = self.sfvp(truncation=truncation)
return chigrid
def helmholtz(self, truncation=None):
"""Irrotational and non-divergent components of the vector wind.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*uchi*, *vchi*, *upsi*, *vpsi*
Zonal and meridional components of irrotational and
non-divergent wind components respectively.
**See also:**
`~VectorWind.irrotationalcomponent`,
`~VectorWind.nondivergentcomponent`.
**Examples:**
Compute the irrotational and non-divergent components of the
vector wind::
uchi, vchi, upsi, vpsi = w.helmholtz()
Compute the irrotational and non-divergent components of the
vector wind and apply spectral truncation at triangular T13::
uchiT13, vchiT13, upsiT13, vpsiT13 = w.helmholtz(truncation=13)
"""
psigrid, chigrid = self.s.getpsichi(self.u, self.v, ntrunc=truncation)
psispec = self.s.grdtospec(psigrid)
chispec = self.s.grdtospec(chigrid)
vpsi, upsi = self.s.getgrad(psispec)
uchi, vchi = self.s.getgrad(chispec)
return uchi, vchi, -upsi, vpsi
def irrotationalcomponent(self, truncation=None):
"""Irrotational (divergent) component of the vector wind.
.. note::
If both the irrotational and non-divergent components are
required then `~VectorWind.helmholtz` should be used instead.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*uchi*, *vchi*
The zonal and meridional components of the irrotational wind
respectively.
**See also:**
`~VectorWind.helmholtz`.
**Examples:**
Compute the irrotational component of the vector wind::
uchi, vchi = w.irrotationalcomponent()
Compute the irrotational component of the vector wind and apply
spectral truncation at triangular T13::
uchiT13, vchiT13 = w.irrotationalcomponent(truncation=13)
"""
psigrid, chigrid = self.s.getpsichi(self.u, self.v, ntrunc=truncation)
chispec = self.s.grdtospec(chigrid)
uchi, vchi = self.s.getgrad(chispec)
return uchi, vchi
def nondivergentcomponent(self, truncation=None):
"""Non-divergent (rotational) component of the vector wind.
.. note::
If both the non-divergent and irrotational components are
required then `~VectorWind.helmholtz` should be used instead.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*upsi*, *vpsi*
The zonal and meridional components of the non-divergent
wind respectively.
**See also:**
`~VectorWind.helmholtz`.
**Examples:**
Compute the non-divergent component of the vector wind::
upsi, vpsi = w.nondivergentcomponent()
Compute the non-divergent component of the vector wind and apply
spectral truncation at triangular T13::
upsiT13, vpsiT13 = w.nondivergentcomponent(truncation=13)
"""
psigrid, chigrid = self.s.getpsichi(self.u, self.v, ntrunc=truncation)
psispec = self.s.grdtospec(psigrid)
vpsi, upsi = self.s.getgrad(psispec)
return -upsi, vpsi
def gradient(self, chi, truncation=None):
"""Computes the vector gradient of a scalar field on the sphere.
**Argument:**
*chi*
A scalar field. Its shape must be either (nlat, nlon) or
(nlat, nlon, nfields) where nlat and nlon are the same
as those for the vector wind components that initialized the
`VectorWind` instance.
**Optional argument:**
*truncation*
Truncation limit (triangular truncation) for the spherical
harmonic computation.
**Returns:**
*uchi*, *vchi*
The zonal and meridional components of the vector gradient
respectively.
**Examples:**
Compute the vector gradient of absolute vorticity::
avrt = w.absolutevorticity()
avrt_zonal, avrt_meridional = w.gradient(avrt)
Compute the vector gradient of absolute vorticity and apply
spectral truncation at triangular T13::
avrt = w.absolutevorticity()
avrt_zonalT13, avrt_meridionalT13 = w.gradient(avrt, truncation=13)
"""
try:
chi = chi.filled(fill_value=np.nan)
except AttributeError:
pass
if np.isnan(chi).any():
raise ValueError('chi cannot contain missing values')
try:
chispec = self.s.grdtospec(chi, ntrunc=truncation)
except ValueError:
raise ValueError('input field is not compatitble')
uchi, vchi = self.s.getgrad(chispec)
return uchi, vchi
def truncate(self, field, truncation=None):
    """Apply triangular spectral truncation to a scalar field.

    The field is transformed into spectral space (truncated at
    *truncation*, or *nlats - 1* by default) and transformed straight
    back, producing a grid field whose spectral content is consistent
    with the output of other `VectorWind` methods.
    **Argument:**
    *field*
        A scalar field. Its shape must be either (nlat, nlon) or
        (nlat, nlon, nfields) where nlat and nlon are the same
        as those for the vector wind components that initialized the
        `VectorWind` instance.
    **Optional argument:**
    *truncation*
        Truncation limit (triangular truncation) for the spherical
        harmonic computation. If not specified it will default to
        *nlats - 1* where *nlats* is the number of latitudes.
    **Returns:**
    *truncated_field*
        The field with spectral truncation applied.
    **Examples:**
    Truncate a scalar field to the computational resolution of the
    `VectorWind` instance::
        scalar_field_truncated = w.truncate(scalar_field)
    Truncate a scalar field to T21::
        scalar_field_T21 = w.truncate(scalar_field, truncation=21)
    """
    try:
        # Masked values become NaN so the missing-value check can see them.
        field = field.filled(fill_value=np.nan)
    except AttributeError:
        pass  # not a masked array
    if np.isnan(field).any():
        raise ValueError('field cannot contain missing values')
    try:
        spec_coeffs = self.s.grdtospec(field, ntrunc=truncation)
    except ValueError:
        raise ValueError('field is not compatible')
    return self.s.spectogrd(spec_coeffs)
| [
"spharm.gaussian_lats_wts",
"spharm.Spharmt",
"numpy.ones",
"numpy.linspace",
"numpy.deg2rad",
"numpy.isnan",
"numpy.arange"
] | [((4311, 4371), 'spharm.Spharmt', 'Spharmt', (['nlon', 'nlat'], {'gridtype': 'self.gridtype', 'rsphere': 'rsphere'}), '(nlon, nlat, gridtype=self.gridtype, rsphere=rsphere)\n', (4318, 4371), False, 'from spharm import Spharmt, gaussian_lats_wts\n'), ((8759, 8782), 'spharm.gaussian_lats_wts', 'gaussian_lats_wts', (['nlat'], {}), '(nlat)\n', (8776, 8782), False, 'from spharm import Spharmt, gaussian_lats_wts\n'), ((9269, 9308), 'numpy.ones', 'np.ones', (['self.u.shape'], {'dtype': 'np.float32'}), '(self.u.shape, dtype=np.float32)\n', (9276, 9308), True, 'import numpy as np\n'), ((8844, 8870), 'numpy.linspace', 'np.linspace', (['(90)', '(-90)', 'nlat'], {}), '(90, -90, nlat)\n', (8855, 8870), True, 'import numpy as np\n'), ((8946, 8984), 'numpy.arange', 'np.arange', (['(90 - dlat / 2.0)', '(-90)', '(-dlat)'], {}), '(90 - dlat / 2.0, -90, -dlat)\n', (8955, 8984), True, 'import numpy as np\n'), ((17573, 17586), 'numpy.isnan', 'np.isnan', (['chi'], {}), '(chi)\n', (17581, 17586), True, 'import numpy as np\n'), ((19218, 19233), 'numpy.isnan', 'np.isnan', (['field'], {}), '(field)\n', (19226, 19233), True, 'import numpy as np\n'), ((3738, 3754), 'numpy.isnan', 'np.isnan', (['self.u'], {}), '(self.u)\n', (3746, 3754), True, 'import numpy as np\n'), ((3764, 3780), 'numpy.isnan', 'np.isnan', (['self.v'], {}), '(self.v)\n', (3772, 3780), True, 'import numpy as np\n'), ((9034, 9049), 'numpy.deg2rad', 'np.deg2rad', (['lat'], {}), '(lat)\n', (9044, 9049), True, 'import numpy as np\n')] |
"""Define networks for dannce."""
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D
from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda
from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose
from tensorflow.keras.layers import Add
from tensorflow.keras.layers import Activation
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras import backend as K
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras import regularizers
from dannce.engine import ops as ops
import numpy as np
import h5py
def unet2d_fullbn(
    lossfunc, lr, input_dim, feature_num, metric="mse", multigpu=False, include_top=True
):
    """Build a 2D U-Net with batch normalization after every convolution.

    The network is fully convolutional, so it can be trained and tested on
    variable-size input (the x-y input dimensions are left undefined).
    inputs--
        lossfunc: loss function
        lr: float; learning rate
        input_dim: int; number of feature channels in input
        feature_num: int; number of output features
        metric: metric name passed to `model.compile`
        multigpu: if True, wrap the model with `multi_gpu_model` (2 GPUs)
        include_top: if True, output the 1x1 sigmoid head; otherwise output
            the final 32-channel feature map
    outputs--
        model: compiled Keras model object
    """
    def _double_conv(x, nfilt):
        # Two (3x3 conv -> batch norm -> ReLU) stages.
        for _ in range(2):
            x = Conv2D(nfilt, (3, 3), padding="same")(x)
            x = Activation("relu")(BatchNormalization()(x))
        return x

    inputs = Input((None, None, input_dim))
    # Encoder: double-conv blocks with 2x2 max pooling between them.
    skips = []
    x = inputs
    for nfilt in (32, 64, 128, 256):
        x = _double_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    # Bottleneck.
    x = _double_conv(x, 512)
    # Decoder: transpose-conv upsampling, skip concatenation, double-conv.
    for nfilt, skip in zip((256, 128, 64, 32), skips[::-1]):
        upsampled = Conv2DTranspose(nfilt, (2, 2), strides=(2, 2), padding="same")(x)
        x = _double_conv(concatenate([upsampled, skip], axis=3), nfilt)
    # Per-pixel sigmoid head with `feature_num` output channels.
    head = Conv2D(feature_num, (1, 1), activation="sigmoid")(x)
    model = Model(inputs=[inputs], outputs=[head if include_top else x])
    if multigpu:
        model = multi_gpu_model(model, gpus=2)
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=[metric])
    return model
def unet2d_fullIN(
    lossfunc, lr, input_dim, feature_num, metric="mse", multigpu=False, include_top=True
):
    """Build a 2D U-Net with instance normalization after every convolution.

    The network is fully convolutional, so it can be trained and tested on
    variable-size input (the x-y input dimensions are left undefined).
    inputs--
        lossfunc: loss function
        lr: float; learning rate
        input_dim: int; number of feature channels in input
        feature_num: int; number of output features
        metric: metric name passed to `model.compile`
        multigpu: if True, wrap the model with `multi_gpu_model` (2 GPUs)
        include_top: if True, output the 1x1 sigmoid head; otherwise output
            the final 32-channel feature map
    outputs--
        model: compiled Keras model object
    """
    def _double_conv(x, nfilt):
        # Two (3x3 conv -> instance norm -> ReLU) stages.
        for _ in range(2):
            x = Conv2D(nfilt, (3, 3), padding="same")(x)
            x = Activation("relu")(ops.InstanceNormalization()(x))
        return x

    inputs = Input((None, None, input_dim))
    # Encoder: double-conv blocks with 2x2 max pooling between them.
    skips = []
    x = inputs
    for nfilt in (32, 64, 128, 256):
        x = _double_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    # Bottleneck.
    x = _double_conv(x, 512)
    # Decoder: transpose-conv upsampling, skip concatenation, double-conv.
    for nfilt, skip in zip((256, 128, 64, 32), skips[::-1]):
        upsampled = Conv2DTranspose(nfilt, (2, 2), strides=(2, 2), padding="same")(x)
        x = _double_conv(concatenate([upsampled, skip], axis=3), nfilt)
    # Per-pixel sigmoid head with `feature_num` output channels.
    head = Conv2D(feature_num, (1, 1), activation="sigmoid")(x)
    model = Model(inputs=[inputs], outputs=[head if include_top else x])
    if multigpu:
        model = multi_gpu_model(model, gpus=2)
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=[metric])
    return model
# NOTE(review): this function is a byte-for-byte duplicate of the
# `unet2d_fullIN` defined earlier in this module. Python binds the name at
# definition time, so this second definition silently shadows the first.
# Behaviour is unchanged (the bodies are identical), but one copy should be
# removed to avoid future drift between them.
def unet2d_fullIN(lossfunc, lr, input_dim, feature_num, metric='mse',multigpu=False, include_top = True):
    """
    Initialize 2D U-net
    Uses the Keras functional API to construct a U-Net. The net is fully convolutional, so it can be trained
    and tested on variable size input (thus the x-y input dimensions are undefined)
    inputs--
        lossfunc: loss function
        lr: float; learning rate
        input_dim: int; number of feature channels in input
        feature_num: int; number of output features
    outputs--
        model: Keras model object
    """
    inputs = Input((None, None, input_dim))
    # Encoder: four double-conv blocks (32/64/128/256 filters), each
    # convolution followed by instance normalization and ReLU, with 2x2
    # max pooling between blocks.
    conv1 = Conv2D(32, (3, 3), padding='same')(inputs)
    conv1 = Activation('relu')(ops.InstanceNormalization()(conv1))
    conv1 = Conv2D(32, (3, 3), padding='same')(conv1)
    conv1 = Activation('relu')(ops.InstanceNormalization()(conv1))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(64, (3, 3), padding='same')(pool1)
    conv2 = Activation('relu')(ops.InstanceNormalization()(conv2))
    conv2 = Conv2D(64, (3, 3), padding='same')(conv2)
    conv2 = Activation('relu')(ops.InstanceNormalization()(conv2))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(128, (3, 3), padding='same')(pool2)
    conv3 = Activation('relu')(ops.InstanceNormalization()(conv3))
    conv3 = Conv2D(128, (3, 3), padding='same')(conv3)
    conv3 = Activation('relu')(ops.InstanceNormalization()(conv3))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(256, (3, 3), padding='same')(pool3)
    conv4 = Activation('relu')(ops.InstanceNormalization()(conv4))
    conv4 = Conv2D(256, (3, 3), padding='same')(conv4)
    conv4 = Activation('relu')(ops.InstanceNormalization()(conv4))
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    # Bottleneck (512 filters).
    conv5 = Conv2D(512, (3, 3), padding='same')(pool4)
    conv5 = Activation('relu')(ops.InstanceNormalization()(conv5))
    conv5 = Conv2D(512, (3, 3), padding='same')(conv5)
    conv5 = Activation('relu')(ops.InstanceNormalization()(conv5))
    # Decoder: transpose-conv upsampling, concatenation with the matching
    # encoder block along the channel axis, then a double-conv block.
    up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3)
    conv6 = Conv2D(256, (3, 3), padding='same')(up6)
    conv6 = Activation('relu')(ops.InstanceNormalization()(conv6))
    conv6 = Conv2D(256, (3, 3), padding='same')(conv6)
    conv6 = Activation('relu')(ops.InstanceNormalization()(conv6))
    up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3)
    conv7 = Conv2D(128, (3, 3), padding='same')(up7)
    conv7 = Activation('relu')(ops.InstanceNormalization()(conv7))
    conv7 = Conv2D(128, (3, 3), padding='same')(conv7)
    conv7 = Activation('relu')(ops.InstanceNormalization()(conv7))
    up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3)
    conv8 = Conv2D(64, (3, 3), padding='same')(up8)
    conv8 = Activation('relu')(ops.InstanceNormalization()(conv8))
    conv8 = Conv2D(64, (3, 3), padding='same')(conv8)
    conv8 = Activation('relu')(ops.InstanceNormalization()(conv8))
    up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3)
    conv9 = Conv2D(32, (3, 3), padding='same')(up9)
    conv9 = Activation('relu')(ops.InstanceNormalization()(conv9))
    conv9 = Conv2D(32, (3, 3), padding='same')(conv9)
    conv9 = Activation('relu')(ops.InstanceNormalization()(conv9))
    # 1x1 sigmoid head producing feature_num output channels.
    conv10 = Conv2D(feature_num, (1, 1), activation='sigmoid')(conv9)
    if include_top:
        model = Model(inputs=[inputs], outputs=[conv10])
    else:
        model = Model(inputs=[inputs], outputs=[conv9])
    if multigpu:
        model = multi_gpu_model(model,gpus=2)
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=[metric])
    return model
def unet3d_big_expectedvalue(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    gridsize=(64, 64, 64),
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    regularize_var=False,
    loss_weights=None,
    metric=None,
    out_kernel=(1, 1, 1),
):
    """Build a 3D U-Net whose output is a spatial-softmax expected value.

    The volumetric U-Net produces `feature_num` unnormalized heatmap
    channels; a spatial softmax and an expected-value layer convert each
    channel into a 3D coordinate using the supplied grid centers.
    inputs--
        lossfunc: loss function (or list of losses when regularize_var)
        lr: float; learning rate
        input_dim: int; channels per camera in the input volume
        feature_num: int; number of output coordinate channels
        num_cams: int; number of cameras stacked along the channel axis
        gridsize: spatial size of the input volume
        batch_norm / instance_norm: normalization after each convolution
            (neither flag set -> no normalization)
        include_top: if True, output expected 3D coordinates; otherwise
            output the final 64-channel feature volume
        regularize_var: if True, also output the coordinate variance so it
            can be penalized by a variance loss
        loss_weights: passed through to `model.compile`
        metric: list of metrics for `model.compile`; defaults to ["mse"]
            (a None sentinel is used to avoid a shared mutable default)
        out_kernel: kernel size of the final heatmap convolution
    outputs--
        model: compiled Keras model object
    """
    # Fix: the original signature used `metric=["mse"]`, a mutable default
    # argument evaluated once at definition time and shared across calls.
    if metric is None:
        metric = ["mse"]
    if batch_norm and not instance_norm:
        print("using batch normalization")
        def fun(inputs):
            return BatchNormalization()(inputs)
    elif instance_norm:
        print("using instance normalization")
        def fun(inputs):
            return ops.InstanceNormalization()(inputs)
    else:
        def fun(inputs):
            return inputs

    def _double_conv(x, nfilt):
        # Two (3x3x3 conv -> norm -> ReLU) stages.
        for _ in range(2):
            x = Conv3D(nfilt, (3, 3, 3), padding="same")(x)
            x = Activation("relu")(fun(x))
        return x

    inputs = Input((*gridsize, input_dim * num_cams))
    # Encoder with skip connections.
    skips = []
    x = inputs
    for nfilt in (64, 128, 256):
        x = _double_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)
    x = _double_conv(x, 512)
    # Decoder: upsample, concatenate the matching skip, double-conv.
    for nfilt, skip in zip((256, 128, 64), skips[::-1]):
        up = Conv3DTranspose(nfilt, (2, 2, 2), strides=(2, 2, 2), padding="same")(x)
        x = _double_conv(concatenate([up, skip], axis=4), nfilt)
    conv8 = x
    conv10 = Conv3D(feature_num, out_kernel, activation="linear", padding="same")(conv8)
    grid_centers = Input((None, 3))
    # Spatial softmax over the volume, then the expected 3D coordinate of
    # each channel under that distribution.
    conv10 = Lambda(lambda t: ops.spatial_softmax(t))(conv10)
    output = Lambda(lambda t: ops.expected_value_3d(t[0], t[1]))([conv10, grid_centers])
    # Variance around the expected value, exposed as a second output so it
    # can be used by a variance-regularizing loss.
    output_var = Lambda(lambda t: ops.var_3d(t[0], t[1], t[2]))(
        [conv10, grid_centers, output]
    )
    if include_top:
        if regularize_var:
            model = Model(inputs=[inputs, grid_centers], outputs=[output, output_var])
        else:
            model = Model(inputs=[inputs, grid_centers], outputs=[output])
    else:
        model = Model(inputs=[inputs], outputs=[conv8])
    model.compile(
        optimizer=Adam(lr=lr), loss=lossfunc, metrics=metric, loss_weights=loss_weights
    )
    return model
def slice_input(inp, k, chan=3):
    """Return camera *k*'s channel slice from a channel-stacked 5D tensor.

    Generalized: the per-camera channel count is now the `chan` parameter
    (default 3, matching the original hard-coded value, so existing callers
    are unaffected). Assumes cameras are stacked along the last axis —
    TODO confirm against callers.
    """
    print(K.int_shape(inp))  # debug trace of the incoming tensor shape
    return inp[:, :, :, :, k * chan : (k + 1) * chan]
def unet3d_big_tiedfirstlayer_expectedvalue(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    gridsize=(64, 64, 64),
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    regularize_var=False,
    loss_weights=None,
    metric="mse",
):
    """3D U-Net with a camera-tied first layer and expected-value output.

    Each camera's channel slice passes through the SAME first Conv3D layer
    (weight tying) and the per-camera responses are summed before the rest
    of the U-Net. The head converts heatmaps to expected 3D coordinates via
    a spatial softmax, as in `unet3d_big_expectedvalue`.
    inputs--
        lossfunc: loss function (or list of losses when regularize_var)
        lr: float; learning rate
        input_dim: int; channels contributed by each camera
        feature_num: int; number of output coordinate channels
        num_cams: int; number of cameras stacked along the channel axis
        gridsize: spatial size of the input volume
        batch_norm / instance_norm: normalization choice (neither -> none)
        include_top / regularize_var / loss_weights / metric: as in
            `unet3d_big_expectedvalue`
    outputs--
        model: compiled Keras model object
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")
        def fun(inputs):
            return BatchNormalization()(inputs)
    elif instance_norm:
        print("using instance normalization")
        def fun(inputs):
            return ops.InstanceNormalization()(inputs)
    else:
        def fun(inputs):
            return inputs

    def slice_input(inp, k):
        # Channel slice belonging to camera k.
        print(K.int_shape(inp))
        return inp[:, :, :, :, k * input_dim : (k + 1) * input_dim]

    def _double_conv(x, nfilt):
        # Two (3x3x3 conv -> norm -> ReLU) stages.
        for _ in range(2):
            x = Conv3D(nfilt, (3, 3, 3), padding="same")(x)
            x = Activation("relu")(fun(x))
        return x

    inputs = Input((*gridsize, input_dim * num_cams))
    conv1_layer = Conv3D(64, (3, 3, 3), padding="same")
    conv1_in = []
    for i in range(num_cams):
        # Fix: bind the loop variable through a default argument. The
        # original `lambda x: slice_input(x, i)` captured `i` late, so any
        # re-trace of the Lambda function (e.g. after model save/load)
        # would slice every branch with the FINAL camera index.
        conv1_in.append(
            conv1_layer(Lambda(lambda x, cam=i: slice_input(x, cam))(inputs))
        )
    conv1 = Add()(conv1_in)
    conv1 = Activation("relu")(fun(conv1))
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1)
    conv1 = Activation("relu")(fun(conv1))
    # Encoder continues from the tied first block.
    skips = [conv1]
    x = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    for nfilt in (128, 256):
        x = _double_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)
    x = _double_conv(x, 512)
    # Decoder: upsample, concatenate the matching skip, double-conv.
    for nfilt, skip in zip((256, 128, 64), skips[::-1]):
        up = Conv3DTranspose(nfilt, (2, 2, 2), strides=(2, 2, 2), padding="same")(x)
        x = _double_conv(concatenate([up, skip], axis=4), nfilt)
    conv8 = x
    conv10 = Conv3D(feature_num, (1, 1, 1), activation="linear")(conv8)
    grid_centers = Input((None, 3))
    # Spatial softmax, expected 3D coordinate, and its variance (the latter
    # exposed as a second output for variance-regularizing losses).
    conv10 = Lambda(lambda t: ops.spatial_softmax(t))(conv10)
    output = Lambda(lambda t: ops.expected_value_3d(t[0], t[1]))([conv10, grid_centers])
    output_var = Lambda(lambda t: ops.var_3d(t[0], t[1], t[2]))(
        [conv10, grid_centers, output]
    )
    if include_top:
        if regularize_var:
            model = Model(inputs=[inputs, grid_centers], outputs=[output, output_var])
        else:
            model = Model(inputs=[inputs, grid_centers], outputs=[output])
    else:
        model = Model(inputs=[inputs], outputs=[conv8])
    model.compile(
        optimizer=Adam(lr=lr),
        loss=lossfunc,
        metrics=[metric],
        loss_weights=loss_weights,
    )
    return model
def unet3d_big_1cam(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
):
    """Build a single-camera 3D U-Net over variable-size volumes.

    inputs--
        lossfunc: loss function
        lr: float; learning rate
        input_dim: int; number of channels in the input volume
        feature_num: int; number of output channels (sigmoid activated)
        num_cams: unused here; kept for signature parity with the
            multi-camera builders in this module
        batch_norm / instance_norm: select the normalization applied after
            every convolution (neither flag set -> no normalization)
    outputs--
        model: compiled Keras model object
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")
        def fun(inputs):
            return BatchNormalization()(inputs)
    elif instance_norm:
        print("using instance normalization")
        def fun(inputs):
            return ops.InstanceNormalization()(inputs)
    else:
        def fun(inputs):
            return inputs

    def _double_conv(x, nfilt):
        # Two (3x3x3 conv -> norm -> ReLU) stages.
        for _ in range(2):
            x = Conv3D(nfilt, (3, 3, 3), padding="same")(x)
            x = Activation("relu")(fun(x))
        return x

    inputs = Input((None, None, None, input_dim))
    # Encoder with skip connections.
    skips = []
    x = inputs
    for nfilt in (64, 128, 256):
        x = _double_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)
    x = _double_conv(x, 512)
    # Decoder: upsample, concatenate the matching skip, double-conv.
    for nfilt, skip in zip((256, 128, 64), skips[::-1]):
        up = Conv3DTranspose(nfilt, (2, 2, 2), strides=(2, 2, 2), padding="same")(x)
        x = _double_conv(concatenate([up, skip], axis=4), nfilt)
    conv10 = Conv3D(feature_num, (1, 1, 1), activation="sigmoid")(x)
    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def unet3d_big_tiedfirstlayer(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
    bs=6,
):
    """3D U-Net whose first convolution is tied (shared) across cameras.

    Each camera's channel slice passes through the SAME first Conv3D layer
    and the per-camera responses are summed before the rest of the U-Net.
    inputs--
        lossfunc: loss function
        lr: float; learning rate
        input_dim: int; channels contributed by each camera
        feature_num: int; number of output channels (sigmoid activated)
        num_cams: int; number of cameras stacked along the channel axis
        batch_norm / instance_norm: normalization choice (neither -> none)
        bs: unused in the body; retained for signature compatibility
    outputs--
        model: compiled Keras model object
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")
        def fun(inputs):
            return BatchNormalization()(inputs)
    elif instance_norm:
        print("using instance normalization")
        def fun(inputs):
            return ops.InstanceNormalization()(inputs)
    else:
        def fun(inputs):
            return inputs

    def slice_input(inp, k):
        # Channel slice belonging to camera k.
        print(K.int_shape(inp))
        return inp[:, :, :, :, k * input_dim : (k + 1) * input_dim]

    def _double_conv(x, nfilt):
        # Two (3x3x3 conv -> norm -> ReLU) stages.
        for _ in range(2):
            x = Conv3D(nfilt, (3, 3, 3), padding="same")(x)
            x = Activation("relu")(fun(x))
        return x

    inputs = Input((None, None, None, input_dim * num_cams))
    conv1_layer = Conv3D(64, (3, 3, 3), padding="same")
    conv1_in = []
    for i in range(num_cams):
        # Fix: bind the loop variable through a default argument. The
        # original `lambda x: slice_input(x, i)` captured `i` late, so a
        # re-trace of the Lambda function (e.g. after model save/load)
        # would slice every branch with the FINAL camera index.
        conv1_in.append(
            conv1_layer(Lambda(lambda x, cam=i: slice_input(x, cam))(inputs))
        )
    conv1 = Add()(conv1_in)
    conv1 = Activation("relu")(fun(conv1))
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1)
    conv1 = Activation("relu")(fun(conv1))
    # Encoder continues from the tied first block.
    skips = [conv1]
    x = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    for nfilt in (128, 256):
        x = _double_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)
    x = _double_conv(x, 512)
    # Decoder: upsample, concatenate the matching skip, double-conv.
    for nfilt, skip in zip((256, 128, 64), skips[::-1]):
        up = Conv3DTranspose(nfilt, (2, 2, 2), strides=(2, 2, 2), padding="same")(x)
        x = _double_conv(concatenate([up, skip], axis=4), nfilt)
    conv10 = Conv3D(feature_num, (1, 1, 1), activation="sigmoid")(x)
    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def unet3d_big(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    last_kern_size=(1, 1, 1),
    gridsize=None,
):
    """Build a volumetric (3D) U-Net over channel-stacked camera volumes.

    inputs--
        lossfunc: loss function
        lr: float; learning rate
        input_dim: int; channels contributed by each camera
        feature_num: int; number of output channels (sigmoid activated)
        num_cams: int; number of cameras stacked along the channel axis
        batch_norm / instance_norm: select the normalization applied after
            every convolution (neither flag set -> no normalization)
        include_top: if True, output the sigmoid head; otherwise output the
            final 64-channel feature volume
        last_kern_size: kernel size of the output convolution
        gridsize: unused; kept for argument consistency with other nets
    outputs--
        model: compiled Keras model object
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")
        def fun(inputs):
            return BatchNormalization()(inputs)
    elif instance_norm:
        print("using instance normalization")
        def fun(inputs):
            return ops.InstanceNormalization()(inputs)
    else:
        def fun(inputs):
            return inputs

    def _double_conv(x, nfilt):
        # Two (3x3x3 conv -> norm -> ReLU) stages.
        for _ in range(2):
            x = Conv3D(nfilt, (3, 3, 3), padding="same")(x)
            x = Activation("relu")(fun(x))
        return x

    inputs = Input((None, None, None, input_dim * num_cams))
    # Encoder with skip connections.
    skips = []
    x = inputs
    for nfilt in (64, 128, 256):
        x = _double_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)
    x = _double_conv(x, 512)
    # Decoder: upsample, concatenate the matching skip, double-conv.
    for nfilt, skip in zip((256, 128, 64), skips[::-1]):
        up = Conv3DTranspose(nfilt, (2, 2, 2), strides=(2, 2, 2), padding="same")(x)
        x = _double_conv(concatenate([up, skip], axis=4), nfilt)
    conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(x)
    model = Model(inputs=[inputs], outputs=[conv10 if include_top else x])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def unet3d_big_IN_BN(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    last_kern_size=(1, 1, 1),
    gridsize=None,
):
    """3D U-Net with mixed normalization.

    Only the FIRST double-conv block uses the flag-selected normalization
    (batch / instance / none); every deeper block applies batch
    normalization unconditionally — hence the "IN_BN" name.
    inputs--
        lossfunc: loss function
        lr: float; learning rate
        input_dim: int; channels contributed by each camera
        feature_num: int; number of output channels (sigmoid activated)
        num_cams: int; number of cameras stacked along the channel axis
        batch_norm / instance_norm: normalization for the first block only
        include_top: if True, output the sigmoid head; otherwise output the
            final 64-channel feature volume
        last_kern_size: kernel size of the output convolution
        gridsize: unused; kept for argument consistency with other nets
    outputs--
        model: compiled Keras model object
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")
        def fun(inputs):
            return BatchNormalization()(inputs)
    elif instance_norm:
        print("using instance normalization")
        def fun(inputs):
            return ops.InstanceNormalization()(inputs)
    else:
        def fun(inputs):
            return inputs

    def _entry_conv(x, nfilt):
        # First block only: normalization chosen by the flags above.
        for _ in range(2):
            x = Conv3D(nfilt, (3, 3, 3), padding="same")(x)
            x = Activation("relu")(fun(x))
        return x

    def _bn_conv(x, nfilt):
        # All deeper blocks: batch normalization regardless of the flags.
        for _ in range(2):
            x = Conv3D(nfilt, (3, 3, 3), padding="same")(x)
            x = Activation("relu")(BatchNormalization()(x))
        return x

    inputs = Input((None, None, None, input_dim * num_cams))
    conv1 = _entry_conv(inputs, 64)
    skips = [conv1]
    x = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    for nfilt in (128, 256):
        x = _bn_conv(x, nfilt)
        skips.append(x)
        x = MaxPooling3D(pool_size=(2, 2, 2))(x)
    x = _bn_conv(x, 512)
    # Decoder: upsample, concatenate the matching skip, double-conv.
    for nfilt, skip in zip((256, 128, 64), skips[::-1]):
        up = Conv3DTranspose(nfilt, (2, 2, 2), strides=(2, 2, 2), padding="same")(x)
        x = _bn_conv(concatenate([up, skip], axis=4), nfilt)
    conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(x)
    model = Model(inputs=[inputs], outputs=[conv10 if include_top else x])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def unet3d_big_regularized(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    last_kern_size=(1, 1, 1),
    gridsize=None,
    regularizer=regularizers.l2(0.005),
):
    """Build an L2-regularized 3D U-Net.

    Args:
        lossfunc: loss function handed to ``model.compile``.
        lr: learning rate for the Adam optimizer.
        input_dim: number of input channels per camera.
        feature_num: number of output channels of the sigmoid head.
        num_cams: number of cameras; the input carries
            ``input_dim * num_cams`` channels.
        batch_norm: apply BatchNormalization after each conv
            (ignored when ``instance_norm`` is True).
        instance_norm: apply ops.InstanceNormalization after each conv;
            takes precedence over ``batch_norm``.
        include_top: when True the model outputs the sigmoid head,
            otherwise the last 64-channel feature map.
        last_kern_size: kernel size of the output conv layer.
        gridsize: unused; kept for argument consistency with other nets.
        regularizer: kernel regularizer for most conv layers. The default
            is built once at definition time, so calls share one object —
            harmless for a stateless regularizer such as l2.

    Returns:
        A compiled tf.keras Model.
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")

        def norm(tensor):
            return BatchNormalization()(tensor)

    elif instance_norm:
        print("using instance normalization")

        def norm(tensor):
            return ops.InstanceNormalization()(tensor)

    else:

        def norm(tensor):
            # No normalization requested; pass the tensor through.
            return tensor

    def conv_pair(tensor, n_filters, reg):
        # Two 3x3x3 convolutions, each followed by (optional) norm + ReLU.
        for _ in range(2):
            tensor = Conv3D(
                n_filters, (3, 3, 3), padding="same", kernel_regularizer=reg
            )(tensor)
            tensor = Activation("relu")(norm(tensor))
        return tensor

    def up_and_merge(tensor, skip, n_filters):
        # Transposed-conv upsampling concatenated with the skip connection.
        upsampled = Conv3DTranspose(
            n_filters, (2, 2, 2), strides=(2, 2, 2), padding="same"
        )(tensor)
        return concatenate([upsampled, skip], axis=4)

    inputs = Input((None, None, None, input_dim * num_cams))

    # Encoder. The first stage is deliberately unregularized, matching the
    # original architecture; deeper stages use the supplied regularizer.
    conv1 = conv_pair(inputs, 64, None)
    conv2 = conv_pair(MaxPooling3D(pool_size=(2, 2, 2))(conv1), 128, regularizer)
    conv3 = conv_pair(MaxPooling3D(pool_size=(2, 2, 2))(conv2), 256, regularizer)
    conv4 = conv_pair(MaxPooling3D(pool_size=(2, 2, 2))(conv3), 512, regularizer)

    # Decoder with skip connections back to the encoder stages.
    conv6 = conv_pair(up_and_merge(conv4, conv3, 256), 256, regularizer)
    conv7 = conv_pair(up_and_merge(conv6, conv2, 128), 128, regularizer)
    conv8 = conv_pair(up_and_merge(conv7, conv1, 64), 64, regularizer)

    conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8)

    head = conv10 if include_top else conv8
    model = Model(inputs=[inputs], outputs=[head])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def finetune_AVG(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    new_last_kern_size,
    new_n_channels_out,
    weightspath,
    num_layers_locked=2,
    batch_norm=False,
    instance_norm=False,
    gridsize=(64, 64, 64),
):
    """Set up the spatial-average network for fine-tuning.

    Builds the headless expected-value U-Net, loads pretrained weights from
    ``weightspath``, freezes the leading layers, and attaches a fresh linear
    conv head followed by a spatial-softmax expected-value output.

    num_layers_locked (int) is the number of layers, starting from the input
    layer, that will be locked (non-trainable) during fine-tuning.
    """
    base = unet3d_big_expectedvalue(
        lossfunc,
        lr,
        input_dim,
        feature_num,
        num_cams,
        gridsize,
        batch_norm,
        instance_norm,
        include_top=False,
    )

    # Sanity check that loading the weights actually changed the model:
    # compare one kernel entry before and after the load.
    pre = base.get_weights()
    base = renameLayers(base, weightspath)
    post = base.get_weights()
    print("evaluating weight deltas in the first conv layer")
    print("pre-weights")
    print(pre[1][0])
    print("post-weights")
    print(post[1][0])
    print("delta:")
    print(np.sum(pre[1][0] - post[1][0]))

    # Freeze the requested number of leading layers.
    for frozen in base.layers[:num_layers_locked]:
        frozen.trainable = False

    # Fresh input -> pretrained trunk -> new linear head -> expected value.
    input_ = Input((*gridsize, input_dim * num_cams))
    trunk_out = base(input_)
    new_conv = Conv3D(
        new_n_channels_out, new_last_kern_size, activation="linear", padding="same"
    )(trunk_out)
    grid_centers = Input((None, 3))
    softmaxed = Lambda(lambda x: ops.spatial_softmax(x))(new_conv)
    output = Lambda(lambda x: ops.expected_value_3d(x[0], x[1]))(
        [softmaxed, grid_centers]
    )
    return Model(inputs=[input_, grid_centers], outputs=[output])
def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group.

    This method deals with an inherent problem of HDF5 files, which are
    not able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.

    Arguments:
        group: A pointer to a HDF5 group.
        name: A name of the attributes to load.
    Returns:
        data: Attributes data, as a list of str.
    From the TF/keras hdf5_format.py
    """
    # Full-model save files keep these attributes under the
    # "model_weights" subgroup; fall back to it when absent at top level.
    if name not in group.attrs:
        group = group["model_weights"]
    # h5py < 3.0 returns string attributes as bytes; h5py >= 3.0 returns
    # them as str, where .decode() would raise AttributeError. Decode only
    # when needed so both versions work.
    return [
        n.decode("utf8") if isinstance(n, bytes) else n
        for n in group.attrs[name]
    ]
def renameLayers(model, weightspath):
    """
    Rename layers in the model if we detect differences from the layer names in
    the weights file.

    Models saved under TF1 auto-name their layers differently than TF2, so
    ``load_weights(..., by_name=True)`` would silently skip every mismatched
    layer. This temporarily renames the in-memory model's layers to match the
    HDF5 file, loads the weights, then restores the original names.

    Args:
        model: a tf.keras Model whose layers may need renaming.
        weightspath: path to an HDF5 weights file.

    Returns:
        The same model, with weights loaded and its layer names restored.
    """
    with h5py.File(weightspath, "r") as f:
        lnames = load_attributes_from_hdf5_group(f, "layer_names")
    tf2_names = []
    for (i, layer) in enumerate(model.layers):
        # Remember the current (TF2) name so it can be restored after loading.
        # NOTE(review): this assumes model.layers and the stored layer_names
        # list correspond one-to-one, in order — confirm for new model types.
        tf2_names.append(layer.name)
        if layer.name != lnames[i]:
            print(
                "Correcting mismatch in layer name, model: {}, weights: {}".format(
                    layer.name, lnames[i]
                )
            )
            # layer.name has no public setter; writing the private _name
            # attribute is the conventional workaround.
            layer._name = lnames[i]
    model.load_weights(weightspath, by_name=True)
    # We need to change the model layer names back to the TF2 version otherwise the model
    # won't save
    # If no layer names were changed, this won't do anything.
    for (i, layer) in enumerate(model.layers):
        layer._name = tf2_names[i]
    return model
def finetune_MAX(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    new_last_kern_size,
    new_n_channels_out,
    weightspath,
    num_layers_locked=2,
    batch_norm=False,
    instance_norm=False,
    gridsize=(64, 64, 64),
):
    """Set up the argmax network for fine-tuning.

    Builds the headless U-Net, loads pretrained weights from ``weightspath``
    (renaming layers when the file predates TF2 naming), freezes the first
    ``num_layers_locked`` layers, and attaches a new sigmoid conv head.
    ``gridsize`` is unused here; kept for signature consistency.
    """
    base = unet3d_big(
        lossfunc,
        lr,
        input_dim,
        feature_num,
        num_cams,
        batch_norm,
        instance_norm,
        include_top=False,
    )
    # TF1-era checkpoints name layers differently than TF2 models, so a
    # plain by-name load would silently skip them; renameLayers aligns the
    # names before loading and restores them afterwards.
    base = renameLayers(base, weightspath)
    # Lock the requested number of leading layers.
    for frozen in base.layers[:num_layers_locked]:
        frozen.trainable = False
    # Run the pretrained trunk on a fresh input and attach the new head.
    input_ = Input((None, None, None, input_dim * num_cams))
    trunk_out = base(input_)
    new_conv = Conv3D(
        new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same"
    )(trunk_out)
    return Model(inputs=[input_], outputs=[new_conv])
def finetune_MAX_IN_BN(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    new_last_kern_size,
    new_n_channels_out,
    weightspath,
    num_layers_locked=2,
    batch_norm=False,
    instance_norm=False,
    gridsize=(64, 64, 64),
):
    """Set up the argmax IN/BN network for fine-tuning.

    Builds the headless IN/BN U-Net, loads pretrained weights by name from
    ``weightspath``, freezes the first ``num_layers_locked`` layers, and
    attaches a new sigmoid conv head. ``gridsize`` is unused here; kept for
    signature consistency.
    """
    base = unet3d_big_IN_BN(
        lossfunc,
        lr,
        input_dim,
        feature_num,
        num_cams,
        batch_norm,
        instance_norm,
        include_top=False,
    )
    # Load the pretrained weights directly (no layer renaming needed here).
    base.load_weights(weightspath, by_name=True)
    # Lock the requested number of leading layers.
    for frozen in base.layers[:num_layers_locked]:
        frozen.trainable = False
    # Run the pretrained trunk on a fresh input and attach the new head.
    input_ = Input((None, None, None, input_dim * num_cams))
    trunk_out = base(input_)
    new_conv = Conv3D(
        new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same"
    )(trunk_out)
    return Model(inputs=[input_], outputs=[new_conv])
def finetune_MAX_regularized(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    new_last_kern_size,
    new_n_channels_out,
    weightspath,
    num_layers_locked=2,
    batch_norm=False,
    instance_norm=False,
    gridsize=(64, 64, 64),
):
    """Set up the regularized argmax network for fine-tuning.

    Builds the headless regularized U-Net, loads pretrained weights by name
    from ``weightspath``, freezes the first ``num_layers_locked`` layers, and
    attaches a new sigmoid conv head. ``gridsize`` is unused here; kept for
    signature consistency.
    """
    base = unet3d_big_regularized(
        lossfunc,
        lr,
        input_dim,
        feature_num,
        num_cams,
        batch_norm,
        instance_norm,
        include_top=False,
    )
    # Load the pretrained weights directly (no layer renaming needed here).
    base.load_weights(weightspath, by_name=True)
    # Lock the requested number of leading layers.
    for frozen in base.layers[:num_layers_locked]:
        frozen.trainable = False
    # Run the pretrained trunk on a fresh input and attach the new head.
    input_ = Input((None, None, None, input_dim * num_cams))
    trunk_out = base(input_)
    new_conv = Conv3D(
        new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same"
    )(trunk_out)
    return Model(inputs=[input_], outputs=[new_conv])
| [
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.layers.BatchNormalization",
"dannce.engine.ops.InstanceNormalization",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Conv2D",
"dannce.engine.ops.spatial_softmax",
"tensorflow.keras.utils.multi_gpu_model",
"tensorflow.keras.models.Model",
... | [((1283, 1313), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, input_dim)'], {}), '((None, None, input_dim))\n', (1288, 1313), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5074, 5104), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, input_dim)'], {}), '((None, None, input_dim))\n', (5079, 5104), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((8985, 9015), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, input_dim)'], {}), '((None, None, input_dim))\n', (8990, 9015), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((12889, 12929), 'tensorflow.keras.layers.Input', 'Input', (['(*gridsize, input_dim * num_cams)'], {}), '((*gridsize, input_dim * num_cams))\n', (12894, 12929), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((12948, 12985), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (12954, 12985), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((15156, 15172), 'tensorflow.keras.layers.Input', 'Input', (['(None, 3)'], {}), '((None, 3))\n', (15161, 15172), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((16991, 17031), 'tensorflow.keras.layers.Input', 'Input', (['(*gridsize, input_dim * num_cams)'], {}), '((*gridsize, input_dim * num_cams))\n', (16996, 17031), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((17050, 17087), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (17056, 17087), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((19452, 19468), 'tensorflow.keras.layers.Input', 'Input', (['(None, 3)'], {}), '((None, 3))\n', (19457, 19468), 
False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((20941, 20977), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim)'], {}), '((None, None, None, input_dim))\n', (20946, 20977), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((20996, 21033), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (21002, 21033), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((23181, 23221), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (23186, 23221), False, 'from tensorflow.keras.models import Model\n'), ((23993, 24040), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim * num_cams)'], {}), '((None, None, None, input_dim * num_cams))\n', (23998, 24040), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((24059, 24096), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (24065, 24096), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((26455, 26495), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (26460, 26495), False, 'from tensorflow.keras.models import Model\n'), ((27256, 27303), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim * num_cams)'], {}), '((None, None, None, input_dim * num_cams))\n', (27261, 27303), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((30378, 30425), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim * num_cams)'], {}), '((None, None, None, input_dim * num_cams))\n', (30383, 30425), False, 'from 
tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((33262, 33284), 'tensorflow.keras.regularizers.l2', 'regularizers.l2', (['(0.005)'], {}), '(0.005)\n', (33277, 33284), False, 'from tensorflow.keras import regularizers\n'), ((33750, 33797), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim * num_cams)'], {}), '((None, None, None, input_dim * num_cams))\n', (33755, 33797), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((37989, 38029), 'tensorflow.keras.layers.Input', 'Input', (['(*gridsize, input_dim * num_cams)'], {}), '((*gridsize, input_dim * num_cams))\n', (37994, 38029), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((38235, 38251), 'tensorflow.keras.layers.Input', 'Input', (['(None, 3)'], {}), '((None, 3))\n', (38240, 38251), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((38440, 38494), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input_, grid_centers]', 'outputs': '[output]'}), '(inputs=[input_, grid_centers], outputs=[output])\n', (38445, 38494), False, 'from tensorflow.keras.models import Model\n'), ((41125, 41172), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim * num_cams)'], {}), '((None, None, None, input_dim * num_cams))\n', (41130, 41172), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((41372, 41414), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input_]', 'outputs': '[new_conv]'}), '(inputs=[input_], outputs=[new_conv])\n', (41377, 41414), False, 'from tensorflow.keras.models import Model\n'), ((42277, 42324), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim * num_cams)'], {}), '((None, None, None, input_dim * num_cams))\n', (42282, 42324), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((42524, 42566), 
'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input_]', 'outputs': '[new_conv]'}), '(inputs=[input_], outputs=[new_conv])\n', (42529, 42566), False, 'from tensorflow.keras.models import Model\n'), ((43441, 43488), 'tensorflow.keras.layers.Input', 'Input', (['(None, None, None, input_dim * num_cams)'], {}), '((None, None, None, input_dim * num_cams))\n', (43446, 43488), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((43688, 43730), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input_]', 'outputs': '[new_conv]'}), '(inputs=[input_], outputs=[new_conv])\n', (43693, 43730), False, 'from tensorflow.keras.models import Model\n'), ((1326, 1360), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (1332, 1360), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((1381, 1399), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1391, 1399), False, 'from tensorflow.keras.layers import Activation\n'), ((1441, 1475), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (1447, 1475), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((1495, 1513), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1505, 1513), False, 'from tensorflow.keras.layers import Activation\n'), ((1555, 1585), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1567, 1585), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((1606, 1640), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (1612, 1640), False, 'from tensorflow.keras.layers import Input, concatenate, 
Conv2D, MaxPooling2D\n'), ((1660, 1678), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1670, 1678), False, 'from tensorflow.keras.layers import Activation\n'), ((1720, 1754), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (1726, 1754), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((1774, 1792), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1784, 1792), False, 'from tensorflow.keras.layers import Activation\n'), ((1834, 1864), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1846, 1864), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((1885, 1920), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (1891, 1920), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((1940, 1958), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1950, 1958), False, 'from tensorflow.keras.layers import Activation\n'), ((2000, 2035), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (2006, 2035), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2055, 2073), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2065, 2073), False, 'from tensorflow.keras.layers import Activation\n'), ((2115, 2145), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2127, 2145), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2166, 2201), 'tensorflow.keras.layers.Conv2D', 'Conv2D', 
(['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (2172, 2201), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2221, 2239), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2231, 2239), False, 'from tensorflow.keras.layers import Activation\n'), ((2281, 2316), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (2287, 2316), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2336, 2354), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2346, 2354), False, 'from tensorflow.keras.layers import Activation\n'), ((2396, 2426), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2408, 2426), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2447, 2482), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3), padding='same')\n", (2453, 2482), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2502, 2520), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2512, 2520), False, 'from tensorflow.keras.layers import Activation\n'), ((2562, 2597), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3), padding='same')\n", (2568, 2597), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2617, 2635), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2627, 2635), False, 'from tensorflow.keras.layers import Activation\n'), ((2809, 2844), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), 
padding='same')\n", (2815, 2844), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2862, 2880), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2872, 2880), False, 'from tensorflow.keras.layers import Activation\n'), ((2922, 2957), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (2928, 2957), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((2977, 2995), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2987, 2995), False, 'from tensorflow.keras.layers import Activation\n'), ((3169, 3204), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (3175, 3204), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((3222, 3240), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3232, 3240), False, 'from tensorflow.keras.layers import Activation\n'), ((3282, 3317), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (3288, 3317), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((3337, 3355), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3347, 3355), False, 'from tensorflow.keras.layers import Activation\n'), ((3528, 3562), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (3534, 3562), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((3580, 3598), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3590, 3598), False, 'from tensorflow.keras.layers import Activation\n'), 
((3640, 3674), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (3646, 3674), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((3694, 3712), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3704, 3712), False, 'from tensorflow.keras.layers import Activation\n'), ((3885, 3919), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (3891, 3919), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((3937, 3955), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3947, 3955), False, 'from tensorflow.keras.layers import Activation\n'), ((3997, 4031), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (4003, 4031), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((4051, 4069), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4061, 4069), False, 'from tensorflow.keras.layers import Activation\n'), ((4113, 4162), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['feature_num', '(1, 1)'], {'activation': '"""sigmoid"""'}), "(feature_num, (1, 1), activation='sigmoid')\n", (4119, 4162), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((4207, 4247), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (4212, 4247), False, 'from tensorflow.keras.models import Model\n'), ((4274, 4313), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv9]'}), '(inputs=[inputs], outputs=[conv9])\n', (4279, 4313), False, 'from tensorflow.keras.models import Model\n'), ((4348, 4378), 
'tensorflow.keras.utils.multi_gpu_model', 'multi_gpu_model', (['model'], {'gpus': '(2)'}), '(model, gpus=2)\n', (4363, 4378), False, 'from tensorflow.keras.utils import multi_gpu_model\n'), ((5117, 5151), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (5123, 5151), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5172, 5190), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5182, 5190), False, 'from tensorflow.keras.layers import Activation\n'), ((5239, 5273), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (5245, 5273), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5293, 5311), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5303, 5311), False, 'from tensorflow.keras.layers import Activation\n'), ((5360, 5390), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5372, 5390), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5411, 5445), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (5417, 5445), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5465, 5483), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5475, 5483), False, 'from tensorflow.keras.layers import Activation\n'), ((5532, 5566), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (5538, 5566), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5586, 5604), 'tensorflow.keras.layers.Activation', 
'Activation', (['"""relu"""'], {}), "('relu')\n", (5596, 5604), False, 'from tensorflow.keras.layers import Activation\n'), ((5653, 5683), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5665, 5683), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5704, 5739), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (5710, 5739), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5759, 5777), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5769, 5777), False, 'from tensorflow.keras.layers import Activation\n'), ((5826, 5861), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (5832, 5861), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5881, 5899), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5891, 5899), False, 'from tensorflow.keras.layers import Activation\n'), ((5948, 5978), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5960, 5978), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((5999, 6034), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (6005, 6034), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((6054, 6072), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6064, 6072), False, 'from tensorflow.keras.layers import Activation\n'), ((6121, 6156), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), 
padding='same')\n", (6127, 6156), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((6176, 6194), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6186, 6194), False, 'from tensorflow.keras.layers import Activation\n'), ((6243, 6273), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6255, 6273), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((6294, 6329), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3), padding='same')\n", (6300, 6329), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((6349, 6367), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6359, 6367), False, 'from tensorflow.keras.layers import Activation\n'), ((6416, 6451), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3), padding='same')\n", (6422, 6451), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((6471, 6489), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6481, 6489), False, 'from tensorflow.keras.layers import Activation\n'), ((6670, 6705), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (6676, 6705), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((6723, 6741), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6733, 6741), False, 'from tensorflow.keras.layers import Activation\n'), ((6790, 6825), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (6796, 6825), False, 'from 
tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((6845, 6863), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (6855, 6863), False, 'from tensorflow.keras.layers import Activation\n'), ((7044, 7079), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (7050, 7079), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((7097, 7115), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7107, 7115), False, 'from tensorflow.keras.layers import Activation\n'), ((7164, 7199), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (7170, 7199), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((7219, 7237), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7229, 7237), False, 'from tensorflow.keras.layers import Activation\n'), ((7417, 7451), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (7423, 7451), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((7469, 7487), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7479, 7487), False, 'from tensorflow.keras.layers import Activation\n'), ((7536, 7570), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (7542, 7570), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((7590, 7608), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7600, 7608), False, 'from tensorflow.keras.layers import Activation\n'), ((7788, 7822), 'tensorflow.keras.layers.Conv2D', 
'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (7794, 7822), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((7840, 7858), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7850, 7858), False, 'from tensorflow.keras.layers import Activation\n'), ((7907, 7941), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (7913, 7941), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((7961, 7979), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7971, 7979), False, 'from tensorflow.keras.layers import Activation\n'), ((8030, 8079), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['feature_num', '(1, 1)'], {'activation': '"""sigmoid"""'}), "(feature_num, (1, 1), activation='sigmoid')\n", (8036, 8079), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((8124, 8164), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (8129, 8164), False, 'from tensorflow.keras.models import Model\n'), ((8191, 8230), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv9]'}), '(inputs=[inputs], outputs=[conv9])\n', (8196, 8230), False, 'from tensorflow.keras.models import Model\n'), ((8265, 8295), 'tensorflow.keras.utils.multi_gpu_model', 'multi_gpu_model', (['model'], {'gpus': '(2)'}), '(model, gpus=2)\n', (8280, 8295), False, 'from tensorflow.keras.utils import multi_gpu_model\n'), ((9028, 9062), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (9034, 9062), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9083, 9101), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9093, 9101), False, 'from tensorflow.keras.layers import Activation\n'), ((9150, 9184), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (9156, 9184), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9204, 9222), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9214, 9222), False, 'from tensorflow.keras.layers import Activation\n'), ((9271, 9301), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (9283, 9301), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9322, 9356), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (9328, 9356), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9376, 9394), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9386, 9394), False, 'from tensorflow.keras.layers import Activation\n'), ((9443, 9477), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (9449, 9477), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9497, 9515), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9507, 9515), False, 'from tensorflow.keras.layers import Activation\n'), ((9564, 9594), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (9576, 9594), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9615, 9650), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), 
"(128, (3, 3), padding='same')\n", (9621, 9650), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9670, 9688), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9680, 9688), False, 'from tensorflow.keras.layers import Activation\n'), ((9737, 9772), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (9743, 9772), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9792, 9810), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9802, 9810), False, 'from tensorflow.keras.layers import Activation\n'), ((9859, 9889), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (9871, 9889), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9910, 9945), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (9916, 9945), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((9965, 9983), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (9975, 9983), False, 'from tensorflow.keras.layers import Activation\n'), ((10032, 10067), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (10038, 10067), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((10087, 10105), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10097, 10105), False, 'from tensorflow.keras.layers import Activation\n'), ((10154, 10184), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (10166, 10184), False, 'from 
tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((10205, 10240), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3), padding='same')\n", (10211, 10240), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((10260, 10278), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10270, 10278), False, 'from tensorflow.keras.layers import Activation\n'), ((10327, 10362), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(512)', '(3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3), padding='same')\n", (10333, 10362), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((10382, 10400), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10392, 10400), False, 'from tensorflow.keras.layers import Activation\n'), ((10558, 10593), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (10564, 10593), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((10611, 10629), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10621, 10629), False, 'from tensorflow.keras.layers import Activation\n'), ((10678, 10713), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3), padding='same')\n", (10684, 10713), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((10733, 10751), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10743, 10751), False, 'from tensorflow.keras.layers import Activation\n'), ((10909, 10944), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (10915, 10944), False, 'from 
tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((10962, 10980), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (10972, 10980), False, 'from tensorflow.keras.layers import Activation\n'), ((11029, 11064), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3), padding='same')\n", (11035, 11064), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((11084, 11102), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11094, 11102), False, 'from tensorflow.keras.layers import Activation\n'), ((11259, 11293), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (11265, 11293), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((11311, 11329), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11321, 11329), False, 'from tensorflow.keras.layers import Activation\n'), ((11378, 11412), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (11384, 11412), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((11432, 11450), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11442, 11450), False, 'from tensorflow.keras.layers import Activation\n'), ((11607, 11641), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (11613, 11641), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((11659, 11677), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11669, 11677), False, 'from tensorflow.keras.layers import Activation\n'), ((11726, 
11760), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""'}), "(32, (3, 3), padding='same')\n", (11732, 11760), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((11780, 11798), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11790, 11798), False, 'from tensorflow.keras.layers import Activation\n'), ((11849, 11898), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['feature_num', '(1, 1)'], {'activation': '"""sigmoid"""'}), "(feature_num, (1, 1), activation='sigmoid')\n", (11855, 11898), False, 'from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D\n'), ((11944, 11984), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (11949, 11984), False, 'from tensorflow.keras.models import Model\n'), ((12011, 12050), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv9]'}), '(inputs=[inputs], outputs=[conv9])\n', (12016, 12050), False, 'from tensorflow.keras.models import Model\n'), ((12085, 12115), 'tensorflow.keras.utils.multi_gpu_model', 'multi_gpu_model', (['model'], {'gpus': '(2)'}), '(model, gpus=2)\n', (12100, 12115), False, 'from tensorflow.keras.utils import multi_gpu_model\n'), ((13031, 13049), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13041, 13049), False, 'from tensorflow.keras.layers import Activation\n'), ((13074, 13111), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (13080, 13111), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((13131, 13149), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13141, 13149), False, 'from tensorflow.keras.layers import Activation\n'), ((13174, 13207), 
'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (13186, 13207), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((13228, 13266), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (13234, 13266), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((13286, 13304), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13296, 13304), False, 'from tensorflow.keras.layers import Activation\n'), ((13329, 13367), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (13335, 13367), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((13387, 13405), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13397, 13405), False, 'from tensorflow.keras.layers import Activation\n'), ((13430, 13463), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (13442, 13463), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((13484, 13522), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (13490, 13522), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((13542, 13560), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13552, 13560), False, 'from tensorflow.keras.layers import Activation\n'), ((13585, 13623), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (13591, 13623), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((13643, 13661), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13653, 13661), False, 'from tensorflow.keras.layers import Activation\n'), ((13686, 13719), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (13698, 13719), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((13740, 13778), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (13746, 13778), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((13798, 13816), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13808, 13816), False, 'from tensorflow.keras.layers import Activation\n'), ((13841, 13879), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (13847, 13879), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((13899, 13917), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (13909, 13917), False, 'from tensorflow.keras.layers import Activation\n'), ((14115, 14153), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (14121, 14153), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((14171, 14189), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14181, 14189), False, 'from tensorflow.keras.layers import Activation\n'), ((14214, 14252), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (14220, 14252), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((14272, 14290), 'tensorflow.keras.layers.Activation', 
'Activation', (['"""relu"""'], {}), "('relu')\n", (14282, 14290), False, 'from tensorflow.keras.layers import Activation\n'), ((14488, 14526), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (14494, 14526), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((14544, 14562), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14554, 14562), False, 'from tensorflow.keras.layers import Activation\n'), ((14587, 14625), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (14593, 14625), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((14645, 14663), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14655, 14663), False, 'from tensorflow.keras.layers import Activation\n'), ((14860, 14897), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (14866, 14897), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((14915, 14933), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (14925, 14933), False, 'from tensorflow.keras.layers import Activation\n'), ((14958, 14995), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (14964, 14995), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((15015, 15033), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (15025, 15033), False, 'from tensorflow.keras.layers import Activation\n'), ((15060, 15128), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['feature_num', 'out_kernel'], {'activation': '"""linear"""', 'padding': '"""same"""'}), 
"(feature_num, out_kernel, activation='linear', padding='same')\n", (15066, 15128), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((15824, 15863), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv8]'}), '(inputs=[inputs], outputs=[conv8])\n', (15829, 15863), False, 'from tensorflow.keras.models import Model\n'), ((16126, 16142), 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['inp'], {}), '(inp)\n', (16137, 16142), True, 'from tensorflow.keras import backend as K\n'), ((17316, 17321), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (17319, 17321), False, 'from tensorflow.keras.layers import Add\n'), ((17344, 17362), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17354, 17362), False, 'from tensorflow.keras.layers import Activation\n'), ((17387, 17424), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (17393, 17424), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((17444, 17462), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17454, 17462), False, 'from tensorflow.keras.layers import Activation\n'), ((17487, 17520), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (17499, 17520), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((17541, 17579), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (17547, 17579), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((17599, 17617), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17609, 17617), False, 'from tensorflow.keras.layers import Activation\n'), ((17642, 17680), 
'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (17648, 17680), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((17700, 17718), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17710, 17718), False, 'from tensorflow.keras.layers import Activation\n'), ((17743, 17776), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (17755, 17776), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((17797, 17835), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (17803, 17835), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((17855, 17873), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17865, 17873), False, 'from tensorflow.keras.layers import Activation\n'), ((17898, 17936), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (17904, 17936), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((17956, 17974), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (17966, 17974), False, 'from tensorflow.keras.layers import Activation\n'), ((17999, 18032), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (18011, 18032), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((18053, 18091), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (18059, 18091), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((18111, 18129), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18121, 18129), False, 'from tensorflow.keras.layers import Activation\n'), ((18154, 18192), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (18160, 18192), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((18212, 18230), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18222, 18230), False, 'from tensorflow.keras.layers import Activation\n'), ((18428, 18466), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (18434, 18466), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((18484, 18502), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18494, 18502), False, 'from tensorflow.keras.layers import Activation\n'), ((18527, 18565), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (18533, 18565), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((18585, 18603), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18595, 18603), False, 'from tensorflow.keras.layers import Activation\n'), ((18801, 18839), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (18807, 18839), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((18857, 18875), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18867, 18875), False, 'from tensorflow.keras.layers import Activation\n'), ((18900, 18938), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), 
"(128, (3, 3, 3), padding='same')\n", (18906, 18938), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((18958, 18976), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (18968, 18976), False, 'from tensorflow.keras.layers import Activation\n'), ((19173, 19210), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (19179, 19210), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((19228, 19246), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (19238, 19246), False, 'from tensorflow.keras.layers import Activation\n'), ((19271, 19308), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (19277, 19308), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((19328, 19346), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (19338, 19346), False, 'from tensorflow.keras.layers import Activation\n'), ((19373, 19424), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['feature_num', '(1, 1, 1)'], {'activation': '"""linear"""'}), "(feature_num, (1, 1, 1), activation='linear')\n", (19379, 19424), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((20120, 20159), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv8]'}), '(inputs=[inputs], outputs=[conv8])\n', (20125, 20159), False, 'from tensorflow.keras.models import Model\n'), ((21079, 21097), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21089, 21097), False, 'from tensorflow.keras.layers import Activation\n'), ((21122, 21159), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", 
(21128, 21159), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((21179, 21197), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21189, 21197), False, 'from tensorflow.keras.layers import Activation\n'), ((21222, 21255), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (21234, 21255), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((21276, 21314), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (21282, 21314), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((21334, 21352), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21344, 21352), False, 'from tensorflow.keras.layers import Activation\n'), ((21377, 21415), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (21383, 21415), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((21435, 21453), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21445, 21453), False, 'from tensorflow.keras.layers import Activation\n'), ((21478, 21511), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (21490, 21511), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((21532, 21570), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (21538, 21570), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((21590, 21608), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21600, 21608), False, 'from 
tensorflow.keras.layers import Activation\n'), ((21633, 21671), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (21639, 21671), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((21691, 21709), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21701, 21709), False, 'from tensorflow.keras.layers import Activation\n'), ((21734, 21767), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (21746, 21767), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((21788, 21826), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (21794, 21826), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((21846, 21864), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21856, 21864), False, 'from tensorflow.keras.layers import Activation\n'), ((21889, 21927), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (21895, 21927), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((21947, 21965), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (21957, 21965), False, 'from tensorflow.keras.layers import Activation\n'), ((22163, 22201), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (22169, 22201), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((22219, 22237), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22229, 22237), False, 'from tensorflow.keras.layers import Activation\n'), 
((22262, 22300), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (22268, 22300), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((22320, 22338), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22330, 22338), False, 'from tensorflow.keras.layers import Activation\n'), ((22536, 22574), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (22542, 22574), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((22592, 22610), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22602, 22610), False, 'from tensorflow.keras.layers import Activation\n'), ((22635, 22673), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (22641, 22673), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((22693, 22711), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22703, 22711), False, 'from tensorflow.keras.layers import Activation\n'), ((22908, 22945), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (22914, 22945), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((22963, 22981), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (22973, 22981), False, 'from tensorflow.keras.layers import Activation\n'), ((23006, 23043), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (23012, 23043), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((23063, 23081), 
'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (23073, 23081), False, 'from tensorflow.keras.layers import Activation\n'), ((23108, 23160), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['feature_num', '(1, 1, 1)'], {'activation': '"""sigmoid"""'}), "(feature_num, (1, 1, 1), activation='sigmoid')\n", (23114, 23160), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((24325, 24330), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (24328, 24330), False, 'from tensorflow.keras.layers import Add\n'), ((24353, 24371), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (24363, 24371), False, 'from tensorflow.keras.layers import Activation\n'), ((24396, 24433), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (24402, 24433), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((24453, 24471), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (24463, 24471), False, 'from tensorflow.keras.layers import Activation\n'), ((24496, 24529), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (24508, 24529), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((24550, 24588), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (24556, 24588), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((24608, 24626), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (24618, 24626), False, 'from tensorflow.keras.layers import Activation\n'), ((24651, 24689), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), 
padding='same')\n", (24657, 24689), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((24709, 24727), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (24719, 24727), False, 'from tensorflow.keras.layers import Activation\n'), ((24752, 24785), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (24764, 24785), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((24806, 24844), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (24812, 24844), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((24864, 24882), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (24874, 24882), False, 'from tensorflow.keras.layers import Activation\n'), ((24907, 24945), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (24913, 24945), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((24965, 24983), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (24975, 24983), False, 'from tensorflow.keras.layers import Activation\n'), ((25008, 25041), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (25020, 25041), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((25062, 25100), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (25068, 25100), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((25120, 25138), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25130, 25138), False, 'from 
tensorflow.keras.layers import Activation\n'), ((25163, 25201), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (25169, 25201), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((25221, 25239), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25231, 25239), False, 'from tensorflow.keras.layers import Activation\n'), ((25437, 25475), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (25443, 25475), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((25493, 25511), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25503, 25511), False, 'from tensorflow.keras.layers import Activation\n'), ((25536, 25574), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (25542, 25574), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((25594, 25612), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25604, 25612), False, 'from tensorflow.keras.layers import Activation\n'), ((25810, 25848), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (25816, 25848), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((25866, 25884), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25876, 25884), False, 'from tensorflow.keras.layers import Activation\n'), ((25909, 25947), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (25915, 25947), False, 'from tensorflow.keras.layers import Conv2DTranspose, 
Conv3D, Lambda\n'), ((25967, 25985), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (25977, 25985), False, 'from tensorflow.keras.layers import Activation\n'), ((26182, 26219), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (26188, 26219), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((26237, 26255), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (26247, 26255), False, 'from tensorflow.keras.layers import Activation\n'), ((26280, 26317), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (26286, 26317), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((26337, 26355), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (26347, 26355), False, 'from tensorflow.keras.layers import Activation\n'), ((26382, 26434), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['feature_num', '(1, 1, 1)'], {'activation': '"""sigmoid"""'}), "(feature_num, (1, 1, 1), activation='sigmoid')\n", (26388, 26434), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((27316, 27353), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (27322, 27353), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((27374, 27392), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (27384, 27392), False, 'from tensorflow.keras.layers import Activation\n'), ((27417, 27454), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (27423, 27454), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, 
Lambda\n'), ((27474, 27492), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (27484, 27492), False, 'from tensorflow.keras.layers import Activation\n'), ((27517, 27550), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (27529, 27550), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((27571, 27609), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (27577, 27609), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((27629, 27647), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (27639, 27647), False, 'from tensorflow.keras.layers import Activation\n'), ((27672, 27710), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (27678, 27710), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((27730, 27748), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (27740, 27748), False, 'from tensorflow.keras.layers import Activation\n'), ((27773, 27806), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (27785, 27806), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((27827, 27865), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (27833, 27865), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((27885, 27903), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (27895, 27903), False, 'from tensorflow.keras.layers import Activation\n'), ((27928, 27966), 'tensorflow.keras.layers.Conv3D', 
'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (27934, 27966), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((27986, 28004), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (27996, 28004), False, 'from tensorflow.keras.layers import Activation\n'), ((28029, 28062), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (28041, 28062), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((28083, 28121), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (28089, 28121), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((28141, 28159), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (28151, 28159), False, 'from tensorflow.keras.layers import Activation\n'), ((28184, 28222), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (28190, 28222), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((28242, 28260), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (28252, 28260), False, 'from tensorflow.keras.layers import Activation\n'), ((28458, 28496), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (28464, 28496), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((28514, 28532), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (28524, 28532), False, 'from tensorflow.keras.layers import Activation\n'), ((28557, 28595), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': 
'"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (28563, 28595), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((28615, 28633), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (28625, 28633), False, 'from tensorflow.keras.layers import Activation\n'), ((28831, 28869), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (28837, 28869), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((28887, 28905), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (28897, 28905), False, 'from tensorflow.keras.layers import Activation\n'), ((28930, 28968), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (28936, 28968), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((28988, 29006), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (28998, 29006), False, 'from tensorflow.keras.layers import Activation\n'), ((29203, 29240), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (29209, 29240), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((29258, 29276), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (29268, 29276), False, 'from tensorflow.keras.layers import Activation\n'), ((29301, 29338), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (29307, 29338), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((29358, 29376), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (29368, 29376), False, 
'from tensorflow.keras.layers import Activation\n'), ((29403, 29460), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['feature_num', 'last_kern_size'], {'activation': '"""sigmoid"""'}), "(feature_num, last_kern_size, activation='sigmoid')\n", (29409, 29460), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((29505, 29545), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (29510, 29545), False, 'from tensorflow.keras.models import Model\n'), ((29572, 29611), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv8]'}), '(inputs=[inputs], outputs=[conv8])\n', (29577, 29611), False, 'from tensorflow.keras.models import Model\n'), ((30438, 30475), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (30444, 30475), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((30496, 30514), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (30506, 30514), False, 'from tensorflow.keras.layers import Activation\n'), ((30539, 30576), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (30545, 30576), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((30596, 30614), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (30606, 30614), False, 'from tensorflow.keras.layers import Activation\n'), ((30639, 30672), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (30651, 30672), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((30693, 30731), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 
3), padding='same')\n", (30699, 30731), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((30751, 30769), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (30761, 30769), False, 'from tensorflow.keras.layers import Activation\n'), ((30811, 30849), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (30817, 30849), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((30869, 30887), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (30879, 30887), False, 'from tensorflow.keras.layers import Activation\n'), ((30929, 30962), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (30941, 30962), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((30983, 31021), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (30989, 31021), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((31041, 31059), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (31051, 31059), False, 'from tensorflow.keras.layers import Activation\n'), ((31101, 31139), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (31107, 31139), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((31159, 31177), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (31169, 31177), False, 'from tensorflow.keras.layers import Activation\n'), ((31219, 31252), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (31231, 31252), False, 'from 
tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((31273, 31311), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (31279, 31311), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((31331, 31349), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (31341, 31349), False, 'from tensorflow.keras.layers import Activation\n'), ((31391, 31429), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(512, (3, 3, 3), padding='same')\n", (31397, 31429), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((31449, 31467), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (31459, 31467), False, 'from tensorflow.keras.layers import Activation\n'), ((31682, 31720), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (31688, 31720), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((31738, 31756), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (31748, 31756), False, 'from tensorflow.keras.layers import Activation\n'), ((31798, 31836), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(256, (3, 3, 3), padding='same')\n", (31804, 31836), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((31856, 31874), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (31866, 31874), False, 'from tensorflow.keras.layers import Activation\n'), ((32089, 32127), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (32095, 32127), False, 'from tensorflow.keras.layers import 
Conv2DTranspose, Conv3D, Lambda\n'), ((32145, 32163), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (32155, 32163), False, 'from tensorflow.keras.layers import Activation\n'), ((32205, 32243), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(128, (3, 3, 3), padding='same')\n", (32211, 32243), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((32263, 32281), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (32273, 32281), False, 'from tensorflow.keras.layers import Activation\n'), ((32495, 32532), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (32501, 32532), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((32550, 32568), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (32560, 32568), False, 'from tensorflow.keras.layers import Activation\n'), ((32610, 32647), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (32616, 32647), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((32667, 32685), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (32677, 32685), False, 'from tensorflow.keras.layers import Activation\n'), ((32729, 32786), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['feature_num', 'last_kern_size'], {'activation': '"""sigmoid"""'}), "(feature_num, last_kern_size, activation='sigmoid')\n", (32735, 32786), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((32831, 32871), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (32836, 32871), False, 'from tensorflow.keras.models 
import Model\n'), ((32898, 32937), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv8]'}), '(inputs=[inputs], outputs=[conv8])\n', (32903, 32937), False, 'from tensorflow.keras.models import Model\n'), ((33810, 33847), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (33816, 33847), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((33868, 33886), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (33878, 33886), False, 'from tensorflow.keras.layers import Activation\n'), ((33911, 33948), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3, 3), padding='same')\n", (33917, 33948), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((33968, 33986), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (33978, 33986), False, 'from tensorflow.keras.layers import Activation\n'), ((34011, 34044), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (34023, 34044), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((34065, 34135), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(128, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (34071, 34135), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((34169, 34187), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (34179, 34187), False, 'from tensorflow.keras.layers import Activation\n'), ((34212, 34282), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(128, (3, 3, 3), 
padding='same', kernel_regularizer=regularizer)\n", (34218, 34282), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((34316, 34334), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (34326, 34334), False, 'from tensorflow.keras.layers import Activation\n'), ((34359, 34392), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (34371, 34392), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((34413, 34483), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(256, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (34419, 34483), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((34517, 34535), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (34527, 34535), False, 'from tensorflow.keras.layers import Activation\n'), ((34560, 34630), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(256, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (34566, 34630), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((34664, 34682), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (34674, 34682), False, 'from tensorflow.keras.layers import Activation\n'), ((34707, 34740), 'tensorflow.keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (34719, 34740), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((34761, 34831), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(512, (3, 3, 3), padding='same', 
kernel_regularizer=regularizer)\n", (34767, 34831), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((34865, 34883), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (34875, 34883), False, 'from tensorflow.keras.layers import Activation\n'), ((34908, 34978), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(512)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(512, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (34914, 34978), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((35012, 35030), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (35022, 35030), False, 'from tensorflow.keras.layers import Activation\n'), ((35228, 35298), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(256, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (35234, 35298), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((35316, 35334), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (35326, 35334), False, 'from tensorflow.keras.layers import Activation\n'), ((35359, 35429), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(256)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(256, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (35365, 35429), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((35463, 35481), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (35473, 35481), False, 'from tensorflow.keras.layers import Activation\n'), ((35679, 35749), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(128, (3, 3, 3), padding='same', 
kernel_regularizer=regularizer)\n", (35685, 35749), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((35767, 35785), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (35777, 35785), False, 'from tensorflow.keras.layers import Activation\n'), ((35810, 35880), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(128)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(128, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (35816, 35880), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((35914, 35932), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (35924, 35932), False, 'from tensorflow.keras.layers import Activation\n'), ((36129, 36198), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(64, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (36135, 36198), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((36216, 36234), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (36226, 36234), False, 'from tensorflow.keras.layers import Activation\n'), ((36259, 36328), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['(64)', '(3, 3, 3)'], {'padding': '"""same"""', 'kernel_regularizer': 'regularizer'}), "(64, (3, 3, 3), padding='same', kernel_regularizer=regularizer)\n", (36265, 36328), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((36348, 36366), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (36358, 36366), False, 'from tensorflow.keras.layers import Activation\n'), ((36393, 36450), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['feature_num', 'last_kern_size'], {'activation': '"""sigmoid"""'}), "(feature_num, last_kern_size, activation='sigmoid')\n", 
(36399, 36450), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((36495, 36535), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv10]'}), '(inputs=[inputs], outputs=[conv10])\n', (36500, 36535), False, 'from tensorflow.keras.models import Model\n'), ((36562, 36601), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs]', 'outputs': '[conv8]'}), '(inputs=[inputs], outputs=[conv8])\n', (36567, 36601), False, 'from tensorflow.keras.models import Model\n'), ((37775, 37805), 'numpy.sum', 'np.sum', (['(pre[1][0] - post[1][0])'], {}), '(pre[1][0] - post[1][0])\n', (37781, 37805), True, 'import numpy as np\n'), ((38108, 38196), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['new_n_channels_out', 'new_last_kern_size'], {'activation': '"""linear"""', 'padding': '"""same"""'}), "(new_n_channels_out, new_last_kern_size, activation='linear', padding\n ='same')\n", (38114, 38196), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((39281, 39308), 'h5py.File', 'h5py.File', (['weightspath', '"""r"""'], {}), "(weightspath, 'r')\n", (39290, 39308), False, 'import h5py\n'), ((41251, 41339), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['new_n_channels_out', 'new_last_kern_size'], {'activation': '"""sigmoid"""', 'padding': '"""same"""'}), "(new_n_channels_out, new_last_kern_size, activation='sigmoid',\n padding='same')\n", (41257, 41339), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((42403, 42491), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['new_n_channels_out', 'new_last_kern_size'], {'activation': '"""sigmoid"""', 'padding': '"""same"""'}), "(new_n_channels_out, new_last_kern_size, activation='sigmoid',\n padding='same')\n", (42409, 42491), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((43567, 43655), 'tensorflow.keras.layers.Conv3D', 'Conv3D', (['new_n_channels_out', 'new_last_kern_size'], 
{'activation': '"""sigmoid"""', 'padding': '"""same"""'}), "(new_n_channels_out, new_last_kern_size, activation='sigmoid',\n padding='same')\n", (43573, 43655), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((1400, 1420), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1418, 1420), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((1514, 1534), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1532, 1534), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((1679, 1699), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1697, 1699), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((1793, 1813), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1811, 1813), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((1959, 1979), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1977, 1979), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((2074, 2094), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2092, 2094), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((2240, 2260), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2258, 2260), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((2355, 2375), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2373, 2375), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((2521, 2541), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2539, 2541), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((2636, 2656), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2654, 
2656), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((2881, 2901), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2899, 2901), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((2996, 3016), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3014, 3016), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((3241, 3261), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3259, 3261), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((3356, 3376), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3374, 3376), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((3599, 3619), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3617, 3619), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((3713, 3733), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3731, 3733), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((3956, 3976), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3974, 3976), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((4070, 4090), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4088, 4090), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((4408, 4419), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (4412, 4419), False, 'from tensorflow.keras.optimizers import Adam\n'), ((5191, 5218), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (5216, 5218), True, 'from dannce.engine import ops as ops\n'), ((5312, 5339), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (5337, 5339), True, 
'from dannce.engine import ops as ops\n'), ((5484, 5511), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (5509, 5511), True, 'from dannce.engine import ops as ops\n'), ((5605, 5632), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (5630, 5632), True, 'from dannce.engine import ops as ops\n'), ((5778, 5805), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (5803, 5805), True, 'from dannce.engine import ops as ops\n'), ((5900, 5927), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (5925, 5927), True, 'from dannce.engine import ops as ops\n'), ((6073, 6100), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (6098, 6100), True, 'from dannce.engine import ops as ops\n'), ((6195, 6222), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (6220, 6222), True, 'from dannce.engine import ops as ops\n'), ((6368, 6395), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (6393, 6395), True, 'from dannce.engine import ops as ops\n'), ((6490, 6517), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (6515, 6517), True, 'from dannce.engine import ops as ops\n'), ((6742, 6769), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (6767, 6769), True, 'from dannce.engine import ops as ops\n'), ((6864, 6891), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (6889, 6891), True, 'from dannce.engine import ops as ops\n'), ((7116, 7143), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (7141, 7143), True, 'from dannce.engine import ops as ops\n'), ((7238, 7265), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', 
(7263, 7265), True, 'from dannce.engine import ops as ops\n'), ((7488, 7515), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (7513, 7515), True, 'from dannce.engine import ops as ops\n'), ((7609, 7636), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (7634, 7636), True, 'from dannce.engine import ops as ops\n'), ((7859, 7886), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (7884, 7886), True, 'from dannce.engine import ops as ops\n'), ((7980, 8007), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (8005, 8007), True, 'from dannce.engine import ops as ops\n'), ((8325, 8336), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (8329, 8336), False, 'from tensorflow.keras.optimizers import Adam\n'), ((9102, 9129), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (9127, 9129), True, 'from dannce.engine import ops as ops\n'), ((9223, 9250), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (9248, 9250), True, 'from dannce.engine import ops as ops\n'), ((9395, 9422), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (9420, 9422), True, 'from dannce.engine import ops as ops\n'), ((9516, 9543), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (9541, 9543), True, 'from dannce.engine import ops as ops\n'), ((9689, 9716), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (9714, 9716), True, 'from dannce.engine import ops as ops\n'), ((9811, 9838), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (9836, 9838), True, 'from dannce.engine import ops as ops\n'), ((9984, 10011), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], 
{}), '()\n', (10009, 10011), True, 'from dannce.engine import ops as ops\n'), ((10106, 10133), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (10131, 10133), True, 'from dannce.engine import ops as ops\n'), ((10279, 10306), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (10304, 10306), True, 'from dannce.engine import ops as ops\n'), ((10401, 10428), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (10426, 10428), True, 'from dannce.engine import ops as ops\n'), ((10630, 10657), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (10655, 10657), True, 'from dannce.engine import ops as ops\n'), ((10752, 10779), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (10777, 10779), True, 'from dannce.engine import ops as ops\n'), ((10981, 11008), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (11006, 11008), True, 'from dannce.engine import ops as ops\n'), ((11103, 11130), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (11128, 11130), True, 'from dannce.engine import ops as ops\n'), ((11330, 11357), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (11355, 11357), True, 'from dannce.engine import ops as ops\n'), ((11451, 11478), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (11476, 11478), True, 'from dannce.engine import ops as ops\n'), ((11678, 11705), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (11703, 11705), True, 'from dannce.engine import ops as ops\n'), ((11799, 11826), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (11824, 11826), True, 'from dannce.engine import ops as ops\n'), ((12144, 12155), 
'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (12148, 12155), False, 'from tensorflow.keras.optimizers import Adam\n'), ((15642, 15708), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs, grid_centers]', 'outputs': '[output, output_var]'}), '(inputs=[inputs, grid_centers], outputs=[output, output_var])\n', (15647, 15708), False, 'from tensorflow.keras.models import Model\n'), ((15743, 15797), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs, grid_centers]', 'outputs': '[output]'}), '(inputs=[inputs, grid_centers], outputs=[output])\n', (15748, 15797), False, 'from tensorflow.keras.models import Model\n'), ((15995, 16006), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (15999, 16006), False, 'from tensorflow.keras.optimizers import Adam\n'), ((16891, 16907), 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['inp'], {}), '(inp)\n', (16902, 16907), True, 'from tensorflow.keras import backend as K\n'), ((19938, 20004), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs, grid_centers]', 'outputs': '[output, output_var]'}), '(inputs=[inputs, grid_centers], outputs=[output, output_var])\n', (19943, 20004), False, 'from tensorflow.keras.models import Model\n'), ((20039, 20093), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[inputs, grid_centers]', 'outputs': '[output]'}), '(inputs=[inputs, grid_centers], outputs=[output])\n', (20044, 20093), False, 'from tensorflow.keras.models import Model\n'), ((20291, 20302), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (20295, 20302), False, 'from tensorflow.keras.optimizers import Adam\n'), ((23251, 23262), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (23255, 23262), False, 'from tensorflow.keras.optimizers import Adam\n'), ((23893, 23909), 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['inp'], {}), '(inp)\n', (23904, 23909), True, 
'from tensorflow.keras import backend as K\n'), ((26525, 26536), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (26529, 26536), False, 'from tensorflow.keras.optimizers import Adam\n'), ((29641, 29652), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (29645, 29652), False, 'from tensorflow.keras.optimizers import Adam\n'), ((30770, 30790), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (30788, 30790), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((30888, 30908), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (30906, 30908), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((31060, 31080), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (31078, 31080), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((31178, 31198), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (31196, 31198), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((31350, 31370), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (31368, 31370), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((31468, 31488), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (31486, 31488), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((31757, 31777), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (31775, 31777), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((31875, 31895), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (31893, 31895), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((32164, 32184), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (32182, 32184), 
False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((32282, 32302), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (32300, 32302), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((32569, 32589), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (32587, 32589), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((32686, 32706), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (32704, 32706), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((32967, 32978), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (32971, 32978), False, 'from tensorflow.keras.optimizers import Adam\n'), ((36631, 36642), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (36635, 36642), False, 'from tensorflow.keras.optimizers import Adam\n'), ((2698, 2758), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(256, (2, 2), strides=(2, 2), padding='same')\n", (2713, 2758), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((3058, 3118), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(128, (2, 2), strides=(2, 2), padding='same')\n", (3073, 3118), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((3418, 3477), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (2, 2), strides=(2, 2), padding='same')\n", (3433, 3477), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((3775, 3834), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(32)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(32, 
(2, 2), strides=(2, 2), padding='same')\n", (3790, 3834), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((6559, 6619), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(256, (2, 2), strides=(2, 2), padding='same')\n", (6574, 6619), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((6933, 6993), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(128, (2, 2), strides=(2, 2), padding='same')\n", (6948, 6993), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((7307, 7366), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (2, 2), strides=(2, 2), padding='same')\n", (7322, 7366), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((7678, 7737), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(32)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(32, (2, 2), strides=(2, 2), padding='same')\n", (7693, 7737), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((10461, 10521), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(256)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(256, (2, 2), strides=(2, 2), padding='same')\n", (10476, 10521), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((10812, 10872), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(128)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(128, (2, 2), strides=(2, 2), padding='same')\n", (10827, 10872), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((11163, 11222), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(64)', 
'(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(64, (2, 2), strides=(2, 2), padding='same')\n", (11178, 11222), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((11511, 11570), 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(32)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), "(32, (2, 2), strides=(2, 2), padding='same')\n", (11526, 11570), False, 'from tensorflow.keras.layers import Conv2DTranspose, Conv3D, Lambda\n'), ((12631, 12651), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (12649, 12651), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((13976, 14042), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(256)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(256, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (13991, 14042), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((14349, 14415), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(128)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(128, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (14364, 14415), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((14722, 14787), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(64)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(64, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (14737, 14787), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((15204, 15226), 'dannce.engine.ops.spatial_softmax', 'ops.spatial_softmax', (['x'], {}), '(x)\n', (15223, 15226), True, 'from dannce.engine import ops as ops\n'), ((15267, 15300), 'dannce.engine.ops.expected_value_3d', 'ops.expected_value_3d', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (15288, 15300), True, 'from dannce.engine import ops as ops\n'), ((15498, 15526), 
'dannce.engine.ops.var_3d', 'ops.var_3d', (['x[0]', 'x[1]', 'x[2]'], {}), '(x[0], x[1], x[2])\n', (15508, 15526), True, 'from dannce.engine import ops as ops\n'), ((16603, 16623), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (16621, 16623), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((18289, 18355), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(256)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(256, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (18304, 18355), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((18662, 18728), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(128)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(128, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (18677, 18728), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((19035, 19100), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(64)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(64, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (19050, 19100), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((19500, 19522), 'dannce.engine.ops.spatial_softmax', 'ops.spatial_softmax', (['x'], {}), '(x)\n', (19519, 19522), True, 'from dannce.engine import ops as ops\n'), ((19563, 19596), 'dannce.engine.ops.expected_value_3d', 'ops.expected_value_3d', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (19584, 19596), True, 'from dannce.engine import ops as ops\n'), ((19794, 19822), 'dannce.engine.ops.var_3d', 'ops.var_3d', (['x[0]', 'x[1]', 'x[2]'], {}), '(x[0], x[1], x[2])\n', (19804, 19822), True, 'from dannce.engine import ops as ops\n'), ((20683, 20703), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (20701, 20703), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((22024, 
22090), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(256)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(256, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (22039, 22090), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((22397, 22463), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(128)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(128, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (22412, 22463), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((22770, 22835), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(64)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(64, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (22785, 22835), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((23605, 23625), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (23623, 23625), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((25298, 25364), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(256)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(256, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (25313, 25364), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((25671, 25737), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(128)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(128, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (25686, 25737), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((26044, 26109), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(64)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(64, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (26059, 26109), False, 'from tensorflow.keras.layers import 
MaxPooling3D, Conv3DTranspose\n'), ((26998, 27018), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (27016, 27018), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((28319, 28385), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(256)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(256, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (28334, 28385), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((28692, 28758), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(128)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(128, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (28707, 28758), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((29065, 29130), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(64)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(64, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (29080, 29130), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((30120, 30140), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (30138, 30140), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((31543, 31609), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(256)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(256, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (31558, 31609), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((31950, 32016), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(128)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(128, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (31965, 32016), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((32357, 32422), 
'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(64)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(64, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (32372, 32422), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((33492, 33512), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (33510, 33512), False, 'from tensorflow.keras.layers import BatchNormalization\n'), ((35089, 35155), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(256)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(256, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (35104, 35155), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((35540, 35606), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(128)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(128, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (35555, 35606), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((35991, 36056), 'tensorflow.keras.layers.Conv3DTranspose', 'Conv3DTranspose', (['(64)', '(2, 2, 2)'], {'strides': '(2, 2, 2)', 'padding': '"""same"""'}), "(64, (2, 2, 2), strides=(2, 2, 2), padding='same')\n", (36006, 36056), False, 'from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose\n'), ((38286, 38308), 'dannce.engine.ops.spatial_softmax', 'ops.spatial_softmax', (['x'], {}), '(x)\n', (38305, 38308), True, 'from dannce.engine import ops as ops\n'), ((38351, 38384), 'dannce.engine.ops.expected_value_3d', 'ops.expected_value_3d', (['x[0]', 'x[1]'], {}), '(x[0], x[1])\n', (38372, 38384), True, 'from dannce.engine import ops as ops\n'), ((12776, 12803), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (12801, 12803), True, 'from dannce.engine import ops as ops\n'), ((16748, 16775), 'dannce.engine.ops.InstanceNormalization', 
'ops.InstanceNormalization', ([], {}), '()\n', (16773, 16775), True, 'from dannce.engine import ops as ops\n'), ((20828, 20855), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (20853, 20855), True, 'from dannce.engine import ops as ops\n'), ((23750, 23777), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (23775, 23777), True, 'from dannce.engine import ops as ops\n'), ((27143, 27170), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (27168, 27170), True, 'from dannce.engine import ops as ops\n'), ((30265, 30292), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (30290, 30292), True, 'from dannce.engine import ops as ops\n'), ((33637, 33664), 'dannce.engine.ops.InstanceNormalization', 'ops.InstanceNormalization', ([], {}), '()\n', (33662, 33664), True, 'from dannce.engine import ops as ops\n')] |
import librosa
import numpy as np
import extract_feat as ef
local_config = {
'batch_size': 1,
'eps': 1e-5,
'sample_rate': 22050,
'load_size': 22050*20,
'name_scope': 'SoundNet',
'phase': 'extract',
}
def analyse_sound(path):
x, sr = librosa.load(path)
td = librosa.get_duration(x)
print("Time duration of audio: ", td)
x = np.reshape(x, [1,-1,1,1])
print("Shape of input waveform: ", x.shape)
# Setup visible device
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_device
# Load pre-trained model
G_name = './models/sound8.npy'
param_G = np.load(G_name, encoding = 'latin1').item()
# Init. Session
sess_config = tf.ConfigProto()
sess_config.allow_soft_placement=True
sess_config.gpu_options.allow_growth = True
with tf.Session(config=sess_config) as session:
# Build model
model = Model(session, config=local_config, param_G=param_G)
init = tf.global_variables_initializer()
session.run(init)
model.load()
features = ef.extract_feat(model, x, local_config)
print("Shape of feature: ", features.shape)
if __name__ == "__main__":
analyse_sound("data/x.mp3")
| [
"numpy.reshape",
"librosa.get_duration",
"numpy.load",
"extract_feat.extract_feat",
"librosa.load"
] | [((326, 344), 'librosa.load', 'librosa.load', (['path'], {}), '(path)\n', (338, 344), False, 'import librosa\n'), ((354, 377), 'librosa.get_duration', 'librosa.get_duration', (['x'], {}), '(x)\n', (374, 377), False, 'import librosa\n'), ((428, 456), 'numpy.reshape', 'np.reshape', (['x', '[1, -1, 1, 1]'], {}), '(x, [1, -1, 1, 1])\n', (438, 456), True, 'import numpy as np\n'), ((1148, 1187), 'extract_feat.extract_feat', 'ef.extract_feat', (['model', 'x', 'local_config'], {}), '(model, x, local_config)\n', (1163, 1187), True, 'import extract_feat as ef\n'), ((667, 701), 'numpy.load', 'np.load', (['G_name'], {'encoding': '"""latin1"""'}), "(G_name, encoding='latin1')\n", (674, 701), True, 'import numpy as np\n')] |
import numpy as np
from system_env import system_models
def random_intelligent_agent(network_hp, system, controller, channels, policy, horizon, epochs, system_noise,
channel_init, channel_transitions):
resources = sum(network_hp['model_quantities'])
action_size = system.subsystems
network = system_models.BaseNetworkGE(channels)
avg_loss = []
noise_idx = 0
for e in range(epochs):
for c, channel in enumerate(network.channels):
channel.initialize(channel_init[c, e])
system.reset_state()
system_state = system.state
cum_loss = 0
for time in range(horizon):
# Action
schedule = np.zeros((resources, action_size))
for j in range(resources):
action = np.random.choice(action_size, 1, p=policy)
schedule[j, action] = 1
transmission_outcomes = network.output(schedule,
np.reshape(channel_transitions[:, noise_idx], (resources, 2)))
# Apply control
actor_decision = [1 if x != 0 else 0 for x in np.sum(schedule, axis=0)]
inputs = controller.controller_one_step(actor_decision) @ system_state
# for know single input system
active_inputs = np.array([a * b for a, b in zip(transmission_outcomes, inputs)])
next_system_state = system.state_update(active_inputs, np.array(system_noise[noise_idx, :]))
# Compute loss
system_loss \
= system_state.transpose() @ system.q_system @ system_state \
+ active_inputs.transpose() @ system.r_system @ active_inputs
system_state = next_system_state
noise_idx += 1
# Cumulative loss
cum_loss += system_loss
avg_loss.append(cum_loss / horizon)
return avg_loss, None
| [
"system_env.system_models.BaseNetworkGE",
"numpy.reshape",
"numpy.random.choice",
"numpy.array",
"numpy.zeros",
"numpy.sum"
] | [((347, 384), 'system_env.system_models.BaseNetworkGE', 'system_models.BaseNetworkGE', (['channels'], {}), '(channels)\n', (374, 384), False, 'from system_env import system_models\n'), ((736, 770), 'numpy.zeros', 'np.zeros', (['(resources, action_size)'], {}), '((resources, action_size))\n', (744, 770), True, 'import numpy as np\n'), ((837, 879), 'numpy.random.choice', 'np.random.choice', (['action_size', '(1)'], {'p': 'policy'}), '(action_size, 1, p=policy)\n', (853, 879), True, 'import numpy as np\n'), ((1037, 1098), 'numpy.reshape', 'np.reshape', (['channel_transitions[:, noise_idx]', '(resources, 2)'], {}), '(channel_transitions[:, noise_idx], (resources, 2))\n', (1047, 1098), True, 'import numpy as np\n'), ((1508, 1544), 'numpy.array', 'np.array', (['system_noise[noise_idx, :]'], {}), '(system_noise[noise_idx, :])\n', (1516, 1544), True, 'import numpy as np\n'), ((1190, 1214), 'numpy.sum', 'np.sum', (['schedule'], {'axis': '(0)'}), '(schedule, axis=0)\n', (1196, 1214), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import collections
from copy import deepcopy
import json
import numpy as np
from .skeleton import Skeleton
from .skeleton_node import SkeletonRootNode, SkeletonJointNode, SkeletonEndSiteNode, SKELETON_NODE_TYPE_JOINT, SKELETON_NODE_TYPE_END_SITE
from .quaternion_frame import convert_euler_to_quaternion_frame
from .joint_constraints import HingeConstraint2
from .acclaim import asf_to_bvh_channels
DEFAULT_ROOT_DIR = [0, 0, 1]
def create_identity_frame(skeleton):
skeleton.identity_frame = np.zeros(skeleton.reference_frame_length)
offset = 3
for j in skeleton.animated_joints:
skeleton.identity_frame[offset:offset + 4] = [1, 0, 0, 0]
offset += 4
def create_euler_frame_indices(skeleton):
nodes_without_endsite = [node for node in list(skeleton.nodes.values()) if node.node_type != SKELETON_NODE_TYPE_END_SITE]
for node in nodes_without_endsite:
node.euler_frame_index = nodes_without_endsite.index(node)
def read_reference_frame_from_bvh_reader(bvh_reader, frame_index=0):
quat_frame = convert_euler_to_quaternion_frame(bvh_reader, bvh_reader.frames[frame_index], False, animated_joints=None)
quat_frames = list(quat_frame.values())
quaternion_frame = np.array(quat_frames).flatten()
return np.array(bvh_reader.frames[0][:3].tolist() + quaternion_frame.tolist())
def add_tool_nodes(skeleton, node_names, new_tool_bones):
for b in new_tool_bones:
add_new_end_site(skeleton, node_names, b["parent_node_name"], b["new_node_offset"])
skeleton.tool_nodes.append(b["new_node_name"])
def add_new_end_site(skeleton, node_names, parent_node_name, offset):
if parent_node_name in list(node_names.keys()):
level = node_names[parent_node_name]["level"] + 1
node_desc = dict()
node_desc["level"] = level
node_desc["offset"] = offset
def reference_frame_from_unity(data):
n_j = len(data["rotations"])
q_frame = np.zeros(n_j*4+3)
q_frame[:3] = arr_from_unity_t(data["translations"][0])
o = 3
for q in data["rotations"]:
q_frame[o:o+4] = arr_from_unity_q(q)
o+=4
return q_frame
def generate_reference_frame(skeleton, animated_joints):
identity_frame = [0,0,0]
frame = [0, 0, 0]
joint_idx = 0
node_list = [(skeleton.nodes[n].index, n) for n in skeleton.nodes.keys() if skeleton.nodes[n].index >= 0]
node_list.sort()
for idx, node in node_list:
frame += list(skeleton.nodes[node].rotation)
if node in animated_joints:
identity_frame += [1.0,0.0,0.0,0.0]
skeleton.nodes[node].quaternion_frame_index = joint_idx
joint_idx += 1
else:
skeleton.nodes[node].quaternion_frame_index = -1
skeleton.reference_frame = np.array(frame)
skeleton.reference_frame_length = len(frame)
skeleton.identity_frame = np.array(identity_frame)
def arr_from_unity_q(_q):
q = np.zeros(4)
q[0] = - _q["w"]
q[1] = - _q["x"]
q[2] = _q["y"]
q[3] = _q["z"]
return q
def arr_from_unity_t(_t):
t = np.zeros(3)
t[0] = - _t["x"]
t[1] = _t["y"]
t[2] = _t["z"]
return t
class SkeletonBuilder(object):
def load_from_bvh(self, bvh_reader, animated_joints=None, reference_frame=None, skeleton_model=None, tool_bones=None):
skeleton = Skeleton()
if animated_joints is None:
animated_joints = list(bvh_reader.get_animated_joints())
skeleton.animated_joints = animated_joints
skeleton.frame_time = deepcopy(bvh_reader.frame_time)
skeleton.root = deepcopy(bvh_reader.root)
skeleton.aligning_root_node = skeleton.root
skeleton.aligning_root_dir = DEFAULT_ROOT_DIR
if reference_frame is None:
skeleton.reference_frame = read_reference_frame_from_bvh_reader(bvh_reader)
else:
skeleton.reference_frame = reference_frame
skeleton.reference_frame_length = len(skeleton.reference_frame)
skeleton.node_channels = collections.OrderedDict()
skeleton.nodes = collections.OrderedDict()
skeleton.tool_nodes = []
if tool_bones is not None:
add_tool_nodes(skeleton, bvh_reader.node_names, tool_bones)
skeleton.max_level = skeleton._get_max_level()
skeleton._set_joint_weights()
skeleton.nodes = collections.OrderedDict()
joint_list = [k for k in bvh_reader.node_names if
"children" in list(bvh_reader.node_names[k].keys()) and
len(bvh_reader.node_names[k]["children"]) > 0]
self.construct_hierarchy_from_bvh(skeleton, joint_list, bvh_reader.node_names, skeleton.root, 0)
skeleton.parent_dict = skeleton._get_parent_dict()
skeleton._chain_names = skeleton._generate_chain_names()
create_euler_frame_indices(skeleton)
create_identity_frame(skeleton)
if skeleton_model is not None:
skeleton.skeleton_model = skeleton_model
#skeleton.add_heels(skeleton_model)
return skeleton
def construct_hierarchy_from_bvh(self, skeleton, joint_list, node_info, node_name, level):
if "channels" in node_info[node_name]:
channels = node_info[node_name]["channels"]
else:
channels = []
if node_name in joint_list:
joint_index = joint_list.index(node_name)
else:
joint_index = -1
if node_name == skeleton.root:
node = SkeletonRootNode(node_name, channels, None, level)
if node_name in skeleton.animated_joints:
node.fixed = False
node.quaternion_frame_index = skeleton.animated_joints.index(node_name)
else:
node.fixed = True
node.index = joint_index
elif "children" in list(node_info[node_name].keys()) and len(node_info[node_name]["children"]) > 0:
node = SkeletonJointNode(node_name, channels, None, level)
if node_name in skeleton.animated_joints:
node.fixed = False
node.quaternion_frame_index = skeleton.animated_joints.index(node_name)
else:
node.fixed = True
offset = joint_index * 4 + 3
node.rotation = skeleton.reference_frame[offset: offset + 4]
node.index = joint_index
else:
node = SkeletonEndSiteNode(node_name, channels, None, level)
node.index = joint_index
node.offset = node_info[node_name]["offset"]
skeleton.nodes[node_name] = node
if "children" in list(node_info[node_name].keys()):
for c in node_info[node_name]["children"]:
c_node = self.construct_hierarchy_from_bvh(skeleton, joint_list, node_info, c, level+1)
c_node.parent = node
node.children.append(c_node)
return node
def load_from_json_file(self, filename):
with open(filename) as infile:
data = json.load(infile)
skeleton = self.load_from_custom_unity_format(data)
return skeleton
def load_from_custom_unity_format(self, data, frame_time=1.0/30, add_extra_end_site=False):
skeleton = Skeleton()
animated_joints = data["jointSequence"]
if len(animated_joints) == 0:
print("Error no joints defined")
return skeleton
print("load from json", len(animated_joints))
skeleton.animated_joints = animated_joints
skeleton.skeleton_model = collections.OrderedDict()
skeleton.skeleton_model["joints"] = dict()
if "head_joint" in list(data.keys()):
skeleton.skeleton_model["joints"]["head"] = data["head_joint"]
if "neck_joint" in list(data.keys()):
skeleton.skeleton_model["joints"]["neck"] = data["neck_joint"]
skeleton.frame_time = frame_time
skeleton.nodes = collections.OrderedDict()
skeleton.root = animated_joints[0]
root = self._create_node_from_unity_desc(skeleton, skeleton.root, data, None, 0, add_extra_end_site)
skeleton.max_level = skeleton._get_max_level()
skeleton._set_joint_weights()
skeleton.parent_dict = skeleton._get_parent_dict()
skeleton._chain_names = skeleton._generate_chain_names()
create_euler_frame_indices(skeleton)
if "root" in data:
skeleton.aligning_root_node = data["root"]
else:
skeleton.aligning_root_node = skeleton.root
skeleton.aligning_root_dir = DEFAULT_ROOT_DIR
skeleton.reference_frame = reference_frame_from_unity(data["referencePose"])
print("reference", skeleton.reference_frame[3:7],data["referencePose"]["rotations"][0])
skeleton.reference_frame_length = len(skeleton.reference_frame)
create_identity_frame(skeleton)
return skeleton
def load_from_json_data(self, data, animated_joints=None, use_all_joints=False):
def extract_animated_joints(node, animated_joints):
animated_joints.append(node["name"])
for c in node["children"]:
if c["index"] >= 0 and len(c["children"]) > 0:
extract_animated_joints(c, animated_joints)
skeleton = Skeleton()
print("load from json")
if animated_joints is not None:
skeleton.animated_joints = animated_joints
elif "animated_joints" in data.keys():
skeleton.animated_joints = data["animated_joints"]
else:
animated_joints = list()
extract_animated_joints(data["root"], animated_joints)
skeleton.animated_joints = animated_joints
if "free_joints_map" in list(data.keys()):
skeleton.free_joints_map = data["free_joints_map"]
if "skeleton_model" in list(data.keys()):
skeleton.skeleton_model = data["skeleton_model"]
else:
skeleton.skeleton_model = collections.OrderedDict()
skeleton.skeleton_model["joints"] = dict()
if "head_joint" in list(data.keys()):
skeleton.skeleton_model["joints"]["head"] = data["head_joint"]
if "neck_joint" in list(data.keys()):
skeleton.skeleton_model["joints"]["neck"] = data["neck_joint"]
skeleton.frame_time = data["frame_time"]
skeleton.nodes = collections.OrderedDict()
root = self._create_node_from_desc(skeleton, data["root"], None, 0)
skeleton.root = root.node_name
if "tool_nodes" in list(data.keys()):
skeleton.tool_nodes = data["tool_nodes"]
skeleton.max_level = skeleton._get_max_level()
skeleton._set_joint_weights()
skeleton.parent_dict = skeleton._get_parent_dict()
skeleton._chain_names = skeleton._generate_chain_names()
create_euler_frame_indices(skeleton)
if "aligning_root_node" in list(data.keys()):
skeleton.aligning_root_node = data["aligning_root_node"]
else:
skeleton.aligning_root_node = skeleton.root
if "aligning_root_dir" in list(data.keys()):
skeleton.aligning_root_dir = data["aligning_root_dir"]
else:
skeleton.aligning_root_dir = DEFAULT_ROOT_DIR
#skeleton.reference_frame = data["reference_frame"]
skeleton.reference_frame = None
if "reference_frame" in data:
skeleton.reference_frame = data["reference_frame"]
skeleton.reference_frame_length = len(skeleton.reference_frame)
if skeleton.reference_frame is None or use_all_joints:
generate_reference_frame(skeleton, skeleton.animated_joints)
create_identity_frame(skeleton)
return skeleton
def load_from_fbx_data(self, data):
skeleton = Skeleton()
skeleton.nodes = collections.OrderedDict()
skeleton.animated_joints = data["animated_joints"]
# self.inv_bind_poses = [self._create_node_from_desc(node, None) for node in data["nodes"].values()]
skeleton.root = data["root"]
self._create_node_from_desc2(skeleton, data, skeleton.root, None, 0)
skeleton.frame_time = data["frame_time"]
skeleton.parent_dict = skeleton._get_parent_dict()
skeleton._chain_names = skeleton._generate_chain_names()
n_params = len(skeleton.animated_joints) * 4 + 3
skeleton.reference_frame = np.zeros(n_params)
offset = 3
for node_name in skeleton.animated_joints:
skeleton.reference_frame[offset:offset + 4] = data["nodes"][node_name]["rotation"]
offset += 4
skeleton.reference_frame_length = len(skeleton.reference_frame)
create_identity_frame(skeleton)
return skeleton
def _create_node_from_desc(self, skeleton, data, parent, level):
node_name = data["name"]
channels = data["channels"]
if parent is None:
node = SkeletonRootNode(node_name, channels, parent, level)
elif data["node_type"] == SKELETON_NODE_TYPE_JOINT:
node = SkeletonJointNode(node_name, channels, parent, level)
else:
node = SkeletonEndSiteNode(node_name, channels, parent, level)
# node.fixed = data["fixed"]
node.index = data["index"]
node.offset = np.array(data["offset"])
node.rotation = np.array(data["rotation"])
if node_name in skeleton.animated_joints:
node.quaternion_frame_index = skeleton.animated_joints.index(node_name)
node.fixed = False
else:
node.quaternion_frame_index = -1
node.fixed = True
skeleton.nodes[node_name] = node
skeleton.nodes[node_name].children = []
for c_desc in data["children"]:
skeleton.nodes[node_name].children.append(self._create_node_from_desc(skeleton, c_desc, node, level+1))
return node
def _create_node_from_desc2(self, skeleton, data, node_name, parent, level=0):
node_data = data["nodes"][node_name]
channels = node_data["channels"]
if parent is None:
node = SkeletonRootNode(node_name, channels, parent, level)
elif node_data["node_type"] == SKELETON_NODE_TYPE_JOINT:
node = SkeletonJointNode(node_name, channels, parent, level)
else:
node = SkeletonEndSiteNode(node_name, channels, parent, level)
node.fixed = node_data["fixed"]
node.index = node_data["index"]
node.offset = np.array(node_data["offset"])
node.rotation = np.array(node_data["rotation"])
if node_name in skeleton.animated_joints:
node.quaternion_frame_index = skeleton.animated_joints.index(node_name)
else:
node.quaternion_frame_index = -1
node.children = []
skeleton.nodes[node_name] = node
for c_name in node_data["children"]:
c_node = self._create_node_from_desc2(skeleton, data, c_name, node, level+1)
node.children.append(c_node)
return node
def get_joint_desc(self, data, name):
for idx in range(len(data["jointDescs"])):
if data["jointDescs"][idx]["name"] == name:
return data["jointDescs"][idx]
return None
def _create_node_from_unity_desc(self, skeleton, node_name, data, parent, level, add_extra_end_site=False):
node_data = self.get_joint_desc(data, node_name)
if node_data is None:
return
if parent is None:
channels = ["Xposition","Yposition","Zposition", "Xrotation","Yrotation","Zrotation"]
else:# len(node_data["children"]) > 0:
channels = ["Xrotation","Yrotation","Zrotation"]
if parent is None:
node = SkeletonRootNode(node_name, channels, parent, level)
else:# len(node_data["children"]) > 0:
node = SkeletonJointNode(node_name, channels, parent, level)
node.offset = np.array(node_data["offset"])
node.offset[0] *= -1
node.rotation = np.array(node_data["rotation"])
node.rotation[0] *= -1
node.rotation[1] *= -1
if node_name in skeleton.animated_joints:
node.quaternion_frame_index = skeleton.animated_joints.index(node_name)
node.fixed = False
else:
node.quaternion_frame_index = -1
node.fixed = True
skeleton.nodes[node_name] = node
skeleton.nodes[node_name].children = []
if len(node_data["children"]) > 0:
node.index = node.quaternion_frame_index
for c_name in node_data["children"]:
c_node = self._create_node_from_unity_desc(skeleton, c_name, data, node, level+1)
if c_node is not None:
skeleton.nodes[node_name].children.append(c_node)
if add_extra_end_site:
print("add extra end site")
node.index = -1
channels = []
c_name = node_name+"_EndSite"
c_node = SkeletonEndSiteNode(c_name, channels, node, level+1)
skeleton.nodes[node_name].children.append(c_node)
skeleton.nodes[c_name] = c_node
return node
@classmethod
def construct_arm_with_constraints(cls, n_joints, length):
skeleton = Skeleton()
skeleton.frame_time = 1 / 30
animated_joints = []
animated_joints.append("root")
skeleton.root = "root"
channels = ["rotationX", "rotationY", "rotationZ", "rotationW"]
node = SkeletonRootNode("root", channels, None, 0)
node.fixed = False
node.index = 0
node.offset = [0, 0, 0]
node.rotation = [1, 0, 0, 0]
node.quaternion_frame_index = 0
skeleton.nodes["root"] = node
parent = node
swing_axis = np.array([0,0,1])
twist_axis = np.array([0, 1, 0])
angle_range = [0,90]
for n in range(1, n_joints + 1): # start after the root joint and add one endsite
if n + 1 < n_joints + 1:
node_name = "joint" + str(n)
node = SkeletonJointNode(node_name, channels, parent, n)
animated_joints.append(node_name)
node.fixed = False
node.quaternion_frame_index = n
node.index = n
node.offset = np.array([0, length, 0], dtype=np.float)
print("create", node_name, node.offset)
if n in [1]:
node.joint_constraint = HingeConstraint2(swing_axis, twist_axis)
else:
node_name = "joint" + str(n - 1) + "_EndSite"
node = SkeletonEndSiteNode(node_name, channels, parent, n)
node.fixed = True
node.quaternion_frame_index = -1
node.index = -1
node.offset = np.array([0, 0, 0], dtype=np.float)
print("create", node_name, node.offset)
parent.children.append(node)
node.rotation = [1, 0, 0, 0]
skeleton.nodes[node_name] = node
parent = node
skeleton.animated_joints = animated_joints
skeleton.reference_frame = cls.get_reference_frame(animated_joints)
return skeleton
@classmethod
def get_reference_frame(cls, animated_joints):
n_animated_joints = len(animated_joints)
reference_frame = np.zeros(n_animated_joints * 4 + 3)
o = 3
for n in range(n_animated_joints):
reference_frame[o:o + 4] = [1, 0, 0, 0]
o += 4
return reference_frame
def load_from_asf_data(self, data, frame_time=1.0/30):
skeleton = Skeleton()
animated_joints = ["root"]+list(data["bones"].keys())
print("load from asf", len(animated_joints))
skeleton.animated_joints =animated_joints
skeleton.skeleton_model = collections.OrderedDict()
skeleton.skeleton_model["joints"] = dict()
skeleton.frame_time = frame_time
skeleton.nodes = collections.OrderedDict()
skeleton.root = "root"
if skeleton.root is None:
skeleton.root = animated_joints[0]
root = self._create_node_from_asf_data(skeleton, skeleton.root, data, None, 0)
skeleton.max_level = skeleton._get_max_level()
skeleton._set_joint_weights()
skeleton.parent_dict = skeleton._get_parent_dict()
skeleton._chain_names = skeleton._generate_chain_names()
skeleton.aligning_root_node = skeleton.root
skeleton.reference_frame = self.get_reference_frame(animated_joints)
skeleton.reference_frame_length = len(skeleton.reference_frame)
create_identity_frame(skeleton)
return skeleton
def _create_node_from_asf_data(self, skeleton, node_name, data, parent, level):
if parent is None:
channels = asf_to_bvh_channels(data["root"]["order"])
node = SkeletonRootNode(node_name, channels, parent, level)
node.format_meta_data = data["root"]
node.quaternion_frame_index = 0
node.fixed = False
elif "dof" in data["bones"][node_name]:
channels = asf_to_bvh_channels(data["bones"][node_name]["dof"])
node = SkeletonJointNode(node_name, channels, parent, level)
if "offset" in data["bones"][node_name]:
node.offset = data["bones"][node_name]["offset"]
#if parent.node_name is not "root":
# node.offset = np.array(data["bones"][parent.node_name]["direction"])
# node.offset *= data["bones"][parent.node_name]["length"]
node.format_meta_data = data["bones"][node_name]
if node_name in skeleton.animated_joints:
node.quaternion_frame_index = skeleton.animated_joints.index(node_name)
node.fixed = False
else:
channels = []
node = SkeletonJointNode(node_name, channels, parent, level)
node.format_meta_data = data["bones"][node_name]
node.quaternion_frame_index = -1
node.fixed = True
skeleton.nodes[node_name] = node
skeleton.nodes[node_name].children = []
if node_name in data["children"] and len(data["children"][node_name]) > 0:
node.index = node.quaternion_frame_index
for c_name in data["children"][node_name]:
c_node = self._create_node_from_asf_data(skeleton, c_name, data, node, level+1)
if c_node is not None:
skeleton.nodes[node_name].children.append(c_node)
return node
| [
"collections.OrderedDict",
"numpy.array",
"numpy.zeros",
"copy.deepcopy",
"json.load"
] | [((1610, 1651), 'numpy.zeros', 'np.zeros', (['skeleton.reference_frame_length'], {}), '(skeleton.reference_frame_length)\n', (1618, 1651), True, 'import numpy as np\n'), ((3048, 3069), 'numpy.zeros', 'np.zeros', (['(n_j * 4 + 3)'], {}), '(n_j * 4 + 3)\n', (3056, 3069), True, 'import numpy as np\n'), ((3874, 3889), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (3882, 3889), True, 'import numpy as np\n'), ((3969, 3993), 'numpy.array', 'np.array', (['identity_frame'], {}), '(identity_frame)\n', (3977, 3993), True, 'import numpy as np\n'), ((4030, 4041), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (4038, 4041), True, 'import numpy as np\n'), ((4171, 4182), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4179, 4182), True, 'import numpy as np\n'), ((4627, 4658), 'copy.deepcopy', 'deepcopy', (['bvh_reader.frame_time'], {}), '(bvh_reader.frame_time)\n', (4635, 4658), False, 'from copy import deepcopy\n'), ((4683, 4708), 'copy.deepcopy', 'deepcopy', (['bvh_reader.root'], {}), '(bvh_reader.root)\n', (4691, 4708), False, 'from copy import deepcopy\n'), ((5113, 5138), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5136, 5138), False, 'import collections\n'), ((5164, 5189), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5187, 5189), False, 'import collections\n'), ((5448, 5473), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (5471, 5473), False, 'import collections\n'), ((8642, 8667), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (8665, 8667), False, 'import collections\n'), ((9028, 9053), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (9051, 9053), False, 'import collections\n'), ((11487, 11512), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (11510, 11512), False, 'import collections\n'), ((12947, 12972), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (12970, 12972), False, 
'import collections\n'), ((13522, 13540), 'numpy.zeros', 'np.zeros', (['n_params'], {}), '(n_params)\n', (13530, 13540), True, 'import numpy as np\n'), ((14420, 14444), 'numpy.array', 'np.array', (["data['offset']"], {}), "(data['offset'])\n", (14428, 14444), True, 'import numpy as np\n'), ((14469, 14495), 'numpy.array', 'np.array', (["data['rotation']"], {}), "(data['rotation'])\n", (14477, 14495), True, 'import numpy as np\n'), ((15614, 15643), 'numpy.array', 'np.array', (["node_data['offset']"], {}), "(node_data['offset'])\n", (15622, 15643), True, 'import numpy as np\n'), ((15668, 15699), 'numpy.array', 'np.array', (["node_data['rotation']"], {}), "(node_data['rotation'])\n", (15676, 15699), True, 'import numpy as np\n'), ((17077, 17106), 'numpy.array', 'np.array', (["node_data['offset']"], {}), "(node_data['offset'])\n", (17085, 17106), True, 'import numpy as np\n'), ((17160, 17191), 'numpy.array', 'np.array', (["node_data['rotation']"], {}), "(node_data['rotation'])\n", (17168, 17191), True, 'import numpy as np\n'), ((18945, 18964), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (18953, 18964), True, 'import numpy as np\n'), ((18984, 19003), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (18992, 19003), True, 'import numpy as np\n'), ((20528, 20563), 'numpy.zeros', 'np.zeros', (['(n_animated_joints * 4 + 3)'], {}), '(n_animated_joints * 4 + 3)\n', (20536, 20563), True, 'import numpy as np\n'), ((21015, 21040), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (21038, 21040), False, 'import collections\n'), ((21159, 21184), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (21182, 21184), False, 'import collections\n'), ((2330, 2351), 'numpy.array', 'np.array', (['quat_frames'], {}), '(quat_frames)\n', (2338, 2351), True, 'import numpy as np\n'), ((8105, 8122), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (8114, 8122), False, 'import json\n'), ((11073, 11098), 
'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (11096, 11098), False, 'import collections\n'), ((19474, 19514), 'numpy.array', 'np.array', (['[0, length, 0]'], {'dtype': 'np.float'}), '([0, length, 0], dtype=np.float)\n', (19482, 19514), True, 'import numpy as np\n'), ((19985, 20020), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'np.float'}), '([0, 0, 0], dtype=np.float)\n', (19993, 20020), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
def calc_model(indexes, dt, n_preds, mvals, parvals, mults, heats, heatsinks):
    """Integrate node values of a lumped-parameter thermal network in place.

    For each time index ``j`` in ``indexes`` a 2nd-order Runge-Kutta step
    advances the first ``n_preds`` rows of ``mvals`` from column ``j``.

    Fix: the original used Python-2-only ``xrange``, which raises NameError
    on Python 3; ``range`` is semantically identical here.

    Parameters
    ----------
    indexes : iterable of int
        Time-step indices (columns of ``mvals``) to advance.
    dt : float
        Integration step size.
    n_preds : int
        Number of predicted (integrated) nodes; rows ``>= n_preds`` are
        treated as externally supplied driving inputs.
    mvals : ndarray, shape (n_nodes, n_times)
        Node values over time; modified in place.
    parvals : ndarray
        Flat parameter vector indexed by the coupling tables below.
    mults : integer array, shape (n_couplings, 3)
        Rows (i1, i2, k): couple node i1 to node i2 with time constant
        ``parvals[k]``.
    heats : integer array, shape (n_heats, 2)
        Rows (i1, i2): add input row ``mvals[i2]`` directly to node i1.
    heatsinks : integer array, shape (n_sinks, 3)
        Rows (i1, kT, ktau): couple node i1 to a fixed sink at value
        ``parvals[kT]`` with time constant ``parvals[ktau]``.
    """
    deriv = np.zeros(n_preds)

    def dT_dt(j, y):
        """Time derivative of the predicted nodes at time index j."""
        deriv[:] = 0.0
        # Couplings with other nodes
        for i in range(len(mults)):
            i1 = mults[i, 0]
            i2 = mults[i, 1]
            tau = parvals[mults[i, 2]]
            if i2 < n_preds and i1 < n_preds:
                # Both ends are predicted nodes: use current state vector.
                deriv[i1] += (y[i2] - y[i1]) / tau
            else:
                # The partner node is a driven input: read it from mvals.
                deriv[i1] += (mvals[i2, j] - y[i1]) / tau
        # Direct heat inputs (e.g. Solar, Earth)
        for i in range(len(heats)):
            i1 = heats[i, 0]
            if i1 < n_preds:
                i2 = heats[i, 1]
                deriv[i1] += mvals[i2, j]
        # Couplings to heat sinks
        for i in range(len(heatsinks)):
            i1 = heatsinks[i, 0]
            if i1 < n_preds:
                T = parvals[heatsinks[i, 1]]
                tau = parvals[heatsinks[i, 2]]
                deriv[i1] += (T - y[i1]) / tau
        return deriv

    for j in indexes:
        # 2nd order Runge-Kutta (do 4th order later as needed)
        y = mvals[:n_preds, j]
        k1 = dt * dT_dt(j, y)
        k2 = dt * dT_dt(j + 1, y + k1 / 2.0)
        # NOTE(review): the half-step estimate goes to column j+1 and the full
        # RK2 step to j+2 -- presumably `indexes` advances by 2; confirm.
        mvals[:n_preds, j + 1] = y + k2 / 2.0
        mvals[:n_preds, j + 2] = y + k2
| [
"numpy.zeros"
] | [((175, 192), 'numpy.zeros', 'np.zeros', (['n_preds'], {}), '(n_preds)\n', (183, 192), True, 'import numpy as np\n')] |
import logging
import numpy as np
import sklearn.preprocessing as prep
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.utils import check_random_state
from csrank.objectranking.object_ranker import ObjectRanker
from csrank.tunable import Tunable
from csrank.util import print_dictionary
from ..dataset_reader.objectranking.util import \
generate_complete_pairwise_dataset
__all__ = ['RankSVM']
class RankSVM(ObjectRanker, Tunable):
    _tunable = None

    def __init__(self, n_features, C=1.0, tol=1e-4, normalize=True,
                 fit_intercept=True, random_state=None, **kwargs):
        """Pairwise linear model for object ranking (RankSVM).

        Parameters
        ----------
        n_features : int
            Dimensionality of the object feature space.
        C : float, optional
            Penalty parameter of the error term.
        tol : float, optional
            Tolerance used as the optimizer's stopping criterion.
        normalize : bool, optional
            If True, standardize the pairwise training data before fitting.
        fit_intercept : bool, optional
            If True, fit an intercept term in the linear model.
        random_state : int, RandomState instance or None, optional
            Seed or generator controlling pseudo-random behaviour.
        **kwargs
            Additional keyword arguments (ignored).

        References
        ----------
        .. [1] <NAME>. (2002, July).
            "Optimizing search engines using clickthrough data.",
            Proceedings of the eighth ACM SIGKDD international conference on
            Knowledge discovery and data mining (pp. 133-142). ACM.
        """
        self.n_features = n_features
        self.C = C
        self.tol = tol
        self.normalize = normalize
        self.fit_intercept = fit_intercept
        self.random_state = check_random_state(random_state)
        self.logger = logging.getLogger('RankSVM')
        # Above this many pairwise instances, fall back to logistic regression.
        self.threshold_instances = 1000000

    def fit(self, X, Y, **kwargs):
        """Fit the linear pairwise ranker on rankings Y over object sets X."""
        self.logger.debug('Creating the Dataset')
        x_train, *unused, y_single = generate_complete_pairwise_dataset(X, Y)
        del unused  # free the intermediate arrays early
        assert x_train.shape[1] == self.n_features
        self.logger.debug(
            'Finished the Dataset with instances {}'.format(x_train.shape[0]))
        if x_train.shape[0] > self.threshold_instances:
            self.model = LogisticRegression(C=self.C, tol=self.tol,
                                            fit_intercept=self.fit_intercept,
                                            random_state=self.random_state)
        else:
            self.model = LinearSVC(C=self.C, tol=self.tol,
                                   fit_intercept=self.fit_intercept,
                                   random_state=self.random_state)
        if self.normalize:
            x_train = prep.StandardScaler().fit_transform(x_train)
        self.logger.debug('Finished Creating the model, now fitting started')
        self.model.fit(x_train, y_single)
        self.weights = self.model.coef_.flatten()
        if self.fit_intercept:
            self.weights = np.append(self.weights, self.model.intercept_)
        self.logger.debug('Fitting Complete')

    def _predict_scores_fixed(self, X, **kwargs):
        """Score each object of every query set with the learned coefficients."""
        n_instances, n_objects, n_features = X.shape
        self.logger.info(
            "For Test instances {} objects {} features {}".format(n_instances,
                                                                  n_objects,
                                                                  n_features))
        scores = []
        for query_set in X:
            assert query_set.shape[1] == self.n_features
            coef = np.array(self.model.coef_)[0]
            try:
                query_scores = np.sum(coef * query_set, axis=1)
            except ValueError:
                # Shape mismatch between coefficients and features --
                # retry without the leading coefficient.
                query_scores = np.sum(coef[1:] * query_set, axis=1)
            scores.append(query_scores)
        self.logger.info("Done predicting scores")
        return np.array(scores)

    def predict_scores(self, X, **kwargs):
        return super().predict_scores(X, **kwargs)

    def predict(self, X, **kwargs):
        return super().predict(X, **kwargs)

    def predict_pair(self, a, b, **kwargs):
        """Return normalized preference scores for the object pair (a, b)."""
        coef = np.array(self.model.coef_)[0]
        score_a = np.sum(coef * a, axis=1)
        score_b = np.sum(coef * b, axis=1)
        return [score_a / (score_a + score_b), score_b / (score_a + score_b)]

    def set_tunable_parameters(self, C=1.0, tol=1e-4, **point):
        """Update the tunable hyperparameters C and tol."""
        self.tol = tol
        self.C = C
        if len(point) > 0:
            self.logger.warning('This ranking algorithm does not support'
                                ' tunable parameters'
                                ' called: {}'.format(print_dictionary(point)))
| [
"logging.getLogger",
"sklearn.utils.check_random_state",
"sklearn.svm.LinearSVC",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.sum",
"numpy.append",
"csrank.util.print_dictionary"
] | [((1818, 1846), 'logging.getLogger', 'logging.getLogger', (['"""RankSVM"""'], {}), "('RankSVM')\n", (1835, 1846), False, 'import logging\n'), ((1875, 1907), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1893, 1907), False, 'from sklearn.utils import check_random_state\n'), ((4098, 4114), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (4106, 4114), True, 'import numpy as np\n'), ((4402, 4429), 'numpy.sum', 'np.sum', (['(weights * a)'], {'axis': '(1)'}), '(weights * a, axis=1)\n', (4408, 4429), True, 'import numpy as np\n'), ((4448, 4475), 'numpy.sum', 'np.sum', (['(weights * b)'], {'axis': '(1)'}), '(weights * b, axis=1)\n', (4454, 4475), True, 'import numpy as np\n'), ((2450, 2562), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'self.C', 'tol': 'self.tol', 'fit_intercept': 'self.fit_intercept', 'random_state': 'self.random_state'}), '(C=self.C, tol=self.tol, fit_intercept=self.fit_intercept,\n random_state=self.random_state)\n', (2468, 2562), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2686, 2789), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': 'self.C', 'tol': 'self.tol', 'fit_intercept': 'self.fit_intercept', 'random_state': 'self.random_state'}), '(C=self.C, tol=self.tol, fit_intercept=self.fit_intercept,\n random_state=self.random_state)\n', (2695, 2789), False, 'from sklearn.svm import LinearSVC\n'), ((2907, 2928), 'sklearn.preprocessing.StandardScaler', 'prep.StandardScaler', ([], {}), '()\n', (2926, 2928), True, 'import sklearn.preprocessing as prep\n'), ((3212, 3258), 'numpy.append', 'np.append', (['self.weights', 'self.model.intercept_'], {}), '(self.weights, self.model.intercept_)\n', (3221, 3258), True, 'import numpy as np\n'), ((4354, 4380), 'numpy.array', 'np.array', (['self.model.coef_'], {}), '(self.model.coef_)\n', (4362, 4380), True, 'import numpy as np\n'), ((3797, 3823), 'numpy.array', 'np.array', (['self.model.coef_'], 
{}), '(self.model.coef_)\n', (3805, 3823), True, 'import numpy as np\n'), ((3868, 3903), 'numpy.sum', 'np.sum', (['(weights * data_test)'], {'axis': '(1)'}), '(weights * data_test, axis=1)\n', (3874, 3903), True, 'import numpy as np\n'), ((3959, 3998), 'numpy.sum', 'np.sum', (['(weights[1:] * data_test)'], {'axis': '(1)'}), '(weights[1:] * data_test, axis=1)\n', (3965, 3998), True, 'import numpy as np\n'), ((4869, 4892), 'csrank.util.print_dictionary', 'print_dictionary', (['point'], {}), '(point)\n', (4885, 4892), False, 'from csrank.util import print_dictionary\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import copy
import torchvision
import torchvision.transforms as transforms
import numpy as np
from PyHessian.pyhessian import hessian # Hessian computation
from ResNet_CIFAR10 import *
from VGG_model import *
import os
import argparse
#=====================================================================
#============== ANVITHS HELPERS FOR L2 NORM OF WEIGHTS ===============
#=====================================================================
def set_weights_fast(x, weights):
    """Copy the flat value sequence ``x`` into the tensors of ``weights`` in place.

    Consecutive slices of ``x`` are reshaped to each tensor's shape and
    written into it under ``torch.no_grad()`` so autograd is untouched.
    """
    with torch.no_grad():
        offset = 0
        for param in weights:
            count = len(param.view(-1))
            chunk = x[offset:offset + count]
            param.data.copy_(torch.Tensor(chunk).view(*param.shape))
            offset += count
#puts the weights into a list, but faster
def weights_to_list_fast(weights):
    """Flatten a sequence of tensors into one Python list of scalars."""
    with torch.no_grad():
        flat = []
        for param in weights:
            flat = flat + param.view(-1).tolist()
        return flat
#=====================================================================
#=====================================================================
#=====================================================================
#####################################################################
################### Some Loss functions things ######################
#####################################################################
def std_loss(x, y):
    """Cross-entropy of logits ``x`` against labels ``y`` plus a penalty
    of ``std_reg`` times the summed per-row standard deviation of the
    logits, averaged over the total number of logit elements."""
    nll = -1.0 * F.log_softmax(x, 1)
    picked = nll.gather(1, y.unsqueeze(1)).mean()
    spread = torch.sum(torch.std(x, dim=1)) / len(x.view(-1))
    return picked + std_reg * spread


std_reg = 1.0
# Command-line configuration for the finetuning / verification-error experiment.
parser = argparse.ArgumentParser(description='Finetuning for verification error')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--model', default='resnet', type=str, help='resnet or vgg')
parser.add_argument('--loss_func', default='regular', type=str, help='loss function: regular,hessian, hessianv2, std_loss')
parser.add_argument('--dataset', default = 'cifar10', type=str, help ='cifar10, cifar100')
parser.add_argument('--pretrain_epochs', default=10, type=int, help='number of pretraining epochs')
parser.add_argument('--pretrain_batch_size', default=32, type=int, help='pretraining batch size')
parser.add_argument('--finetune_batch_size', default=32, type=int, help='finetuning batch size')
parser.add_argument('--unlearn_batch', default=114, type=int, help='what batch of data should be unlearned')
parser.add_argument('--finetune_epochs', default=1, type=int, help='number of finetuning epochs')
args = parser.parse_args()
# Prefer the GPU when available.  NOTE(review): the training loop below calls
# .cuda() unconditionally, so in practice a CUDA device is required.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#=====================================================================
#==================== CONSTRUCTING ALL THE DIFFERENT DATA SETS =======
#=====================================================================
print('==> Preparing Data...')
# CIFAR-10: standard augmentation (random crop + flip) and per-channel
# normalization with the usual CIFAR-10 statistics.
if args.dataset == 'cifar10':
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
    # shuffle=False keeps the batch order deterministic so a specific batch
    # index (args.unlearn_batch) always maps to the same examples.
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.finetune_batch_size, shuffle=False, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=args.finetune_batch_size, shuffle=False, num_workers=2)
    num_classes = 10
# CIFAR-100: same idea with an extra random rotation and CIFAR-100 statistics.
if args.dataset == 'cifar100':
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.ToTensor(),
        transforms.Normalize( (0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))
    ])
    trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, shuffle=False, num_workers=2, batch_size=args.finetune_batch_size)
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5070751592371323, 0.48654887331495095, 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, 0.27615047132568404))])
    testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
    testloader= torch.utils.data.DataLoader(testset, shuffle=False, num_workers=2, batch_size=args.finetune_batch_size)
    num_classes = 100
#transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),transforms.RandomHorizontalFlip(),transforms.ToTensor(),transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
#trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
#trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.finetune_batch_size, shuffle=True, num_workers=2)
#transform_test = transforms.Compose([transforms.ToTensor(),transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),])
#testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
#testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
print('==> Preparing Hessian data..')
# A single fixed 512-sample batch used for all Hessian eigenvalue estimates.
trainset_list = list(trainset)
hessian_loader = trainset_list[:512]
hessian_loader = torch.utils.data.DataLoader(hessian_loader, batch_size=512, shuffle=False, num_workers=2)
print('==> Preparing Finetuning data...')
# batch_star: the one batch to be "unlearned"; data_no_unlearned: the rest.
batch_star = trainset_list[args.finetune_batch_size * args.unlearn_batch: args.finetune_batch_size * (args.unlearn_batch+1)]
data_no_unlearned = trainset_list[:args.finetune_batch_size * args.unlearn_batch] + trainset_list[args.finetune_batch_size * (args.unlearn_batch+1):]
unlearned_loader = torch.utils.data.DataLoader(batch_star, batch_size=args.finetune_batch_size, shuffle=False, num_workers=2)
#Getting model
#=====================================================================
#============== GETTING PRETRAINED MODEL ============= ===============
#=====================================================================
print('==> Building model..')
# Build the architecture selected on the command line.
if args.model == 'resnet':
    net = ResNet18(num_classes)
if args.model == 'vgg':
    net = VGG('VGG19',num_classes)
net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True
print('==> Loading in pre-trained model..')
# Restore the checkpoint produced by the pretraining script; the filename
# encodes the full pretraining configuration.
assert os.path.isdir('Final_pretrained_models'), 'Error: no checkpoint directory found!'
checkpoint = torch.load(f'./Final_pretrained_models/{args.model}_{args.dataset}_{args.loss_func}_{args.pretrain_batch_size}_{args.pretrain_epochs}.pth')
net.load_state_dict(checkpoint['net'])
#saving the weights of the pretrained model
M_pretrain = copy.deepcopy(net)
w_pretrain_weights_tensor = [param for param in M_pretrain.parameters()]
w_pretrain_weights = weights_to_list_fast(w_pretrain_weights_tensor)
print('==> Beginning iteration over T=0 to T=500...')
data_ret = {}
#1. Initialize your 2 models: M and M'
# M is finetuned on all batches; M_retrained skips the first batch (x*).
M = copy.deepcopy(M_pretrain)
M_retrained = copy.deepcopy(M_pretrain)
M.train()
M_retrained.train()
criterion_unl = nn.CrossEntropyLoss()
#initialize the loss functions, optimizers and schedulers for both models
# NOTE(review): the schedulers created here are never stepped in the loop
# below, so the learning rate stays fixed at args.lr.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(M.parameters(), lr=args.lr,momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
criterion_retrain = nn.CrossEntropyLoss()
optimizer_retrain = optim.SGD(M_retrained.parameters(), lr=args.lr,momentum=0.9, weight_decay=5e-4)
scheduler_retrain = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_retrain, T_max=200)
#the data that we are finetuning on (with x* at the beginning)
data = batch_star + data_no_unlearned
data_loader = torch.utils.data.DataLoader(data, batch_size = args.finetune_batch_size, shuffle = False, num_workers = 2)
sigma_list = []
print(' T = ',len(data_loader))
#we need some lists for statistics
sigma_list = []
delta_weights_list = []
unl_error_list = []
rolling_unl_error_list = []
ver_error_list = []
hessian_criterion = nn.CrossEntropyLoss()
# Main loop: finetune M on every batch (x* first) and M_retrained on all
# batches except x*; after each step estimate the Hessian sharpness and
# compare a first-order "unlearned" model M_unlearned against M_retrained.
for ep in range(0,args.finetune_epochs):
    for main_idx,(inputs, targets) in enumerate(data_loader):
        M.train()
        print('Epoch = ',ep, 'on t = ',main_idx)
        inputs, targets = inputs.to(device), targets.to(device)
        if main_idx ==0:
            #only update M:
            # Batch 0 is x*, the batch to be unlearned -- M_retrained skips it.
            optimizer.zero_grad()
            outputs_M = M(inputs)
            if args.loss_func == 'regular':
                loss_M = criterion(outputs_M, targets)
            if args.loss_func == 'std':
                loss_M = std_loss(outputs_M,targets)
            loss_M.backward()
            optimizer.step()
        if main_idx !=0:
            #update both M and M'
            optimizer.zero_grad()
            outputs_M = M(inputs)
            if args.loss_func == 'regular':
                loss_M = criterion(outputs_M, targets)
            if args.loss_func == 'std':
                loss_M = std_loss(outputs_M,targets)
            loss_M.backward()
            optimizer.step()
            optimizer_retrain.zero_grad()
            outputs_retrain = M_retrained(inputs)
            if args.loss_func == 'regular':
                loss_retrain = criterion(outputs_retrain, targets)
            if args.loss_func == 'std':
                loss_retrain = std_loss(outputs_retrain,targets)
            loss_retrain.backward()
            optimizer_retrain.step()
        M.eval()
        # Grab the single fixed 512-sample Hessian batch (loop breaks at once).
        for i,(img,label) in enumerate(hessian_loader):
            img = img.cuda()
            label = label.cuda()
            break
        # sigma: square root of the top Hessian eigenvalue (loss sharpness).
        # NOTE(review): a negative top eigenvalue would make np.sqrt return nan.
        if args.loss_func == 'regular':
            hessian_comp = hessian(M, hessian_criterion, data=(img, label), cuda=True)
        if args.loss_func == 'std':
            hessian_comp = hessian(M, std_loss, data=(img, label), cuda=True)
        top_eigenvalues, top_eigenvector = hessian_comp.eigenvalues()
        sigma = np.sqrt(top_eigenvalues[-1])
        sigma_list.append(sigma)
        #Now, save the weights of both M_(N+t) and M'_(N+t)
        M_weights_tensor = [param for param in M.parameters()]
        w_M_weights = weights_to_list_fast(M_weights_tensor)
        M_retrain_tensor = [param for param in M_retrained.parameters()]
        w_M_retrain_weights = weights_to_list_fast(M_retrain_tensor)
        #Now, get M''_(N+t)
        # M_unlearned: approximate unlearning by adding back the (scaled)
        # pretraining gradient of the x* batch to M's current weights.
        # NOTE(review): optimizer_unlearned/scheduler_unlearned are created
        # but never stepped -- the update below is applied manually.
        M_unlearned = copy.deepcopy(M)
        optimizer_unlearned = optim.SGD(M_unlearned.parameters(), lr=args.lr,momentum=0.9, weight_decay=5e-4)
        scheduler_unlearned = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_unlearned, T_max=200)
        M_unlearned.train()
        M_pretrain.train()
        # unlearned_loader holds exactly the x* batch; grads from the last
        # (only) iteration are used after the loop.
        for i,(img,label) in enumerate(unlearned_loader):
            img = img.cuda()
            label = label.cuda()
            output_pre = M_pretrain(img)
            if args.loss_func == 'regular':
                loss_unl = criterion(output_pre, label)
            if args.loss_func == 'std':
                loss_unl = std_loss(output_pre,label)
            loss_unl.backward(retain_graph=True)
            grads = torch.autograd.grad(loss_unl, [param for param in M_pretrain.parameters()],create_graph = True)
        old_params = {}
        for i, (name, params) in enumerate(M.named_parameters()):
            old_params[name] = params.clone()
            old_params[name] += (ep+1) * args.lr * grads[i]
        for name, params in M_unlearned.named_parameters():
            params.data.copy_(old_params[name])
        M_unlearned_tensor = [param for param in M_unlearned.parameters()]
        w_M_unlearned_weights = weights_to_list_fast(M_unlearned_tensor)
        #Now that we have the 3 models, M_(N+t), M'_(N+t) and M''_(N+t), let's compute the unlearning error of M. We will do this two ways: one with a running average sigma, one without
        #print('calculating unl error....')
        delta_weights = np.linalg.norm((np.array(w_M_weights) - np.array(w_pretrain_weights)))
        #print('wow, sigma is = ',sigma)
        unl_error = (args.lr * args.lr) *((len(data_loader)*ep)+ main_idx) *(1/2) * delta_weights *sigma
        rolling_unl_error = (args.lr * args.lr) *((len(data_loader)*ep)+ main_idx) *(1/2) * delta_weights * (sum(sigma_list)/len(sigma_list))
        #now compute the verification error
        verification_error = np.linalg.norm((np.array(w_M_retrain_weights) - np.array(w_M_unlearned_weights)))
        delta_weights_list.append(delta_weights)
        unl_error_list.append(unl_error)
        rolling_unl_error_list.append(rolling_unl_error)
        ver_error_list.append(verification_error)
        #print('rolling unlearning error is = ', rolling_unl_error)
        #print('unl error is ', unl_error)
        #print('sigma is ', sum(sigma_list)/len(sigma_list))
        #print('delta weights is', delta_weights)
# Collect all per-step statistics into a single results dictionary.
ret = {}
ret['sigma'] = sigma_list
ret['delta weights'] = delta_weights_list
ret['verification error'] = ver_error_list
ret['unlearning error'] = unl_error_list
ret['rolling unlearning error'] = rolling_unl_error_list
import pickle
# Persist the results; the filename encodes the full experiment configuration.
if not os.path.isdir('final_correlation_results'):
    os.mkdir('final_correlation_results')
path = f'./final_correlation_results/{args.model}_{args.dataset}_{args.loss_func}_{args.pretrain_batch_size}_{args.pretrain_epochs}_{args.finetune_batch_size}_{args.finetune_epochs}.p'
pickle.dump(ret, open(path, 'wb'))
| [
"torchvision.datasets.CIFAR100",
"torch.nn.CrossEntropyLoss",
"numpy.sqrt",
"numpy.array",
"torch.cuda.is_available",
"copy.deepcopy",
"argparse.ArgumentParser",
"os.path.isdir",
"os.mkdir",
"torchvision.transforms.ToTensor",
"torchvision.transforms.RandomHorizontalFlip",
"torch.Tensor",
"to... | [((1852, 1924), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Finetuning for verification error"""'}), "(description='Finetuning for verification error')\n", (1875, 1924), False, 'import argparse\n'), ((6082, 6175), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['hessian_loader'], {'batch_size': '(512)', 'shuffle': '(False)', 'num_workers': '(2)'}), '(hessian_loader, batch_size=512, shuffle=False,\n num_workers=2)\n', (6109, 6175), False, 'import torch\n'), ((6510, 6620), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['batch_star'], {'batch_size': 'args.finetune_batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(batch_star, batch_size=args.finetune_batch_size,\n shuffle=False, num_workers=2)\n', (6537, 6620), False, 'import torch\n'), ((7156, 7196), 'os.path.isdir', 'os.path.isdir', (['"""Final_pretrained_models"""'], {}), "('Final_pretrained_models')\n", (7169, 7196), False, 'import os\n'), ((7251, 7400), 'torch.load', 'torch.load', (['f"""./Final_pretrained_models/{args.model}_{args.dataset}_{args.loss_func}_{args.pretrain_batch_size}_{args.pretrain_epochs}.pth"""'], {}), "(\n f'./Final_pretrained_models/{args.model}_{args.dataset}_{args.loss_func}_{args.pretrain_batch_size}_{args.pretrain_epochs}.pth'\n )\n", (7261, 7400), False, 'import torch\n'), ((7490, 7508), 'copy.deepcopy', 'copy.deepcopy', (['net'], {}), '(net)\n', (7503, 7508), False, 'import copy\n'), ((7764, 7789), 'copy.deepcopy', 'copy.deepcopy', (['M_pretrain'], {}), '(M_pretrain)\n', (7777, 7789), False, 'import copy\n'), ((7804, 7829), 'copy.deepcopy', 'copy.deepcopy', (['M_pretrain'], {}), '(M_pretrain)\n', (7817, 7829), False, 'import copy\n'), ((7879, 7900), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7898, 7900), True, 'import torch.nn as nn\n'), ((7987, 8008), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8006, 8008), True, 'import torch.nn as nn\n'), 
((8103, 8167), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'T_max': '(200)'}), '(optimizer, T_max=200)\n', (8145, 8167), False, 'import torch\n'), ((8189, 8210), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8208, 8210), True, 'import torch.nn as nn\n'), ((8331, 8403), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer_retrain'], {'T_max': '(200)'}), '(optimizer_retrain, T_max=200)\n', (8373, 8403), False, 'import torch\n'), ((8520, 8624), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['data'], {'batch_size': 'args.finetune_batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(data, batch_size=args.finetune_batch_size,\n shuffle=False, num_workers=2)\n', (8547, 8624), False, 'import torch\n'), ((8842, 8863), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8861, 8863), True, 'import torch.nn as nn\n'), ((2846, 2871), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2869, 2871), False, 'import torch\n'), ((3581, 3682), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (3609, 3682), False, 'import torchvision\n'), ((3697, 3805), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'args.finetune_batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(trainset, batch_size=args.finetune_batch_size,\n shuffle=False, num_workers=2)\n', (3724, 3805), False, 'import torch\n'), ((3817, 3918), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n 
transform=transform_test)\n", (3845, 3918), False, 'import torchvision\n'), ((3932, 4039), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'args.finetune_batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(testset, batch_size=args.finetune_batch_size,\n shuffle=False, num_workers=2)\n', (3959, 4039), False, 'import torch\n'), ((4469, 4571), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='./data', train=True, download=True,\n transform=transform_train)\n", (4498, 4571), False, 'import torchvision\n'), ((4586, 4694), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'shuffle': '(False)', 'num_workers': '(2)', 'batch_size': 'args.finetune_batch_size'}), '(trainset, shuffle=False, num_workers=2,\n batch_size=args.finetune_batch_size)\n', (4613, 4694), False, 'import torch\n'), ((4936, 5038), 'torchvision.datasets.CIFAR100', 'torchvision.datasets.CIFAR100', ([], {'root': '"""./data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='./data', train=False, download=True,\n transform=transform_test)\n", (4965, 5038), False, 'import torchvision\n'), ((5051, 5158), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'shuffle': '(False)', 'num_workers': '(2)', 'batch_size': 'args.finetune_batch_size'}), '(testset, shuffle=False, num_workers=2,\n batch_size=args.finetune_batch_size)\n', (5078, 5158), False, 'import torch\n'), ((7050, 7076), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (7071, 7076), False, 'import torch\n'), ((13846, 13888), 'os.path.isdir', 'os.path.isdir', (['"""final_correlation_results"""'], {}), "('final_correlation_results')\n", (13859, 13888), False, 'import os\n'), ((13894, 13931), 'os.mkdir', 'os.mkdir', (['"""final_correlation_results"""'], {}), 
"('final_correlation_results')\n", (13902, 13931), False, 'import os\n'), ((622, 637), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (635, 637), False, 'import torch\n'), ((988, 1003), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1001, 1003), False, 'import torch\n'), ((1627, 1646), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x', '(1)'], {}), '(x, 1)\n', (1640, 1646), True, 'import torch.nn.functional as F\n'), ((10714, 10742), 'numpy.sqrt', 'np.sqrt', (['top_eigenvalues[-1]'], {}), '(top_eigenvalues[-1])\n', (10721, 10742), True, 'import numpy as np\n'), ((11156, 11172), 'copy.deepcopy', 'copy.deepcopy', (['M'], {}), '(M)\n', (11169, 11172), False, 'import copy\n'), ((11313, 11387), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer_unlearned'], {'T_max': '(200)'}), '(optimizer_unlearned, T_max=200)\n', (11355, 11387), False, 'import torch\n'), ((1740, 1759), 'torch.std', 'torch.std', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (1749, 1759), False, 'import torch\n'), ((3211, 3247), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (3232, 3247), True, 'import torchvision.transforms as transforms\n'), ((3257, 3290), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (3288, 3290), True, 'import torchvision.transforms as transforms\n'), ((3300, 3321), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3319, 3321), True, 'import torchvision.transforms as transforms\n'), ((3331, 3402), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (3351, 3402), True, 'import torchvision.transforms as transforms\n'), ((3458, 3479), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3477, 3479), True, 'import 
torchvision.transforms as transforms\n'), ((3489, 3560), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (3509, 3560), True, 'import torchvision.transforms as transforms\n'), ((4140, 4176), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (4161, 4176), True, 'import torchvision.transforms as transforms\n'), ((4186, 4219), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (4217, 4219), True, 'import torchvision.transforms as transforms\n'), ((4229, 4258), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(15)'], {}), '(15)\n', (4254, 4258), True, 'import torchvision.transforms as transforms\n'), ((4268, 4289), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4287, 4289), True, 'import torchvision.transforms as transforms\n'), ((4299, 4455), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5070751592371323, 0.48654887331495095, 0.4409178433670343)', '(0.2673342858792401, 0.2564384629170883, 0.27615047132568404)'], {}), '((0.5070751592371323, 0.48654887331495095, \n 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, \n 0.27615047132568404))\n', (4319, 4455), True, 'import torchvision.transforms as transforms\n'), ((4742, 4763), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4761, 4763), True, 'import torchvision.transforms as transforms\n'), ((4773, 4929), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5070751592371323, 0.48654887331495095, 0.4409178433670343)', '(0.2673342858792401, 0.2564384629170883, 0.27615047132568404)'], {}), '((0.5070751592371323, 0.48654887331495095, \n 0.4409178433670343), (0.2673342858792401, 0.2564384629170883, \n 0.27615047132568404))\n', (4793, 4929), True, 'import 
torchvision.transforms as transforms\n'), ((10454, 10513), 'PyHessian.pyhessian.hessian', 'hessian', (['M', 'hessian_criterion'], {'data': '(img, label)', 'cuda': '(True)'}), '(M, hessian_criterion, data=(img, label), cuda=True)\n', (10461, 10513), False, 'from PyHessian.pyhessian import hessian\n'), ((10577, 10627), 'PyHessian.pyhessian.hessian', 'hessian', (['M', 'std_loss'], {'data': '(img, label)', 'cuda': '(True)'}), '(M, std_loss, data=(img, label), cuda=True)\n', (10584, 10627), False, 'from PyHessian.pyhessian import hessian\n'), ((12687, 12708), 'numpy.array', 'np.array', (['w_M_weights'], {}), '(w_M_weights)\n', (12695, 12708), True, 'import numpy as np\n'), ((12711, 12739), 'numpy.array', 'np.array', (['w_pretrain_weights'], {}), '(w_pretrain_weights)\n', (12719, 12739), True, 'import numpy as np\n'), ((13120, 13149), 'numpy.array', 'np.array', (['w_M_retrain_weights'], {}), '(w_M_retrain_weights)\n', (13128, 13149), True, 'import numpy as np\n'), ((13152, 13183), 'numpy.array', 'np.array', (['w_M_unlearned_weights'], {}), '(w_M_unlearned_weights)\n', (13160, 13183), True, 'import numpy as np\n'), ((786, 805), 'torch.Tensor', 'torch.Tensor', (['array'], {}), '(array)\n', (798, 805), False, 'import torch\n')] |
'''
Created on 29 Mar 2019
@author: ssg37927
'''
from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16
def build_cubemap_vector_array(com, mesh_size=10,
min_radius=200,
max_radius=800,
radial_steps=1000):
# create the tiles to build the x, y and z
xt, yt , rt = meshgrid(linspace(-1.0, 1.0, mesh_size),
linspace(-1.0, 1.0, mesh_size),
linspace(min_radius, max_radius, radial_steps))
zt = zeros_like(xt)
ot = zt+1.0
nt = zt-1.0
xmap = concatenate(
(concatenate((zt, xt, zt, zt), axis=1),
concatenate((nt, xt, ot, xt[:, ::-1]), axis=1),
concatenate((zt, xt, zt, zt), axis=1)))
ymap = concatenate(
(concatenate((zt, ot, zt, zt), axis=1),
concatenate((yt[::-1, :], yt[::-1, :], yt[::-1, :], yt[::-1, :]), axis=1),
concatenate((zt, nt, zt, zt), axis=1)))
zmap = concatenate(
(concatenate((zt, yt, zt, zt), axis=1),
concatenate((xt, ot, xt[:, ::-1], nt), axis=1),
concatenate((zt, yt[::-1, :], zt, zt), axis=1)))
rmap = concatenate(
(concatenate((rt, rt, rt, rt), axis=1),
concatenate((rt, rt, rt, rt), axis=1),
concatenate((rt, rt, rt, rt), axis=1)))
scale = sqrt(xmap**2 + ymap**2 + zmap**2)
return (((xmap/scale)*rmap)+com[0], ((ymap/scale)*rmap)+com[1], ((zmap/scale)*rmap)+com[2], rmap)
| [
"numpy.sqrt",
"numpy.linspace",
"numpy.zeros_like",
"numpy.concatenate"
] | [((577, 591), 'numpy.zeros_like', 'zeros_like', (['xt'], {}), '(xt)\n', (587, 591), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((1380, 1419), 'numpy.sqrt', 'sqrt', (['(xmap ** 2 + ymap ** 2 + zmap ** 2)'], {}), '(xmap ** 2 + ymap ** 2 + zmap ** 2)\n', (1384, 1419), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((402, 432), 'numpy.linspace', 'linspace', (['(-1.0)', '(1.0)', 'mesh_size'], {}), '(-1.0, 1.0, mesh_size)\n', (410, 432), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((461, 491), 'numpy.linspace', 'linspace', (['(-1.0)', '(1.0)', 'mesh_size'], {}), '(-1.0, 1.0, mesh_size)\n', (469, 491), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((520, 566), 'numpy.linspace', 'linspace', (['min_radius', 'max_radius', 'radial_steps'], {}), '(min_radius, max_radius, radial_steps)\n', (528, 566), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((658, 695), 'numpy.concatenate', 'concatenate', (['(zt, xt, zt, zt)'], {'axis': '(1)'}), '((zt, xt, zt, zt), axis=1)\n', (669, 695), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((706, 752), 'numpy.concatenate', 'concatenate', (['(nt, xt, ot, xt[:, ::-1])'], {'axis': '(1)'}), '((nt, xt, ot, xt[:, ::-1]), axis=1)\n', (717, 752), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((763, 800), 'numpy.concatenate', 'concatenate', (['(zt, xt, zt, zt)'], {'axis': '(1)'}), '((zt, xt, zt, zt), axis=1)\n', (774, 800), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((837, 874), 'numpy.concatenate', 'concatenate', (['(zt, ot, zt, zt)'], {'axis': '(1)'}), '((zt, ot, zt, zt), axis=1)\n', (848, 874), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((885, 958), 
'numpy.concatenate', 'concatenate', (['(yt[::-1, :], yt[::-1, :], yt[::-1, :], yt[::-1, :])'], {'axis': '(1)'}), '((yt[::-1, :], yt[::-1, :], yt[::-1, :], yt[::-1, :]), axis=1)\n', (896, 958), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((969, 1006), 'numpy.concatenate', 'concatenate', (['(zt, nt, zt, zt)'], {'axis': '(1)'}), '((zt, nt, zt, zt), axis=1)\n', (980, 1006), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((1043, 1080), 'numpy.concatenate', 'concatenate', (['(zt, yt, zt, zt)'], {'axis': '(1)'}), '((zt, yt, zt, zt), axis=1)\n', (1054, 1080), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((1091, 1137), 'numpy.concatenate', 'concatenate', (['(xt, ot, xt[:, ::-1], nt)'], {'axis': '(1)'}), '((xt, ot, xt[:, ::-1], nt), axis=1)\n', (1102, 1137), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((1148, 1194), 'numpy.concatenate', 'concatenate', (['(zt, yt[::-1, :], zt, zt)'], {'axis': '(1)'}), '((zt, yt[::-1, :], zt, zt), axis=1)\n', (1159, 1194), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((1231, 1268), 'numpy.concatenate', 'concatenate', (['(rt, rt, rt, rt)'], {'axis': '(1)'}), '((rt, rt, rt, rt), axis=1)\n', (1242, 1268), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((1279, 1316), 'numpy.concatenate', 'concatenate', (['(rt, rt, rt, rt)'], {'axis': '(1)'}), '((rt, rt, rt, rt), axis=1)\n', (1290, 1316), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n'), ((1327, 1364), 'numpy.concatenate', 'concatenate', (['(rt, rt, rt, rt)'], {'axis': '(1)'}), '((rt, rt, rt, rt), axis=1)\n', (1338, 1364), False, 'from numpy import meshgrid, linspace, concatenate, zeros_like, sqrt, int16\n')] |
# Keplerian fit configuration file for HIP11915
# 15 Mar 2022
# Packages
import pandas as pd
import os
import numpy as np
import radvel
import astropy.units as u
# Global planetary system and dataset parameters
starname = 'HIP11915'
nplanets = 2                                 # two-planet Keplerian model
instnames = ['HARPS-A', 'HARPS-B']           # one offset/jitter term per instrument
ntels = len(instnames)
fitting_basis = 'per tc secosw sesinw k'     # radvel parameterization used by the fitter
planet_letters = {1: 'b', 2: 'c'}
# Load data
# ASCII file with columns named: "time,mnvel,errvel,tel"
# RV are expected to be in m/s
data = pd.read_csv('https://raw.githubusercontent.com/thiagofst/HIP11915/main/A/HIP11915_A.txt', sep = ',')
t = np.array(data['time'])
vel = np.array(data['mnvel'])
errvel = np.array(data['errvel'])
telgrps = data.groupby('tel').groups         # row indices grouped by instrument
bjd = 0.                                     # no offset applied to the time column
# Priors (initial guesses), one set per planet
anybasis_params = radvel.Parameters(nplanets, basis = 'per tc secosw sesinw k')
anybasis_params['per1'] = radvel.Parameter(value = 3703.703) # Orbital period
anybasis_params['tc1'] = radvel.Parameter(value = 2456560.) # Time of inferior conjunction
anybasis_params['secosw1'] = radvel.Parameter(value = 0.1) # sqrt(e)cos(w)
anybasis_params['sesinw1'] = radvel.Parameter(value = 0.1) # sqrt(e)sin(w)
anybasis_params['k1'] = radvel.Parameter(value = 12.5) # RV semi-amplitude
anybasis_params['per2'] = radvel.Parameter(value = 2941.176) # Orbital period
anybasis_params['tc2'] = radvel.Parameter(value = 2457283.) # Time of inferior conjunction
anybasis_params['secosw2'] = radvel.Parameter(value = 0.1) # sqrt(e)cos(w)
anybasis_params['sesinw2'] = radvel.Parameter(value = 0.1) # sqrt(e)sin(w)
anybasis_params['k2'] = radvel.Parameter(value = 5.6) # RV semi-amplitude
time_base = np.median(t)                     # reference epoch for the trend terms
anybasis_params['dvdt'] = radvel.Parameter(value = 0.0)   # linear RV trend (held fixed below)
anybasis_params['curv'] = radvel.Parameter(value = 0.0)   # quadratic RV trend (held fixed below)
# Velocity zero-points for each instrument
anybasis_params['gamma_HARPS-B'] = radvel.Parameter(value = 0)
anybasis_params['gamma_HARPS-A'] = radvel.Parameter(value = 0)
# Jitter term for each instrument
anybasis_params['jit_HARPS-B'] = radvel.Parameter(value = 0.)
anybasis_params['jit_HARPS-A'] = radvel.Parameter(value = 0.)
# Convert input orbital parameters into the fitting basis
params = anybasis_params.basis.to_any_basis(anybasis_params, fitting_basis)
# Set vary parameters (trends fixed at zero, jitters free)
anybasis_params['dvdt'].vary = False
anybasis_params['curv'].vary = False
anybasis_params['jit_HARPS-B'].vary = True
anybasis_params['jit_HARPS-A'].vary = True
# Priors and widths (Gaussians centred on the initial guesses above)
priors = [
    radvel.prior.EccentricityPrior(nplanets), # Keeps eccentricity < 1
    # Other options:
    radvel.prior.Gaussian('tc1', anybasis_params['tc1'].value, 100.0),
    radvel.prior.Gaussian('per1', anybasis_params['per1'].value, 100),
    radvel.prior.Gaussian('k1', anybasis_params['k1'].value, 0.1),
    radvel.prior.Gaussian('sesinw1', anybasis_params['sesinw1'].value, 0.1),
    radvel.prior.Gaussian('secosw1', anybasis_params['secosw1'].value, 0.1),
     radvel.prior.Gaussian('tc2', anybasis_params['tc2'].value, 100.0),
    radvel.prior.Gaussian('per2', anybasis_params['per2'].value, 100),
    radvel.prior.Gaussian('k2', anybasis_params['k2'].value, 0.1),
    radvel.prior.Gaussian('sesinw2', anybasis_params['sesinw2'].value, 0.3),
    radvel.prior.Gaussian('secosw2', anybasis_params['secosw2'].value, 0.3),
    ]
# Stellar parameters
stellar = dict(mstar=0.993, mstar_err = 0.005) # https://arxiv.org/abs/1408.4130
| [
"numpy.median",
"pandas.read_csv",
"radvel.prior.EccentricityPrior",
"radvel.Parameters",
"numpy.array",
"radvel.Parameter",
"radvel.prior.Gaussian"
] | [((512, 620), 'pandas.read_csv', 'pd.read_csv', (['"""https://raw.githubusercontent.com/thiagofst/HIP11915/main/A/HIP11915_A.txt"""'], {'sep': '""","""'}), "(\n 'https://raw.githubusercontent.com/thiagofst/HIP11915/main/A/HIP11915_A.txt'\n , sep=',')\n", (523, 620), True, 'import pandas as pd\n'), ((620, 642), 'numpy.array', 'np.array', (["data['time']"], {}), "(data['time'])\n", (628, 642), True, 'import numpy as np\n'), ((650, 673), 'numpy.array', 'np.array', (["data['mnvel']"], {}), "(data['mnvel'])\n", (658, 673), True, 'import numpy as np\n'), ((684, 708), 'numpy.array', 'np.array', (["data['errvel']"], {}), "(data['errvel'])\n", (692, 708), True, 'import numpy as np\n'), ((806, 865), 'radvel.Parameters', 'radvel.Parameters', (['nplanets'], {'basis': '"""per tc secosw sesinw k"""'}), "(nplanets, basis='per tc secosw sesinw k')\n", (823, 865), False, 'import radvel\n'), ((897, 929), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(3703.703)'}), '(value=3703.703)\n', (913, 929), False, 'import radvel\n'), ((976, 1009), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(2456560.0)'}), '(value=2456560.0)\n', (992, 1009), False, 'import radvel\n'), ((1072, 1099), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.1)'}), '(value=0.1)\n', (1088, 1099), False, 'import radvel\n'), ((1149, 1176), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.1)'}), '(value=0.1)\n', (1165, 1176), False, 'import radvel\n'), ((1221, 1249), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(12.5)'}), '(value=12.5)\n', (1237, 1249), False, 'import radvel\n'), ((1306, 1338), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(2941.176)'}), '(value=2941.176)\n', (1322, 1338), False, 'import radvel\n'), ((1385, 1418), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(2457283.0)'}), '(value=2457283.0)\n', (1401, 1418), False, 'import radvel\n'), ((1481, 1508), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.1)'}), 
'(value=0.1)\n', (1497, 1508), False, 'import radvel\n'), ((1558, 1585), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.1)'}), '(value=0.1)\n', (1574, 1585), False, 'import radvel\n'), ((1630, 1657), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(5.6)'}), '(value=5.6)\n', (1646, 1657), False, 'import radvel\n'), ((1700, 1712), 'numpy.median', 'np.median', (['t'], {}), '(t)\n', (1709, 1712), True, 'import numpy as np\n'), ((1740, 1767), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.0)'}), '(value=0.0)\n', (1756, 1767), False, 'import radvel\n'), ((1797, 1824), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.0)'}), '(value=0.0)\n', (1813, 1824), False, 'import radvel\n'), ((1909, 1934), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0)'}), '(value=0)\n', (1925, 1934), False, 'import radvel\n'), ((1973, 1998), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0)'}), '(value=0)\n', (1989, 1998), False, 'import radvel\n'), ((2072, 2099), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.0)'}), '(value=0.0)\n', (2088, 2099), False, 'import radvel\n'), ((2135, 2162), 'radvel.Parameter', 'radvel.Parameter', ([], {'value': '(0.0)'}), '(value=0.0)\n', (2151, 2162), False, 'import radvel\n'), ((2533, 2573), 'radvel.prior.EccentricityPrior', 'radvel.prior.EccentricityPrior', (['nplanets'], {}), '(nplanets)\n', (2563, 2573), False, 'import radvel\n'), ((2627, 2692), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""tc1"""', "anybasis_params['tc1'].value", '(100.0)'], {}), "('tc1', anybasis_params['tc1'].value, 100.0)\n", (2648, 2692), False, 'import radvel\n'), ((2699, 2764), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""per1"""', "anybasis_params['per1'].value", '(100)'], {}), "('per1', anybasis_params['per1'].value, 100)\n", (2720, 2764), False, 'import radvel\n'), ((2771, 2832), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""k1"""', "anybasis_params['k1'].value", '(0.1)'], {}), 
"('k1', anybasis_params['k1'].value, 0.1)\n", (2792, 2832), False, 'import radvel\n'), ((2839, 2910), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""sesinw1"""', "anybasis_params['sesinw1'].value", '(0.1)'], {}), "('sesinw1', anybasis_params['sesinw1'].value, 0.1)\n", (2860, 2910), False, 'import radvel\n'), ((2917, 2988), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""secosw1"""', "anybasis_params['secosw1'].value", '(0.1)'], {}), "('secosw1', anybasis_params['secosw1'].value, 0.1)\n", (2938, 2988), False, 'import radvel\n'), ((2998, 3063), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""tc2"""', "anybasis_params['tc2'].value", '(100.0)'], {}), "('tc2', anybasis_params['tc2'].value, 100.0)\n", (3019, 3063), False, 'import radvel\n'), ((3070, 3135), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""per2"""', "anybasis_params['per2'].value", '(100)'], {}), "('per2', anybasis_params['per2'].value, 100)\n", (3091, 3135), False, 'import radvel\n'), ((3142, 3203), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""k2"""', "anybasis_params['k2'].value", '(0.1)'], {}), "('k2', anybasis_params['k2'].value, 0.1)\n", (3163, 3203), False, 'import radvel\n'), ((3210, 3281), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""sesinw2"""', "anybasis_params['sesinw2'].value", '(0.3)'], {}), "('sesinw2', anybasis_params['sesinw2'].value, 0.3)\n", (3231, 3281), False, 'import radvel\n'), ((3288, 3359), 'radvel.prior.Gaussian', 'radvel.prior.Gaussian', (['"""secosw2"""', "anybasis_params['secosw2'].value", '(0.3)'], {}), "('secosw2', anybasis_params['secosw2'].value, 0.3)\n", (3309, 3359), False, 'import radvel\n')] |
#!/usr/bin/env python
# coding=utf-8
import numpy as np
def information_entropy(x, p):
    """Return the Shannon entropy (in bits) of distribution *p* over *x*.

    Args:
        x: sequence of outcomes (only its length is used).
        p: probabilities, one per outcome in ``x``; each must be > 0.

    Returns:
        H = -sum_i p[i] * log2(p[i]) as a numpy float (0.0 for empty input).
    """
    # Bug fix: the original did `np.sum(ret[i])`, which summed only the
    # LAST term instead of all of them (and sized the list len(x)+1).
    terms = [-p[i] * np.log2(p[i]) for i in range(len(x))]
    return np.sum(terms)
| [
"numpy.sum",
"numpy.log2"
] | [((200, 214), 'numpy.sum', 'np.sum', (['ret[i]'], {}), '(ret[i])\n', (206, 214), True, 'import numpy as np\n'), ((169, 182), 'numpy.log2', 'np.log2', (['p[i]'], {}), '(p[i])\n', (176, 182), True, 'import numpy as np\n')] |
# (c) 2021 <NAME>
import jax.numpy as jnp
import numpy as np
from jax import vmap
from jax.flatten_util import ravel_pytree
from myriad.config import Config, HParams
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep, DStates
from myriad.trajectory_optimizers.base import TrajectoryOptimizer
from myriad.systems import FiniteHorizonControlSystem
from myriad.utils import integrate_time_independent
class TrapezoidalCollocationOptimizer(TrajectoryOptimizer):
  """Trajectory optimizer using direct trapezoidal collocation.

  States and controls at every mesh point are decision variables,
  coupled by trapezoid-rule "defect" equality constraints.
  For reference, see https://epubs.siam.org/doi/10.1137/16M1062569
  """
  def __init__(self, hp: HParams, cfg: Config, system: FiniteHorizonControlSystem) -> None:
    """
    An optimizer that uses direct trapezoidal collocation.
    For reference, see https://epubs.siam.org/doi/10.1137/16M1062569
    Args:
      hp: Hyperparameters
      cfg: Additional hyperparameters
      system: The system on which to perform the optimization
    """
    num_intervals = hp.intervals  # Segments
    h = system.T / num_intervals  # Segment length
    state_shape = system.x_0.shape[0]
    control_shape = system.bounds.shape[0] - state_shape

    ###########################
    # State and Control Guess #
    ###########################
    u_guess = jnp.zeros((num_intervals + 1, control_shape))
    if system.x_T is not None:
      # We need to handle the cases where a terminal bound is specified only for some state variables, not all
      row_guesses = []
      for i in range(0, len(system.x_T)):
        if system.x_T[i] is not None:
          # Linear interpolation from the initial to the required terminal value
          row_guess = jnp.linspace(system.x_0[i], system.x_T[i], num=num_intervals + 1).reshape(-1, 1)
        else:
          # No terminal bound on this state: guess by forward-integrating the dynamics
          _, row_guess = integrate_time_independent(system.dynamics, system.x_0,
                                                    u_guess, h, num_intervals, hp.integration_method)
          row_guess = row_guess[:, i].reshape(-1, 1)
        row_guesses.append(row_guess)
      x_guess = jnp.hstack(row_guesses)
    else:  # no final state requirement
      _, x_guess = integrate_time_independent(system.dynamics, system.x_0,
                                              u_guess, h, num_intervals, hp.integration_method)
    guess, unravel_decision_variables = ravel_pytree((x_guess, u_guess))
    self.x_guess, self.u_guess = x_guess, u_guess

    ############################
    # State and Control Bounds #
    ############################
    # Control bounds: one (low, high) row per mesh point and per control channel
    u_bounds = np.empty(((num_intervals + 1) * control_shape, 2))
    for i in range(control_shape, 0, -1):
      u_bounds[(control_shape - i) * (num_intervals + 1)
               :(control_shape - i + 1) * (num_intervals + 1)] = system.bounds[-i]
    # Reshape to work with NLP solver
    u_bounds = u_bounds.reshape((-1, 2))

    # State bounds
    x_bounds = np.empty((num_intervals + 1, system.bounds.shape[0] - control_shape, 2))
    x_bounds[:, :, :] = system.bounds[:-control_shape]
    x_bounds[0, :, :] = np.expand_dims(system.x_0, 1)
    if system.x_T is not None:
      # Pin the FINAL mesh point to the terminal state.
      # Bug fix: the previous index `-control_shape` only addressed the last
      # mesh point when control_shape == 1; for multi-input systems it
      # clamped an interior point instead.
      x_bounds[-1, :, :] = np.expand_dims(system.x_T, 1)
    # Reshape to work with NLP solver
    x_bounds = x_bounds.reshape((-1, 2))
    # Put control and state bounds together
    bounds = jnp.vstack((x_bounds, u_bounds))
    self.x_bounds, self.u_bounds = x_bounds, u_bounds

    def trapezoid_cost(x_t1: State, x_t2: State,
                       u_t1: Control, u_t2: Control,
                       t1: Timestep, t2: Timestep) -> Cost:
      """
      Args:
        x_t1: State at start of interval
        x_t2: State at end of interval
        u_t1: Control at start of interval
        u_t2: Control at end of interval
        t1: Time at start of interval
        t2: Time at end of interval
      Returns:
        Trapezoid cost of the interval
      """
      return (h / 2) * (system.cost(x_t1, u_t1, t1) + system.cost(x_t2, u_t2, t2))

    def parametrized_trapezoid_cost(params: Params,
                                    x_t1: State, x_t2: State,
                                    u_t1: Control, u_t2: Control,
                                    t1: Timestep, t2: Timestep) -> Cost:
      """
      Args:
        params: Custom model parameters
        x_t1: State at start of interval
        x_t2: State at end of interval
        u_t1: Control at start of interval
        u_t2: Control at end of interval
        t1: Time at start of interval
        t2: Time at end of interval
      Returns:
        Trapezoid cost of the interval
      """
      return (h / 2) * (system.parametrized_cost(params, x_t1, u_t1, t1)
                        + system.parametrized_cost(params, x_t2, u_t2, t2))

    def objective(variables: jnp.ndarray) -> Cost:
      """
      The objective function.
      Args:
        variables: Raveled state and decision variables
      Returns:
        The sum of the trapezoid costs across the whole trajectory
      """
      x, u = unravel_decision_variables(variables)
      t = jnp.linspace(0, system.T, num=num_intervals + 1)  # Support cost function with dependency on t
      cost = jnp.sum(vmap(trapezoid_cost)(x[:-1], x[1:], u[:-1], u[1:], t[:-1], t[1:]))
      if system.terminal_cost:
        cost += jnp.sum(system.terminal_cost_fn(x[-1], u[-1]))
      return cost

    def parametrized_objective(params: Params, variables: jnp.ndarray) -> Cost:
      """
      The objective function.
      Args:
        params: Custom model parameters
        variables: Raveled state and decision variables
      Returns:
        The sum of the trapezoid costs across the whole trajectory
      """
      x, u = unravel_decision_variables(variables)
      t = jnp.linspace(0, system.T, num=num_intervals + 1)  # Support cost function with dependency on t
      cost = jnp.sum(vmap(parametrized_trapezoid_cost, in_axes=(None, 0, 0, 0, 0, 0, 0))(params,
                                                                                 x[:-1], x[1:],
                                                                                 u[:-1], u[1:],
                                                                                 t[:-1], t[1:]))
      if system.terminal_cost:
        cost += jnp.sum(system.terminal_cost_fn(x[-1], u[-1]))
      return cost

    # TODO: should the terminal cost function also take parameters?
    # probably yes... (will need to fix this in shooting and hs too then)

    def trapezoid_defect(x_t1: State, x_t2: State, u_t1: Control, u_t2: Control) -> DState:
      """
      Args:
        x_t1: State at start of interval
        x_t2: State at end of interval
        u_t1: Control at start of interval
        u_t2: Control at end of interval
      Returns:
        Trapezoid defect of the interval
      """
      left = (h / 2) * (system.dynamics(x_t1, u_t1) + system.dynamics(x_t2, u_t2))
      right = x_t2 - x_t1
      return left - right

    def parametrized_trapezoid_defect(params: Params,
                                      x_t1: State, x_t2: State,
                                      u_t1: Control, u_t2: Control) -> DState:
      """
      Args:
        params: Custom model parameters
        x_t1: State at start of interval
        x_t2: State at end of interval
        u_t1: Control at start of interval
        u_t2: Control at end of interval
      Returns:
        Trapezoid defect of the interval
      """
      left = (h / 2) * (system.parametrized_dynamics(params, x_t1, u_t1)
                        + system.parametrized_dynamics(params, x_t2, u_t2))
      right = x_t2 - x_t1
      return left - right

    def constraints(variables: jnp.ndarray) -> DStates:
      """
      The constraints function.
      Args:
        variables: Raveled state and decision variables
      Returns:
        An array of the defects of the whole trajectory
      """
      x, u = unravel_decision_variables(variables)
      return jnp.ravel(vmap(trapezoid_defect)(x[:-1], x[1:], u[:-1], u[1:]))

    def parametrized_constraints(params: Params, variables: jnp.ndarray) -> DStates:
      """
      The constraints function.
      Args:
        params: Custom model parameters
        variables: Raveled state and decision variables
      Returns:
        An array of the defects of the whole trajectory
      """
      x, u = unravel_decision_variables(variables)
      # Bug fix: in_axes must have one entry per positional argument.
      # parametrized_trapezoid_defect takes 5 arguments (params + 4 arrays),
      # so the previous 7-entry in_axes made jax.vmap raise at call time.
      return jnp.ravel(vmap(parametrized_trapezoid_defect, in_axes=(None, 0, 0, 0, 0))(params,
                                                                                x[:-1], x[1:],
                                                                                u[:-1], u[1:]))

    super().__init__(hp, cfg, objective, parametrized_objective, constraints, parametrized_constraints,
                     bounds, guess, unravel_decision_variables)
| [
"jax.numpy.zeros",
"jax.numpy.hstack",
"jax.numpy.vstack",
"jax.flatten_util.ravel_pytree",
"numpy.empty",
"numpy.expand_dims",
"myriad.utils.integrate_time_independent",
"jax.numpy.linspace",
"jax.vmap"
] | [((1217, 1262), 'jax.numpy.zeros', 'jnp.zeros', (['(num_intervals + 1, control_shape)'], {}), '((num_intervals + 1, control_shape))\n', (1226, 1262), True, 'import jax.numpy as jnp\n'), ((2191, 2223), 'jax.flatten_util.ravel_pytree', 'ravel_pytree', (['(x_guess, u_guess)'], {}), '((x_guess, u_guess))\n', (2203, 2223), False, 'from jax.flatten_util import ravel_pytree\n'), ((2410, 2460), 'numpy.empty', 'np.empty', (['((num_intervals + 1) * control_shape, 2)'], {}), '(((num_intervals + 1) * control_shape, 2))\n', (2418, 2460), True, 'import numpy as np\n'), ((2758, 2830), 'numpy.empty', 'np.empty', (['(num_intervals + 1, system.bounds.shape[0] - control_shape, 2)'], {}), '((num_intervals + 1, system.bounds.shape[0] - control_shape, 2))\n', (2766, 2830), True, 'import numpy as np\n'), ((2910, 2939), 'numpy.expand_dims', 'np.expand_dims', (['system.x_0', '(1)'], {}), '(system.x_0, 1)\n', (2924, 2939), True, 'import numpy as np\n'), ((3178, 3210), 'jax.numpy.vstack', 'jnp.vstack', (['(x_bounds, u_bounds)'], {}), '((x_bounds, u_bounds))\n', (3188, 3210), True, 'import jax.numpy as jnp\n'), ((1916, 1939), 'jax.numpy.hstack', 'jnp.hstack', (['row_guesses'], {}), '(row_guesses)\n', (1926, 1939), True, 'import jax.numpy as jnp\n'), ((1999, 2108), 'myriad.utils.integrate_time_independent', 'integrate_time_independent', (['system.dynamics', 'system.x_0', 'u_guess', 'h', 'num_intervals', 'hp.integration_method'], {}), '(system.dynamics, system.x_0, u_guess, h,\n num_intervals, hp.integration_method)\n', (2025, 2108), False, 'from myriad.utils import integrate_time_independent\n'), ((3010, 3039), 'numpy.expand_dims', 'np.expand_dims', (['system.x_T', '(1)'], {}), '(system.x_T, 1)\n', (3024, 3039), True, 'import numpy as np\n'), ((4915, 4963), 'jax.numpy.linspace', 'jnp.linspace', (['(0)', 'system.T'], {'num': '(num_intervals + 1)'}), '(0, system.T, num=num_intervals + 1)\n', (4927, 4963), True, 'import jax.numpy as jnp\n'), ((5592, 5640), 'jax.numpy.linspace', 
'jnp.linspace', (['(0)', 'system.T'], {'num': '(num_intervals + 1)'}), '(0, system.T, num=num_intervals + 1)\n', (5604, 5640), True, 'import jax.numpy as jnp\n'), ((1651, 1760), 'myriad.utils.integrate_time_independent', 'integrate_time_independent', (['system.dynamics', 'system.x_0', 'u_guess', 'h', 'num_intervals', 'hp.integration_method'], {}), '(system.dynamics, system.x_0, u_guess, h,\n num_intervals, hp.integration_method)\n', (1677, 1760), False, 'from myriad.utils import integrate_time_independent\n'), ((5031, 5051), 'jax.vmap', 'vmap', (['trapezoid_cost'], {}), '(trapezoid_cost)\n', (5035, 5051), False, 'from jax import vmap\n'), ((5708, 5775), 'jax.vmap', 'vmap', (['parametrized_trapezoid_cost'], {'in_axes': '(None, 0, 0, 0, 0, 0, 0)'}), '(parametrized_trapezoid_cost, in_axes=(None, 0, 0, 0, 0, 0, 0))\n', (5712, 5775), False, 'from jax import vmap\n'), ((7844, 7866), 'jax.vmap', 'vmap', (['trapezoid_defect'], {}), '(trapezoid_defect)\n', (7848, 7866), False, 'from jax import vmap\n'), ((8289, 8358), 'jax.vmap', 'vmap', (['parametrized_trapezoid_defect'], {'in_axes': '(None, 0, 0, 0, 0, 0, 0)'}), '(parametrized_trapezoid_defect, in_axes=(None, 0, 0, 0, 0, 0, 0))\n', (8293, 8358), False, 'from jax import vmap\n'), ((1531, 1596), 'jax.numpy.linspace', 'jnp.linspace', (['system.x_0[i]', 'system.x_T[i]'], {'num': '(num_intervals + 1)'}), '(system.x_0[i], system.x_T[i], num=num_intervals + 1)\n', (1543, 1596), True, 'import jax.numpy as jnp\n')] |
"""
Missile Defence Game
Building generation, drawing and destruction module.
Copyright (C) 2011-2012 <NAME>.
See LICENSE (GNU GPL version 3 or later).
"""
import numpy
from random import uniform
def generate_city(resolution):
    """Generate the city skyline as a 2-D occupancy grid.

    Args:
        resolution: (width, height) of the play field in pixels.

    Returns:
        numpy int8 array of shape ``resolution``; 1 marks a building
        pixel, 0 empty sky.
    """
    # create a byte array for buildings info
    # we will use 0 for empty, 1 for present
    pixeldata = numpy.zeros(resolution, numpy.int8)

    # Pass 1: dense strip of small, low buildings.
    x = 100
    while x < resolution[0] - 120:
        gap = int(uniform(0, 5))
        x += gap
        width = int(uniform(5, 20))
        height = int(uniform(10, 18))
        # Floor division keeps the midpoint an int; true division would
        # hand add_building a float and break its range() on Python 3.
        # (Identical to the old `/` under Python 2 integer division.)
        add_building(pixeldata, x + width // 2, width, height)

    # Pass 2: medium buildings.
    x = 120
    while x < resolution[0] - 180:
        gap = int(uniform(5, 20))
        x += gap
        width = int(uniform(10, 40))
        height = int(uniform(20, 50))
        add_building(pixeldata, x + width // 2, width, height)

    # Pass 3: sparse tall towers.
    x = 160
    while x < resolution[0] - 220:
        gap = int(uniform(30, 125))
        x += gap
        width = int(uniform(8, 20))
        height = int(uniform(70, 90))
        add_building(pixeldata, x + width // 2, width, height)

    # Fixed support column for the player's cannon at screen centre.
    add_building(pixeldata, resolution[0] // 2, 20, 100)
    return pixeldata
def add_building(pixeldata, x_mid, width, height):
    """Rasterise one building into *pixeldata*.

    Marks a ``width``-wide, ``height``-tall rectangle of 1s sitting on
    the bottom edge of the grid, centred horizontally on ``x_mid``.
    Pixels beyond the grid's right/bottom edge are silently skipped.
    """
    # todo: add windows
    resolution = numpy.ma.shape(pixeldata)
    # Bug fix: `width/2` is a float on Python 3 (and callers may pass a
    # float midpoint), which makes range() raise TypeError.  int() plus
    # floor division reproduce the old Python 2 integer arithmetic.
    half = width // 2
    mid = int(x_mid)
    for x in range(mid - half, mid + half):
        for y in range(resolution[1] - height, resolution[1]):
            try:
                pixeldata[x][y] = 1
            except IndexError:
                # NOTE(review): negative x would wrap around rather than
                # raise; callers never pass midpoints near 0 — confirm.
                pass
class Buildings(object):
    """Destructible cityscape backed by a 2-D occupancy grid.

    ``pixeldata[x][y]`` is 1 where a building pixel exists, 0 where the
    cell is empty.  ``dirty_set`` holds cells emptied by explosions that
    may still cause the pixels above them to fall.
    """
    def __init__(self, pixeldata, resolution):
        """Wrap an occupancy grid of the given (width, height) resolution."""
        self.dirty_set = set()      # empty cells that may trigger falling pixels
        self.pixeldata = pixeldata  # 2-D grid: 1 = building pixel, 0 = empty
        self.resolution = resolution
    def get(self, x, y):
        """Return the occupancy (0 or 1) at (x, y); 0 for out-of-bounds."""
        x, y = int(x), int(y)
        if x < 0 or y < 0:
            # Guard explicitly: negative indices would wrap around in numpy.
            return 0
        try:
            return self.pixeldata[x][y]
        except IndexError:
            return 0
    def destroy_circle(self, position, radius):
        """Clear all building pixels within ``radius`` of ``position``.

        Newly emptied cells are recorded in ``dirty_set`` so that a
        later ``apply_physics`` pass can let the pixels above them fall.
        """
        # Bounding box of the blast, clamped to the grid.
        x_min = max(0, int(position[0] - radius))
        x_max = min(self.resolution[0], int(position[0] + radius + 1))
        y_min = max(0, int(position[1] - radius))
        y_max = min(self.resolution[1], int(position[1] + radius + 1))
        radius_squared = int(radius * radius)  # compare squared distances; avoids sqrt
        # destroy buildings in the blast radius
        for x in range(x_min, x_max):
            for y in range(y_min, y_max):
                dist_x = position[0] - x
                dist_y = position[1] - y
                if dist_x * dist_x + dist_y * dist_y < radius_squared:
                    if self.pixeldata[x][y] == 1:
                        self.pixeldata[x][y] = 0
                        self.dirty_set.add((x,y))
    def apply_physics(self):
        """Advance the collapse simulation by one step.

        For each dirty (empty) cell, the contiguous column of building
        pixels directly above it (smaller y) shifts down by one pixel:
        the column's top pixel is vacated and the hole is filled.
        Cells that are still unsettled are carried into the next step's
        ``dirty_set``, so the collapse animates one pixel per call.
        """
        ignore_set = set()      # cells scheduled this step; don't process them yet
        new_dirty_set = set()   # cells still unsettled after this step
        for (x,y) in self.dirty_set:
            assert x >=0 and y >=0 and type(x) == type(1) and type(y) == type(1)
            assert self.pixeldata[x][y] == 0
            if (x,y) in ignore_set:
                # Deferred cell: revisit it on the next step instead.
                new_dirty_set.add((x,y))
            else:
                # Scan upwards (decreasing y) over the solid column above the hole.
                y2 = y - 1
                any_falling = False
                while y2 > 0 and self.pixeldata[x][y2] == 1:
                    y2 -= 1
                    any_falling = True
                if any_falling:
                    # Shift the column down one pixel: vacate its top
                    # pixel and fill the hole at (x, y).
                    self.pixeldata[x][y2 + 1] = 0
                    self.pixeldata[x][y] = 1
                    assert (x,y) not in new_dirty_set
                    if y + 1 < self.resolution[1] and self.pixeldata[x][y + 1] == 0:
                        # Cell below the newly-filled pixel is empty, so
                        # the column can keep falling on the next step.
                        new_dirty_set.add((x, y + 1))
                        ignore_set.add((x, y + 1)) # don't want it falling any further
        self.dirty_set = new_dirty_set
| [
"numpy.ma.shape",
"numpy.zeros",
"random.uniform"
] | [((356, 391), 'numpy.zeros', 'numpy.zeros', (['resolution', 'numpy.int8'], {}), '(resolution, numpy.int8)\n', (367, 391), False, 'import numpy\n'), ((1365, 1390), 'numpy.ma.shape', 'numpy.ma.shape', (['pixeldata'], {}), '(pixeldata)\n', (1379, 1390), False, 'import numpy\n'), ((458, 471), 'random.uniform', 'uniform', (['(0)', '(5)'], {}), '(0, 5)\n', (465, 471), False, 'from random import uniform\n'), ((516, 530), 'random.uniform', 'uniform', (['(5)', '(20)'], {}), '(5, 20)\n', (523, 530), False, 'from random import uniform\n'), ((553, 568), 'random.uniform', 'uniform', (['(10)', '(18)'], {}), '(10, 18)\n', (560, 568), False, 'from random import uniform\n'), ((703, 717), 'random.uniform', 'uniform', (['(5)', '(20)'], {}), '(5, 20)\n', (710, 717), False, 'from random import uniform\n'), ((762, 777), 'random.uniform', 'uniform', (['(10)', '(40)'], {}), '(10, 40)\n', (769, 777), False, 'from random import uniform\n'), ((800, 815), 'random.uniform', 'uniform', (['(20)', '(50)'], {}), '(20, 50)\n', (807, 815), False, 'from random import uniform\n'), ((962, 978), 'random.uniform', 'uniform', (['(30)', '(125)'], {}), '(30, 125)\n', (969, 978), False, 'from random import uniform\n'), ((1023, 1037), 'random.uniform', 'uniform', (['(8)', '(20)'], {}), '(8, 20)\n', (1030, 1037), False, 'from random import uniform\n'), ((1060, 1075), 'random.uniform', 'uniform', (['(70)', '(90)'], {}), '(70, 90)\n', (1067, 1075), False, 'from random import uniform\n')] |
"""
============
Radian ticks
============
Plot with radians from the basic_units mockup example package.
This example shows how the unit class can determine the tick locating,
formatting and axis labeling.
"""
import numpy as np
from basic_units import radians, degrees, cos
from matplotlib.pyplot import figure, show
x = [val*radians for val in np.arange(0, 15, 0.01)]
fig = figure()
fig.subplots_adjust(hspace=0.3)
ax = fig.add_subplot(211)
line1, = ax.plot(x, cos(x), xunits=radians)
ax = fig.add_subplot(212)
line2, = ax.plot(x, cos(x), xunits=degrees)
show()
| [
"matplotlib.pyplot.figure",
"basic_units.cos",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((381, 389), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (387, 389), False, 'from matplotlib.pyplot import figure, show\n'), ((565, 571), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (569, 571), False, 'from matplotlib.pyplot import figure, show\n'), ((469, 475), 'basic_units.cos', 'cos', (['x'], {}), '(x)\n', (472, 475), False, 'from basic_units import radians, degrees, cos\n'), ((540, 546), 'basic_units.cos', 'cos', (['x'], {}), '(x)\n', (543, 546), False, 'from basic_units import radians, degrees, cos\n'), ((350, 372), 'numpy.arange', 'np.arange', (['(0)', '(15)', '(0.01)'], {}), '(0, 15, 0.01)\n', (359, 372), True, 'import numpy as np\n')] |
from typing import List, cast
import hither2 as hi
import numpy as np
import spikeextractors as se
import kachery_client as kc
from sortingview.config import job_cache, job_handler
from sortingview.extractors import LabboxEphysSortingExtractor
@hi.function(
    'sorting_info', '0.1.3'
)
def sorting_info(sorting_uri):
    """Summarise a sorting: its unit ids, sample rate and sorting object."""
    extractor = LabboxEphysSortingExtractor(sorting_uri)
    unit_ids = _to_int_list(extractor.get_unit_ids())
    return dict(
        unit_ids=unit_ids,
        samplerate=extractor.get_sampling_frequency(),
        sorting_object=extractor.object()
    )
@kc.taskfunction('sorting_info.3', type='pure-calculation')
def task_sorting_info(sorting_uri: str):
    """Kachery task wrapper: run sorting_info as a cached hither job."""
    config = hi.Config(job_cache=job_cache, job_handler=job_handler.misc)
    with config:
        return hi.Job(sorting_info, {'sorting_uri': sorting_uri})
def _to_int_list(x):
return np.array(x).astype(int).tolist() | [
"hither2.Job",
"hither2.Config",
"numpy.array",
"hither2.function",
"kachery_client.taskfunction",
"sortingview.extractors.LabboxEphysSortingExtractor"
] | [((246, 282), 'hither2.function', 'hi.function', (['"""sorting_info"""', '"""0.1.3"""'], {}), "('sorting_info', '0.1.3')\n", (257, 282), True, 'import hither2 as hi\n'), ((548, 606), 'kachery_client.taskfunction', 'kc.taskfunction', (['"""sorting_info.3"""'], {'type': '"""pure-calculation"""'}), "('sorting_info.3', type='pure-calculation')\n", (563, 606), True, 'import kachery_client as kc\n'), ((334, 374), 'sortingview.extractors.LabboxEphysSortingExtractor', 'LabboxEphysSortingExtractor', (['sorting_uri'], {}), '(sorting_uri)\n', (361, 374), False, 'from sortingview.extractors import LabboxEphysSortingExtractor\n'), ((657, 717), 'hither2.Config', 'hi.Config', ([], {'job_handler': 'job_handler.misc', 'job_cache': 'job_cache'}), '(job_handler=job_handler.misc, job_cache=job_cache)\n', (666, 717), True, 'import hither2 as hi\n'), ((734, 784), 'hither2.Job', 'hi.Job', (['sorting_info', "{'sorting_uri': sorting_uri}"], {}), "(sorting_info, {'sorting_uri': sorting_uri})\n", (740, 784), True, 'import hither2 as hi\n'), ((819, 830), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (827, 830), True, 'import numpy as np\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Non-differentiable utility functions."""
import collections
from concurrent import futures
import contextlib
import functools
import time
from typing import List, Union
import jax
from jax import tree_util
import jax.numpy as jnp
import numpy as np
from scipy import interpolate
from scipy.spatial import transform as scipy_transform
def clip_gradients(grad, grad_max_val=0.0, grad_max_norm=0.0, eps=1e-7):
  """Clip a gradient pytree by value and/or by global norm.

  Args:
    grad: a pytree of gradient arrays.
    grad_max_val: if > 0, clamp every entry to [-grad_max_val, grad_max_val].
    grad_max_norm: if > 0, rescale so the global norm is at most
      grad_max_norm.
    eps: small constant guarding against division by zero.

  Returns:
    The clipped gradient pytree.
  """
  # Element-wise value clipping first.
  if grad_max_val > 0:
    grad = jax.tree_util.tree_map(
        lambda leaf: jnp.clip(leaf, -grad_max_val, grad_max_val), grad)
  # Then rescale by the global (all-leaves) norm if requested.
  if grad_max_norm > 0:
    sq_sum = jax.tree_util.tree_reduce(
        lambda acc, leaf: acc + jnp.sum(leaf**2), grad, initializer=0)
    grad_norm = safe_sqrt(sq_sum)
    scale = jnp.minimum(1, grad_max_norm / (eps + grad_norm))
    grad = jax.tree_util.tree_map(lambda leaf: scale * leaf, grad)
  return grad
def matmul(a, b):
  """Matrix-multiply at highest precision.

  jnp.matmul may default to low-precision (bfloat16) accumulation on some
  backends; this wrapper always requests the highest available precision.
  """
  product = jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
  return product
# pylint: disable=unused-argument
@functools.partial(jax.custom_jvp, nondiff_argnums=(1, 2, 3))
def safe_norm(x, axis=-1, keepdims=False, tol=1e-9):
  """Calculates a np.linalg.norm(d) that's safe for gradients at d=0.

  These gymnastics are to avoid a poorly defined gradient for
  np.linalg.norm(0); see https://github.com/google/jax/issues/3058 for
  details. The custom JVP is defined in `_safe_norm_jvp` below; `tol` only
  affects the backward pass, which is why it is unused in the primal here.

  Args:
    x: A np.array
    axis: The axis along which to compute the norm
    keepdims: if True don't squeeze the axis.
    tol: the absolute threshold within which to zero out the gradient.

  Returns:
    Equivalent to np.linalg.norm(d)
  """
  return jnp.linalg.norm(x, axis=axis, keepdims=keepdims)
@safe_norm.defjvp
def _safe_norm_jvp(axis, keepdims, tol, primals, tangents):
  """Custom JVP rule for safe_norm."""
  x, = primals
  x_dot, = tangents
  safe_tol = max(tol, 1e-30)
  y = safe_norm(x, tol=safe_tol, axis=axis, keepdims=True)
  y_safe = jnp.maximum(y, tol)  # Prevent divide by zero.
  # d||x||/dx = x / ||x||; zero out the tangent within `safe_tol` of the
  # origin, where the true gradient is undefined.
  y_dot = jnp.where(y > safe_tol, x_dot * x / y_safe, jnp.zeros_like(x))
  y_dot = jnp.sum(y_dot, axis=axis, keepdims=True)
  # Squeeze the axis if `keepdims` is False.
  if not keepdims:
    y = jnp.squeeze(y, axis=axis)
    y_dot = jnp.squeeze(y_dot, axis=axis)
  return y, y_dot
def jacobian_to_curl(jacobian):
  """Return the curl of a vector field given its (..., 3, 3) Jacobian.

  The convention is jacobian[..., i, j] = d f_i / d x_j.
  """
  curl_x = jacobian[..., 2, 1] - jacobian[..., 1, 2]
  curl_y = jacobian[..., 0, 2] - jacobian[..., 2, 0]
  curl_z = jacobian[..., 1, 0] - jacobian[..., 0, 1]
  return jnp.stack([curl_x, curl_y, curl_z], axis=-1)
def jacobian_to_div(jacobian):
  """Return the divergence of the displacement field from a map Jacobian.

  If F : x -> x + f(x) then dF/dx = I + df/dx, so subtracting 3 (the trace
  of the 3x3 identity) recovers div(f) from the Jacobian of F.
  """
  trace = jnp.trace(jacobian, axis1=-2, axis2=-1)
  return trace - 3.0
def compute_psnr(mse):
  """Compute the PSNR given an MSE (maximum pixel value assumed to be 1).

  Args:
    mse: float, mean squared error over pixels.

  Returns:
    psnr: float, the PSNR value in dB (-10 * log10(mse)).
  """
  log10_mse = jnp.log(mse) / jnp.log(10.)
  return -10. * log10_mse
@jax.jit
def robust_whiten(x):
  """Center by the nan-aware median and scale by the mean absolute deviation."""
  center = jnp.nanmedian(x)
  spread = jnp.nanmean(jnp.abs(x - center))
  return (x - center) / spread
def interpolate_codes(codes: Union[np.ndarray, List[np.ndarray]],
                      num_samples: int,
                      method='spline',
                      bc_type='natural'):
  """Interpolates latent codes.

  Args:
    codes: the codes to interpolate, shape (num_codes, code_dim).
    num_samples: the number of samples to interpolate to.
    method: 'spline' for a cubic spline with `bc_type` boundary conditions,
      or one of {'linear', 'cubic', 'quadratic', 'slinear'} for interp1d.
    bc_type: interpolation type for spline interpolation.

  Returns:
    (np.ndarray): the interpolated codes, dtype float32.

  Raises:
    ValueError: if `method` is not a supported interpolation method.
  """
  if isinstance(codes, list):
    codes = np.array(codes)
  t = np.arange(len(codes))
  xs = np.linspace(0, len(codes) - 1, num_samples)
  if method == 'spline':
    cs = interpolate.CubicSpline(t, codes, bc_type=bc_type)
    return cs(xs).astype(np.float32)
  elif method in {'linear', 'cubic', 'quadratic', 'slinear'}:
    # Pass `method` through as the interpolation kind; previously the kind
    # was never forwarded, so all of these options silently did linear
    # interpolation.
    interp = interpolate.interp1d(t, codes, kind=method, axis=0)
    return interp(xs).astype(np.float32)
  raise ValueError(f'Unknown method {method!r}')
def interpolate_cameras(cameras, num_samples: int):
  """Interpolates the cameras to the number of output samples.

  Uses a spherical linear interpolation (Slerp) to interpolate the camera
  orientations and a cubic spline to interpolate the camera positions.

  Args:
    cameras: the input cameras to interpolate; each camera exposes
      `orientation` (3x3 rotation matrix), `position` (3-vector) and `copy()`.
    num_samples: the number of output cameras.

  Returns:
    (List[vision_sfm.Camera]): a list of interpolated cameras.
  """
  rotations = []
  positions = []
  for camera in cameras:
    rotations.append(camera.orientation)
    positions.append(camera.position)
  in_times = np.linspace(0, 1, len(rotations))
  # `from_dcm`/`as_dcm` were renamed to `from_matrix`/`as_matrix` in
  # SciPy 1.4 and removed in SciPy 1.6; use the supported names.
  slerp = scipy_transform.Slerp(
      in_times, scipy_transform.Rotation.from_matrix(rotations))
  spline = interpolate.CubicSpline(in_times, positions)
  out_times = np.linspace(0, 1, num_samples)
  out_rots = slerp(out_times).as_matrix()
  out_positions = spline(out_times)
  ref_camera = cameras[0]
  out_cameras = []
  for out_rot, out_pos in zip(out_rots, out_positions):
    out_camera = ref_camera.copy()
    out_camera.orientation = out_rot
    out_camera.position = out_pos
    out_cameras.append(out_camera)
  return out_cameras
def safe_sqrt(x, eps=1e-7):
  """sqrt(x) with exact zeros replaced by `eps`, keeping the gradient finite."""
  guarded = jnp.where(x == 0, eps * jnp.ones_like(x), x)
  return jnp.sqrt(guarded)
@jax.jit
def general_loss_with_squared_residual(x_sq, alpha, scale):
  r"""Implements the general form of the loss.

  This implements the rho(x, \alpha, c) function described in "A General and
  Adaptive Robust Loss Function", Jonathan T. Barron,
  https://arxiv.org/abs/1701.03077.

  Args:
    x_sq: The residual for which the loss is being computed. x can have any
      shape, and alpha and scale will be broadcasted to match x's shape if
      necessary.
    alpha: The shape parameter of the loss (\alpha in the paper), where more
      negative values produce a loss with more robust behavior (outliers "cost"
      less), and more positive values produce a loss with less robust behavior
      (outliers are penalized more heavily). Alpha can be any value in
      [-infinity, infinity], but the gradient of the loss with respect to alpha
      is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
      interpolation between several discrete robust losses:
        alpha=-Infinity: Welsch/Leclerc Loss.
        alpha=-2: Geman-McClure loss.
        alpha=0: Cauchy/Lortentzian loss.
        alpha=1: Charbonnier/pseudo-Huber loss.
        alpha=2: L2 loss.
    scale: The scale parameter of the loss. When |x| < scale, the loss is an
      L2-like quadratic bowl, and when |x| > scale the loss function takes on a
      different shape according to alpha.

  Returns:
    The losses for each element of x, in the same shape as x.
  """
  eps = jnp.finfo(jnp.float32).eps
  # `scale` must be > 0.
  scale = jnp.maximum(eps, scale)
  # The loss when alpha == 2. This will get reused repeatedly.
  loss_two = 0.5 * x_sq / (scale**2)
  # "Safe" versions of log1p and expm1 that will not NaN-out.
  log1p_safe = lambda x: jnp.log1p(jnp.minimum(x, 3e37))
  expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 87.5))
  # The loss when not in one of the special cases.
  # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
  a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
                -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
  # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
  b = jnp.maximum(eps, jnp.abs(alpha - 2))
  loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * alpha) - 1)
  # Select which of the cases of the loss to return as a function of alpha.
  return scale * jnp.where(
      alpha == -jnp.inf, -expm1_safe(-loss_two),
      jnp.where(
          alpha == 0, log1p_safe(loss_two),
          jnp.where(alpha == 2, loss_two,
                    jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
                              loss_ow))))
def points_bound(points):
  """Return the per-axis (min, max) bounds of a point set as a (D, 2) array."""
  lo = np.min(points, axis=0)
  hi = np.max(points, axis=0)
  return np.stack((lo, hi), axis=1)
def points_centroid(points):
  """Return the center of the points' axis-aligned bounding box."""
  bounds = points_bound(points)
  return bounds.mean(axis=1)
def points_bounding_size(points):
  """Return the diagonal length of the points' axis-aligned bounding box."""
  bounds = points_bound(points)
  extent = bounds[:, 1] - bounds[:, 0]
  return np.linalg.norm(extent)
def shard(xs, device_count=None):
  """Split data into shards for multiple devices along the first dimension.

  Args:
    xs: a pytree of arrays whose leading dimension is divisible by the
      device count.
    device_count: number of devices; defaults to jax.local_device_count().

  Returns:
    A pytree with every leaf reshaped to (device_count, -1, ...).
  """
  if device_count is None:
    device_count = jax.local_device_count()
  # `jax.tree_map` is deprecated; use the stable tree_util API, matching the
  # rest of this module.
  return jax.tree_util.tree_map(
      lambda x: x.reshape((device_count, -1) + x.shape[1:]), xs)
def to_device(xs):
  """Transfer a pytree of data to devices (GPU/TPU) as jnp arrays.

  `jax.tree_map` is deprecated; use the stable tree_util API, matching the
  rest of this module.
  """
  return jax.tree_util.tree_map(jnp.array, xs)
def unshard(x, padding=0):
  """Collect a sharded tensor back to its pre-shard shape.

  Merges the first two (device, per-device batch) dimensions and drops any
  trailing `padding` rows that were added to make the batch divisible.
  """
  merged = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
  return merged[:-padding] if padding > 0 else merged
def normalize(x):
  """Scale `x` to unit Euclidean norm."""
  norm = np.linalg.norm(x)
  return x / norm
def parallel_map(f, iterable, max_threads=None, show_pbar=False, **kwargs):
  """Apply `f` to every item of `iterable` using a thread pool.

  Args:
    f: the function to apply.
    iterable: the items to map over.
    max_threads: maximum number of worker threads (None = executor default).
    show_pbar: if True, wrap the iteration in a tqdm progress bar.
    **kwargs: forwarded to executor.map.

  Returns:
    A list of results, in input order.
  """
  with futures.ThreadPoolExecutor(max_threads) as executor:
    mapped = executor.map(f, iterable, **kwargs)
    if show_pbar:
      # pylint: disable=g-import-not-at-top
      import tqdm
      mapped = tqdm.tqdm(mapped, total=len(iterable))
    return list(mapped)
def parallel_tree_map(f, tree, **kwargs):
  """Parallel version of jax.tree_map.

  Flattens `tree`, maps `f` over the leaves with `parallel_map`, and
  rebuilds the original structure.
  """
  # jax.tree_flatten/jax.tree_unflatten are deprecated aliases; use the
  # stable tree_util API, matching the rest of this module.
  leaves, treedef = jax.tree_util.tree_flatten(tree)
  results = parallel_map(f, leaves, **kwargs)
  return jax.tree_util.tree_unflatten(treedef, results)
def strided_subset(sequence, count):
  """Return `count` elements of `sequence`, evenly spaced by index.

  Args:
    sequence: the input sequence.
    count: the number of elements to keep. A negative count, or one at least
      as large as the sequence, returns the sequence unchanged; zero returns
      an empty list.

  Returns:
    A list of `count` evenly spaced elements (first and last included), or
    `sequence` itself when no subsetting is needed.
  """
  if count < 0 or count >= len(sequence):
    return sequence
  if count == 0:
    return []
  ids = np.linspace(0, len(sequence) - 1, count).astype(int)
  return [sequence[i] for i in ids]
def tree_collate(list_of_pytrees):
  """Collates a list of pytrees with the same structure.

  Stacks corresponding leaves along a new leading axis.
  """
  # tree_util.tree_multimap was removed from JAX; tree_map accepts multiple
  # trees and is the supported replacement.
  return tree_util.tree_map(lambda *x: np.stack(x), *list_of_pytrees)
@contextlib.contextmanager
def print_time(name):
  """Print the wall-clock time spent inside the `with` block."""
  t0 = time.time()
  yield
  print(f'[{name}] time elapsed: {time.time() - t0:.04f}')
class ValueMeter:
  """Accumulates scalar values and reduces them on demand."""

  def __init__(self):
    self._values = []

  def reset(self):
    """Discard all tracked values."""
    self._values.clear()

  def update(self, value):
    """Append a value to the meter."""
    self._values.append(value)

  def reduce(self, reduction='mean'):
    """Reduce the tracked values ('mean', 'std' or 'last')."""
    if reduction == 'last':
      return self._values[-1]
    if reduction == 'mean':
      return np.mean(self._values)
    if reduction == 'std':
      return np.std(self._values)
    raise ValueError(f'Unknown reduction {reduction}')
class TimeTracker:
  """Accumulates per-key timings over multiple steps and reports averages."""

  def __init__(self):
    # One ValueMeter per timing key; tic() start times are kept separately.
    self._meters = collections.defaultdict(ValueMeter)
    self._marked_time = collections.defaultdict(float)

  @contextlib.contextmanager
  def record_time(self, key: str):
    """Record the wall time spent inside the `with` block under `key`."""
    t0 = time.time()
    yield
    self.update(key, time.time() - t0)

  def update(self, key, value):
    """Add one timing sample for `key`."""
    self._meters[key].update(value)

  def tic(self, *args):
    """Mark the starting time for each given key."""
    for key in args:
      self._marked_time[key] = time.time()

  def toc(self, *args):
    """Record the time elapsed since the matching `tic` for each key."""
    for key in args:
      self.update(key, time.time() - self._marked_time[key])
      del self._marked_time[key]

  def reset(self):
    """Reset every time meter."""
    for meter in self._meters.values():
      meter.reset()

  def summary(self, reduction='mean'):
    """Return a dict of reduced times plus 'total' and 'steps_per_sec'."""
    summary_dict = {key: meter.reduce(reduction)
                    for key, meter in self._meters.items()}
    if 'total' not in summary_dict:
      summary_dict['total'] = sum(summary_dict.values())
    summary_dict['steps_per_sec'] = 1.0 / summary_dict['total']
    return summary_dict

  def summary_str(self, reduction='mean'):
    """Return the reduced times formatted as 'key=value' pairs."""
    parts = [f'{key}={value:.04f}'
             for key, value in self.summary(reduction).items()]
    return ', '.join(parts)
| [
"jax.numpy.abs",
"jax.numpy.nanmedian",
"jax.numpy.log",
"scipy.interpolate.interp1d",
"jax.tree_map",
"numpy.array",
"numpy.linalg.norm",
"jax.numpy.matmul",
"numpy.mean",
"scipy.spatial.transform.Rotation.from_dcm",
"scipy.interpolate.CubicSpline",
"numpy.max",
"jax.tree_util.tree_map",
... | [((1744, 1804), 'functools.partial', 'functools.partial', (['jax.custom_jvp'], {'nondiff_argnums': '(1, 2, 3)'}), '(jax.custom_jvp, nondiff_argnums=(1, 2, 3))\n', (1761, 1804), False, 'import functools\n'), ((1653, 1706), 'jax.numpy.matmul', 'jnp.matmul', (['a', 'b'], {'precision': 'jax.lax.Precision.HIGHEST'}), '(a, b, precision=jax.lax.Precision.HIGHEST)\n', (1663, 1706), True, 'import jax.numpy as jnp\n'), ((2327, 2375), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (2342, 2375), True, 'import jax.numpy as jnp\n'), ((2629, 2648), 'jax.numpy.maximum', 'jnp.maximum', (['y', 'tol'], {}), '(y, tol)\n', (2640, 2648), True, 'import jax.numpy as jnp\n'), ((2759, 2799), 'jax.numpy.sum', 'jnp.sum', (['y_dot'], {'axis': 'axis', 'keepdims': '(True)'}), '(y_dot, axis=axis, keepdims=True)\n', (2766, 2799), True, 'import jax.numpy as jnp\n'), ((3232, 3303), 'jax.numpy.stack', 'jnp.stack', (['[dfz_dy - dfy_dz, dfx_dz - dfz_dx, dfy_dx - dfx_dy]'], {'axis': '(-1)'}), '([dfz_dy - dfy_dz, dfx_dz - dfz_dx, dfy_dx - dfx_dy], axis=-1)\n', (3241, 3303), True, 'import jax.numpy as jnp\n'), ((3866, 3882), 'jax.numpy.nanmedian', 'jnp.nanmedian', (['x'], {}), '(x)\n', (3879, 3882), True, 'import jax.numpy as jnp\n'), ((5649, 5693), 'scipy.interpolate.CubicSpline', 'interpolate.CubicSpline', (['in_times', 'positions'], {}), '(in_times, positions)\n', (5672, 5693), False, 'from scipy import interpolate\n'), ((5709, 5739), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_samples'], {}), '(0, 1, num_samples)\n', (5720, 5739), True, 'import numpy as np\n'), ((6174, 6190), 'jax.numpy.sqrt', 'jnp.sqrt', (['safe_x'], {}), '(safe_x)\n', (6182, 6190), True, 'import jax.numpy as jnp\n'), ((7715, 7738), 'jax.numpy.maximum', 'jnp.maximum', (['eps', 'scale'], {}), '(eps, scale)\n', (7726, 7738), True, 'import jax.numpy as jnp\n'), ((8921, 8943), 'numpy.min', 'np.min', (['points'], {'axis': '(0)'}), 
'(points, axis=0)\n', (8927, 8943), True, 'import numpy as np\n'), ((8956, 8978), 'numpy.max', 'np.max', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (8962, 8978), True, 'import numpy as np\n'), ((8988, 9024), 'numpy.stack', 'np.stack', (['(min_dim, max_dim)'], {'axis': '(1)'}), '((min_dim, max_dim), axis=1)\n', (8996, 9024), True, 'import numpy as np\n'), ((9315, 9358), 'numpy.linalg.norm', 'np.linalg.norm', (['(bounds[:, 1] - bounds[:, 0])'], {}), '(bounds[:, 1] - bounds[:, 0])\n', (9329, 9358), True, 'import numpy as np\n'), ((9700, 9727), 'jax.tree_map', 'jax.tree_map', (['jnp.array', 'xs'], {}), '(jnp.array, xs)\n', (9712, 9727), False, 'import jax\n'), ((10626, 10648), 'jax.tree_flatten', 'jax.tree_flatten', (['tree'], {}), '(tree)\n', (10642, 10648), False, 'import jax\n'), ((10704, 10740), 'jax.tree_unflatten', 'jax.tree_unflatten', (['treedef', 'results'], {}), '(treedef, results)\n', (10722, 10740), False, 'import jax\n'), ((11396, 11407), 'time.time', 'time.time', ([], {}), '()\n', (11405, 11407), False, 'import time\n'), ((1166, 1203), 'jax.tree_util.tree_map', 'jax.tree_util.tree_map', (['clip_fn', 'grad'], {}), '(clip_fn, grad)\n', (1188, 1203), False, 'import jax\n'), ((1424, 1473), 'jax.numpy.minimum', 'jnp.minimum', (['(1)', '(grad_max_norm / (eps + grad_norm))'], {}), '(1, grad_max_norm / (eps + grad_norm))\n', (1435, 1473), True, 'import jax.numpy as jnp\n'), ((1485, 1533), 'jax.tree_util.tree_map', 'jax.tree_util.tree_map', (['(lambda z: mult * z)', 'grad'], {}), '(lambda z: mult * z, grad)\n', (1507, 1533), False, 'import jax\n'), ((2730, 2747), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['x'], {}), '(x)\n', (2744, 2747), True, 'import jax.numpy as jnp\n'), ((2871, 2896), 'jax.numpy.squeeze', 'jnp.squeeze', (['y'], {'axis': 'axis'}), '(y, axis=axis)\n', (2882, 2896), True, 'import jax.numpy as jnp\n'), ((2909, 2938), 'jax.numpy.squeeze', 'jnp.squeeze', (['y_dot'], {'axis': 'axis'}), '(y_dot, axis=axis)\n', (2920, 2938), True, 'import 
jax.numpy as jnp\n'), ((3526, 3565), 'jax.numpy.trace', 'jnp.trace', (['jacobian'], {'axis1': '(-2)', 'axis2': '(-1)'}), '(jacobian, axis1=-2, axis2=-1)\n', (3535, 3565), True, 'import jax.numpy as jnp\n'), ((3809, 3822), 'jax.numpy.log', 'jnp.log', (['(10.0)'], {}), '(10.0)\n', (3816, 3822), True, 'import jax.numpy as jnp\n'), ((3903, 3922), 'jax.numpy.abs', 'jnp.abs', (['(x - median)'], {}), '(x - median)\n', (3910, 3922), True, 'import jax.numpy as jnp\n'), ((4488, 4503), 'numpy.array', 'np.array', (['codes'], {}), '(codes)\n', (4496, 4503), True, 'import numpy as np\n'), ((4617, 4667), 'scipy.interpolate.CubicSpline', 'interpolate.CubicSpline', (['t', 'codes'], {'bc_type': 'bc_type'}), '(t, codes, bc_type=bc_type)\n', (4640, 4667), False, 'from scipy import interpolate\n'), ((5592, 5636), 'scipy.spatial.transform.Rotation.from_dcm', 'scipy_transform.Rotation.from_dcm', (['rotations'], {}), '(rotations)\n', (5625, 5636), True, 'from scipy.spatial import transform as scipy_transform\n'), ((7652, 7674), 'jax.numpy.finfo', 'jnp.finfo', (['jnp.float32'], {}), '(jnp.float32)\n', (7661, 7674), True, 'import jax.numpy as jnp\n'), ((8369, 8387), 'jax.numpy.abs', 'jnp.abs', (['(alpha - 2)'], {}), '(alpha - 2)\n', (8376, 8387), True, 'import jax.numpy as jnp\n'), ((9520, 9544), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (9542, 9544), False, 'import jax\n'), ((10067, 10084), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (10081, 10084), True, 'import numpy as np\n'), ((10205, 10244), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['max_threads'], {}), '(max_threads)\n', (10231, 10244), False, 'from concurrent import futures\n'), ((11428, 11439), 'time.time', 'time.time', ([], {}), '()\n', (11437, 11439), False, 'import time\n'), ((12225, 12260), 'collections.defaultdict', 'collections.defaultdict', (['ValueMeter'], {}), '(ValueMeter)\n', (12248, 12260), False, 'import collections\n'), ((12285, 12315), 
'collections.defaultdict', 'collections.defaultdict', (['float'], {}), '(float)\n', (12308, 12315), False, 'import collections\n'), ((12429, 12440), 'time.time', 'time.time', ([], {}), '()\n', (12438, 12440), False, 'import time\n'), ((1114, 1154), 'jax.numpy.clip', 'jnp.clip', (['z', '(-grad_max_val)', 'grad_max_val'], {}), '(z, -grad_max_val, grad_max_val)\n', (1122, 1154), True, 'import jax.numpy as jnp\n'), ((3794, 3806), 'jax.numpy.log', 'jnp.log', (['mse'], {}), '(mse)\n', (3801, 3806), True, 'import jax.numpy as jnp\n'), ((4780, 4818), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['t', 'codes'], {'axis': '(0)'}), '(t, codes, axis=0)\n', (4800, 4818), False, 'from scipy import interpolate\n'), ((6138, 6154), 'jax.numpy.ones_like', 'jnp.ones_like', (['x'], {}), '(x)\n', (6151, 6154), True, 'import jax.numpy as jnp\n'), ((7938, 7959), 'jax.numpy.minimum', 'jnp.minimum', (['x', '(3e+37)'], {}), '(x, 3e+37)\n', (7949, 7959), True, 'import jax.numpy as jnp\n'), ((7995, 8015), 'jax.numpy.minimum', 'jnp.minimum', (['x', '(87.5)'], {}), '(x, 87.5)\n', (8006, 8015), True, 'import jax.numpy as jnp\n'), ((8173, 8193), 'jax.numpy.ones_like', 'jnp.ones_like', (['alpha'], {}), '(alpha)\n', (8186, 8193), True, 'import jax.numpy as jnp\n'), ((8253, 8267), 'jax.numpy.abs', 'jnp.abs', (['alpha'], {}), '(alpha)\n', (8260, 8267), True, 'import jax.numpy as jnp\n'), ((11270, 11281), 'numpy.stack', 'np.stack', (['x'], {}), '(x)\n', (11278, 11281), True, 'import numpy as np\n'), ((11889, 11910), 'numpy.mean', 'np.mean', (['self._values'], {}), '(self._values)\n', (11896, 11910), True, 'import numpy as np\n'), ((12465, 12476), 'time.time', 'time.time', ([], {}), '()\n', (12474, 12476), False, 'import time\n'), ((12758, 12769), 'time.time', 'time.time', ([], {}), '()\n', (12767, 12769), False, 'import time\n'), ((8212, 8232), 'jax.numpy.ones_like', 'jnp.ones_like', (['alpha'], {}), '(alpha)\n', (8225, 8232), True, 'import jax.numpy as jnp\n'), ((11953, 11973), 'numpy.std', 
'np.std', (['self._values'], {}), '(self._values)\n', (11959, 11973), True, 'import numpy as np\n'), ((12911, 12922), 'time.time', 'time.time', ([], {}), '()\n', (12920, 12922), False, 'import time\n'), ((1376, 1391), 'jax.numpy.sum', 'jnp.sum', (['(y ** 2)'], {}), '(y ** 2)\n', (1383, 1391), True, 'import jax.numpy as jnp\n')] |
import unittest
from nose.tools import (assert_in, assert_raises, assert_equals)
import logging
import numpy
from sknn.mlp import Regressor as MLPR
from sknn.mlp import Layer as L, Convolution as C
class TestDataAugmentation(unittest.TestCase):
    """Checks that the `mutator` hook is invoked once per training sample."""

    def setUp(self):
        self.called = 0
        self.value = 1.0
        self.nn = MLPR(
            layers=[L("Linear")],
            n_iter=1,
            batch_size=2,
            mutator=self._mutate_fn)

    def _mutate_fn(self, sample):
        # Count invocations and overwrite zero entries with the configured value.
        self.called += 1
        sample[sample == 0.0] = self.value

    def test_TestCalledOK(self):
        a_in = numpy.zeros((8, 16))
        a_out = numpy.zeros((8, 4))
        self.nn._fit(a_in, a_out)
        assert_equals(a_in.shape[0], self.called)

    def test_DataIsUsed(self):
        # NaN-mutated samples must propagate through training and abort it.
        self.value = float("nan")
        a_in = numpy.zeros((8, 16))
        a_out = numpy.zeros((8, 4))
        assert_raises(RuntimeError, self.nn._fit, a_in, a_out)
| [
"nose.tools.assert_raises",
"sknn.mlp.Layer",
"numpy.zeros",
"nose.tools.assert_equals"
] | [((737, 778), 'nose.tools.assert_equals', 'assert_equals', (['a_in.shape[0]', 'self.called'], {}), '(a_in.shape[0], self.called)\n', (750, 778), False, 'from nose.tools import assert_in, assert_raises, assert_equals\n'), ((915, 969), 'nose.tools.assert_raises', 'assert_raises', (['RuntimeError', 'self.nn._fit', 'a_in', 'a_out'], {}), '(RuntimeError, self.nn._fit, a_in, a_out)\n', (928, 969), False, 'from nose.tools import assert_in, assert_raises, assert_equals\n'), ((655, 675), 'numpy.zeros', 'numpy.zeros', (['(8, 16)'], {}), '((8, 16))\n', (666, 675), False, 'import numpy\n'), ((676, 695), 'numpy.zeros', 'numpy.zeros', (['(8, 4)'], {}), '((8, 4))\n', (687, 695), False, 'import numpy\n'), ((867, 887), 'numpy.zeros', 'numpy.zeros', (['(8, 16)'], {}), '((8, 16))\n', (878, 887), False, 'import numpy\n'), ((888, 907), 'numpy.zeros', 'numpy.zeros', (['(8, 4)'], {}), '((8, 4))\n', (899, 907), False, 'import numpy\n'), ((373, 384), 'sknn.mlp.Layer', 'L', (['"""Linear"""'], {}), "('Linear')\n", (374, 384), True, 'from sknn.mlp import Layer as L, Convolution as C\n')] |
# -*- coding: utf-8 -*-
import activation as a
import numpy as np
import forwardpropagate as fp
import copy
def back_propagation(X, y, al, L, parameters, caches, dropout=False, keep_prob=0.5):
    """Backward pass for a 3-layer network (relu, relu, sigmoid output).

    Args:
        X: input data of shape (n_features, m).
        y: labels of shape (1, m).
        al: output activations (unused; kept for interface compatibility).
        L: number of layers (unused; kept for interface compatibility).
        parameters: dict of weights/biases W1..W3, b1..b3.
        caches: dict with forward activations a1..a3 and, when dropout was
            applied, the masks D1/D2.
        dropout: whether inverted dropout was used in the forward pass.
        keep_prob: keep probability used by the forward dropout pass
            (previously hard-coded to 0.5).

    Returns:
        dict of gradients for W1..W3 and b1..b3.
    """
    m = X.shape[1]
    grads = {}
    # Output layer: derivative of cross-entropy loss through the sigmoid.
    da3 = (-np.divide(y, caches["a3"]) + np.divide(1 - y, 1 - caches["a3"]))
    dz3 = da3 * a.sigmoid_back(caches["a3"])
    grads["W3"] = (1/m) * (dz3.dot(caches["a2"].T))
    grads["b3"] = (1/m) * np.sum(dz3, axis=1, keepdims=True)
    da2 = parameters["W3"].T.dot(dz3)
    if dropout == True:
        # Inverted dropout: reapply the mask and rescale by keep_prob.
        da2 *= caches["D2"]
        da2 /= keep_prob
    dz2 = da2 * a.relu_back(caches["a2"])
    grads["W2"] = (1/m) * dz2.dot(caches['a1'].T)
    grads["b2"] = (1/m) * np.sum(dz2, axis=1, keepdims=True)
    da1 = parameters["W2"].T.dot(dz2)
    if dropout == True:
        da1 *= caches["D1"]
        da1 /= keep_prob
    dz1 = da1 * a.relu_back(caches["a1"])
    grads["W1"] = ((1/m) * dz1.dot(X.T))
    grads["b1"] = (1/m) * np.sum(dz1, axis=1, keepdims=True)
    return grads
def check_gradients(X,Y,parameters,L):
    """Numerically approximate dJ/dtheta via central differences.

    For each layer i in [1, L) and each parameter kind ('W', 'b'), perturbs
    the [0, 0] entry by +/- epsilon, re-runs the forward pass, and records
    (J(theta+eps) - J(theta-eps)) / (2*eps). Only the first entry of each
    matrix is checked; useful for validating back_propagation's gradients.
    """
    gradapprox = {}
    for i in range(1,L):
        params = ["W","b"]
        for p in params:
            epsilon = 0.0001
            # Deep-copy so each perturbation starts from the original values.
            parameters1 = copy.deepcopy(parameters)
            parameters2 = copy.deepcopy(parameters)
            parameters1[p + str(i)][0,0] += epsilon
            parameters2[p + str(i)][0,0] -= epsilon
            fp1, fp1cache = fp.forward_propagate(parameters1, X, L)
            fp2, fp2cache = fp.forward_propagate(parameters2, X, L)
            cost1 = fp.cost(Y, fp1)
            cost2 = fp.cost(Y, fp2)
            # Central-difference approximation of the derivative.
            gradapprox[p + str(i)] = (cost1 - cost2) / (2. *epsilon)
return gradapprox | [
"forwardpropagate.cost",
"activation.relu_back",
"activation.sigmoid_back",
"numpy.sum",
"copy.deepcopy",
"forwardpropagate.forward_propagate",
"numpy.divide"
] | [((275, 309), 'numpy.divide', 'np.divide', (['(1 - y)', "(1 - caches['a3'])"], {}), "(1 - y, 1 - caches['a3'])\n", (284, 309), True, 'import numpy as np\n'), ((327, 355), 'activation.sigmoid_back', 'a.sigmoid_back', (["caches['a3']"], {}), "(caches['a3'])\n", (341, 355), True, 'import activation as a\n'), ((440, 474), 'numpy.sum', 'np.sum', (['dz3'], {'axis': '(1)', 'keepdims': '(True)'}), '(dz3, axis=1, keepdims=True)\n', (446, 474), True, 'import numpy as np\n'), ((647, 672), 'activation.relu_back', 'a.relu_back', (["caches['a2']"], {}), "(caches['a2'])\n", (658, 672), True, 'import activation as a\n'), ((754, 788), 'numpy.sum', 'np.sum', (['dz2'], {'axis': '(1)', 'keepdims': '(True)'}), '(dz2, axis=1, keepdims=True)\n', (760, 788), True, 'import numpy as np\n'), ((933, 958), 'activation.relu_back', 'a.relu_back', (["caches['a1']"], {}), "(caches['a1'])\n", (944, 958), True, 'import activation as a\n'), ((1031, 1065), 'numpy.sum', 'np.sum', (['dz1'], {'axis': '(1)', 'keepdims': '(True)'}), '(dz1, axis=1, keepdims=True)\n', (1037, 1065), True, 'import numpy as np\n'), ((246, 272), 'numpy.divide', 'np.divide', (['y', "caches['a3']"], {}), "(y, caches['a3'])\n", (255, 272), True, 'import numpy as np\n'), ((1323, 1348), 'copy.deepcopy', 'copy.deepcopy', (['parameters'], {}), '(parameters)\n', (1336, 1348), False, 'import copy\n'), ((1375, 1400), 'copy.deepcopy', 'copy.deepcopy', (['parameters'], {}), '(parameters)\n', (1388, 1400), False, 'import copy\n'), ((1563, 1602), 'forwardpropagate.forward_propagate', 'fp.forward_propagate', (['parameters1', 'X', 'L'], {}), '(parameters1, X, L)\n', (1583, 1602), True, 'import forwardpropagate as fp\n'), ((1631, 1670), 'forwardpropagate.forward_propagate', 'fp.forward_propagate', (['parameters2', 'X', 'L'], {}), '(parameters2, X, L)\n', (1651, 1670), True, 'import forwardpropagate as fp\n'), ((1704, 1719), 'forwardpropagate.cost', 'fp.cost', (['Y', 'fp1'], {}), '(Y, fp1)\n', (1711, 1719), True, 'import forwardpropagate as 
fp\n'), ((1740, 1755), 'forwardpropagate.cost', 'fp.cost', (['Y', 'fp2'], {}), '(Y, fp2)\n', (1747, 1755), True, 'import forwardpropagate as fp\n')] |
#--------------------------------------------------------------------------------
# Authors:
# - <NAME>: <EMAIL>
# - <NAME>: <EMAIL>
#
# MIT License
# Copyright (c) 2021 CORSMAL
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#--------------------------------------------------------------------------------
import os
import copy
import csv
import json
import pickle
import time
import math
import gym
import numpy as np
import scipy
import scipy.signal
import pybullet as p
import pybullet_data
from gym import spaces
from gym.utils import seeding
from libs.simulation.utils import *
from libs.simulation.ur5 import ur5
from libs.simulation.container import Container
from libs.simulation.hand import Hand
projectionMatrix = p.computeProjectionMatrixFOV(fov=90, aspect=1.777777778, nearVal=0.01, farVal=10)
image_renderer = p.ER_BULLET_HARDWARE_OPENGL
meshPath = os.getcwd() + "/data/meshes/objects/"
MPLPath = os.getcwd() + "/data/meshes/MPL/MPL.xml"
def rgba2rgb(rgba, background=(255, 255, 255)):
    """Alpha-blend an RGBA image onto a solid background, returning uint8 RGB.

    Images that are already 3-channel are returned unchanged.
    """
    rows, cols, channels = rgba.shape
    if channels == 3:
        return rgba
    assert channels == 4, 'RGBA image has 4 channels.'
    alpha = np.asarray(rgba[:, :, 3], dtype='float32') / 255.0
    blended = np.zeros((rows, cols, 3), dtype='float32')
    # Blend each color channel against its background component.
    for i, bg_value in enumerate(background):
        blended[:, :, i] = rgba[:, :, i] * alpha + (1.0 - alpha) * bg_value
    return np.asarray(blended, dtype='uint8')
def gripper_camera(obs):
    """Render a 360x360 RGB image from a camera mounted on the gripper.

    NOTE(review): assumes the last 7 entries of `obs` are the gripper pose
    (xyz position followed by a 4-element quaternion) -- confirm against the
    observation layout.
    """
    # gripper pos and ori
    pos = obs[-7:-4]
    ori = obs[-4:] # last 4
    # Offset the first Euler angle by -pi/2, presumably to align the camera
    # with the gripper axis -- TODO confirm.
    rotation = list(p.getEulerFromQuaternion(ori))
    rotation[0] = rotation[0] - math.pi * 0.5
    ori = p.getQuaternionFromEuler(rotation)
    rot_matrix = p.getMatrixFromQuaternion(ori)
    rot_matrix = np.array(rot_matrix).reshape(3, 3)
    # Initial vectors
    init_camera_vector = (0, 0, 1) # z-axis
    init_up_vector = (0, 1, 0) # y-axis
    # Rotated vectors
    camera_vector = rot_matrix.dot(init_camera_vector)
    up_vector = rot_matrix.dot(init_up_vector)
    # The look-at target is 0.1 units along the camera's forward axis.
    view_matrix_gripper = p.computeViewMatrix(pos, pos + 0.1 * camera_vector, up_vector)
    img = p.getCameraImage(360, 360, view_matrix_gripper, projectionMatrix, shadow=0, renderer=image_renderer)
    # getCameraImage returns (w, h, rgba, depth, seg); index 2 is the RGBA buffer.
    rgba_img = img[2]
    rgb_img = rgba2rgb(rgba_img)
    return rgb_img
def render_side_cam(cam):
    """Render a 1280x720 RGB image from a calibrated side camera.

    Args:
        cam: calibration dict with cam['extrinsic']['tvec'] (translation
            column vector) and cam['extrinsic']['rvec'] (which, despite the
            name, appears to be used as a 3x3 rotation matrix here).
    """
    tvec = cam['extrinsic']['tvec']
    rot_matrix = cam['extrinsic']['rvec']
    # Convert from camera to world coordinates
    pos = -rot_matrix.T.dot(tvec)
    rot_matrix = rot_matrix.T
    # NOTE(review): the +1 z offset looks like a world-frame alignment fudge
    # -- confirm against the scene setup.
    pos = pos.T[0] + np.array([0, 0, 1])
    pos = pos.tolist()
    # Initial vectors
    init_camera_vector = (0, 0, 1) # z-axis
    init_up_vector = (0, 0, 1) # y-axis
    # Rotated vectors
    camera_vector = rot_matrix.dot(init_camera_vector)
    up_vector = rot_matrix.dot(init_up_vector)
    view_matrix_sidecam = p.computeViewMatrix(pos, camera_vector, up_vector)
    img = p.getCameraImage(1280, 720, view_matrix_sidecam, projectionMatrix, shadow=0, renderer=image_renderer)
    # getCameraImage returns (w, h, rgba, depth, seg); index 2 is the RGBA buffer.
    rgba_img = img[2]
    rgb_img = rgba2rgb(rgba_img)
    return rgb_img
class handoverEnv(gym.Env):
    """Gym environment simulating a human-to-robot container handover.

    Replays recorded human hand and container trajectories in PyBullet,
    then lets a UR5 arm grasp the container and deliver it to a target.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 30
    }
    def __init__(self,
                 actionRepeat=1,
                 isEnableSelfCollision=True,
                 renders=True,
                 containerID=1,
                 recordingID='s0_fi0_fu0_b0_l0',
                 objectURDF='',
                 pred_tolerance=0.3):
        """Connect to PyBullet, load the scene and define the gym spaces.

        Args:
            actionRepeat (int): stored as the per-action simulation repeat.
            isEnableSelfCollision (bool): kept for API compatibility; not
                read in this method.
            renders (bool): if True, attach to (or start) a GUI server;
                otherwise connect in DIRECT (headless) mode.
            containerID (int): index of the CORSMAL container to simulate.
            recordingID (str): identifier of the recorded handover session.
            objectURDF (str): kept for API compatibility; not read here.
            pred_tolerance (float): stored tolerance for prediction consumers.
        """
        prGreen("Initialising environment...")
        self._timeStep = 1. / 240.  # simulation time step size
        self._actionRepeat = actionRepeat
        self._observation = []
        self._envStepCounter = 0
        self._renders = renders
        # Camera image resolution used for the observation space.
        self._width = 1280
        self._height = 720
        self.terminated = 0
        self._p = p
        self.containerID = containerID
        self.recordingID = recordingID
        # Paths derived from the chosen container / recording identifiers.
        self.container_mesh_path = './data/meshes/objects/CORSMAL_containers/' + str(self.containerID) + '/best.urdf'
        self.container_trajectory_path = './data/vision_estimations/container/' + str(self.containerID) + '/volume_estimation/' + str(self.recordingID) + '_properties.txt'
        self.container_trajectory = np.zeros([1, 3])
        self.lefthand_trajectory = None
        self.righthand_trajectory = None
        self.sim_mass = 0.01
        self.start_frame = 0
        # Which hand performs the handover: 'L' or 'R'.
        self.handover_hand = 'L'
        self.replayFrameID = 0
        self.replayFinished = False
        self.pred_tolerance = pred_tolerance
        # Outcome bookkeeping used by the grasp / delivery evaluation.
        self.fail_contact = 0
        self.fail_weight = 0
        self.fail_width = 0
        self.stable_grasp = 0
        self.delivery_success = 0
        self.hand_visible = False
        # TODO: use real camera calib files for each recording
        self.cam_side_1 = self.readCalibration('./data/calibration/examples/calib/c1_calib.pickle')
        self.cam_side_2 = self.readCalibration('./data/calibration/examples/calib/c2_calib.pickle')
        self.cam_robot = self.readCalibration('./data/calibration/examples/calib/c3_calib.pickle')
        self.mesh_recon_frame = 0
        self.lefthand_debug_lines = []
        self.righthand_debug_lines = []
        # Render options: try to attach to an already-running GUI server via
        # shared memory first, fall back to starting a GUI; headless otherwise.
        if self._renders:
            cid = p.connect(p.SHARED_MEMORY)
            if (cid < 0):
                cid = p.connect(p.GUI)
        else:
            p.connect(p.DIRECT)
        # Setup environment
        self._seed()
        self._reset()
        # Derive the action/observation spaces from an initial observation.
        observationDim = len(self.getSceneObservation())
        observation_high = np.array([np.finfo(np.float32).max] * observationDim)
        action_dim = 4
        self._action_bound = 1
        action_high = np.array([self._action_bound] * action_dim)
        self.action_space = spaces.Box(-action_high, action_high, dtype=np.float32)
        self.observation_space = spaces.Box(low=0, high=255, shape=(3, self._height, self._width), dtype=np.uint8)
        self.viewer = None
    def _reset(self):
        """Rebuild the simulation world and return the initial observation.

        Resets all episode bookkeeping, reloads the plane, tables, target,
        UR5 arm, container and both hand models, then steps the simulation
        once and captures the camera observations.
        """
        prGreen("Resetting environment...")
        # Episode bookkeeping.
        self.terminated = 0
        self._envStepCounter = 0
        self.replayFrameID = 0
        self.replayFinished = False
        self.start_movement = False
        self.extendReplay = False
        # Gripper starts fully open (0.085 m) with a very low joint effort.
        self.gripper_state = 0.085
        self.target_gripper_width = 0.085
        self.joint_limit_effort = 0.01
        self.grasp_target_z = 0.0
        self.stable_grasp = 0
        self.delivery_success = 0
        # Fresh deterministic physics world.
        p.resetSimulation()
        p.setPhysicsEngineParameter(numSolverIterations=150, deterministicOverlappingPairs=1)
        p.setTimeStep(self._timeStep)
        p.setGravity(0, 0, -10)
        p.setRealTimeSimulation(0)
        # Load scene setup
        p.setAdditionalSearchPath(pybullet_data.getDataPath())
        planeId = p.loadURDF("plane.urdf")
        self.tables = []
        self.tables.append(p.loadURDF((os.path.join(meshPath,"table/table.urdf")),[0.0,0.0,0.0],p.getQuaternionFromEuler([0,0,0]),useFixedBase=True))
        self.tables.append(p.loadURDF((os.path.join(meshPath,"table/table.urdf")),[0.97,1.3,0.0],p.getQuaternionFromEuler([0,0,math.pi/2.0]),useFixedBase=True))
        target = p.loadURDF(os.path.join(meshPath, "target/target.urdf"), (0.75, 1.52, 0.625), p.getQuaternionFromEuler([0, 0, 0]))
        # Load robot
        self._arm = ur5(robotStartPos=[0.22, 1.02, 0.53], maxGripperForce=0.1)
        # Load container
        self.container_mesh_path = './data/meshes/objects/CORSMAL_containers/' + str(self.containerID) + '/best.urdf'
        self.container = Container(self.container_mesh_path, self.container_trajectory, self.sim_mass, self.tables)
        # Load hand models
        self.left_hand = Hand(MPLPath, self.lefthand_trajectory, 'L', len(self.container.trajectory), self.tables, self.container.uid)
        self.right_hand = Hand(MPLPath, self.righthand_trajectory, 'R', len(self.container.trajectory), self.tables, self.container.uid, other_hand_uid=self.left_hand.uid)
        p.stepSimulation()
        self._observation = self.getSceneObservation()
        return np.array(self._observation, dtype=object)
    def __del__(self):
        # Best-effort cleanup: drop the PyBullet client connection when the
        # environment object is garbage collected.
        p.disconnect()
    def _seed(self, seed=None):
        """Seed the environment RNG; returns the list of seeds used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
# Read calibration file for the chosen setup
def readCalibration(self, filepath):
cam = dict.fromkeys(['intrinsic', 'extrinsic']);
with open(filepath, 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
calibration = u.load()
cam['intrinsic'] = calibration[0]['rgb']
cam['extrinsic'] = calibration[1]['rgb']
return cam
def getSceneObservation(self):
self._observation = self._arm.getObservation()
# scene_obs = get_scene_observation(self.tables)
grip_img = gripper_camera(self._observation)
# obs = [self._observation, scene_obs]
cam_side_1_img = render_side_cam(self.cam_side_1)
cam_side_2_img = render_side_cam(self.cam_side_2)
cam_robot_img = render_side_cam(self.cam_robot)
return grip_img, cam_side_1_img, cam_side_2_img, cam_robot_img
    def step(self, action, distance_to_target):
        """Advance the handover episode by one step.

        During the replay phase this plays back the recorded hand/container
        trajectory; once the replay ends (or the arm is close enough to the
        target) it executes the grasp, removes the hands, delivers the
        container to the target table and evaluates the outcome.

        Args:
            action: 4-element delta [dx, dy, dz, gripper_width]; the first
                three are scaled by 1.5 before being applied to the arm.
            distance_to_target (float): current distance between the arm and
                its grasp target; <= 0.01 triggers the handover phase early.

        Returns:
            tuple: (observation array, reward, done flag, info dict — the
            info dict is populated only on the terminal handover step).
        """
        # replay movement of container human manipulation
        if self.replayFrameID + 1 < len(self.container.trajectory) and (distance_to_target > 0.01):
            self.left_hand.updatePose(self.replayFrameID, self.start_movement)
            self.right_hand.updatePose(self.replayFrameID, self.start_movement)
            if not(self.start_movement):
                # Before movement begins, use a fixed canonical grasp pose.
                self.grasp_quat = list(p.getQuaternionFromEuler([math.pi, 0.0, -math.pi / 2.0]))
                cont_quat = p.getQuaternionFromEuler([0, -math.pi/2.0, 0])
            else:
                # Track the grasp/container orientation of the handover hand.
                if self.handover_hand == 'L':
                    self.grasp_quat = self.left_hand.robot_grasp_quat
                    cont_quat = self.left_hand.hand_base_quat
                elif self.handover_hand == 'R':
                    self.grasp_quat = self.right_hand.robot_grasp_quat
                    cont_quat = self.right_hand.hand_base_quat
            self.container.updatePose(self.replayFrameID, cont_quat)
            self.replayFrameID += 1
        else:
            prYellow('Handover phase...')
            target_gripper_width = self.target_gripper_width
            state = p.getLinkState(self._arm.uid, 7)
            actualEndEffectorPos = np.asarray(state[0])
            # Remember where the commanded delta would take the end effector.
            last_target = actualEndEffectorPos + np.asarray(action[0:3])
            action[3] = target_gripper_width
            self._arm.maxGripperForce = self.joint_limit_effort
            self.replayFinished = True
        # Scale the positional action and forward it to the arm controller.
        dv = 1.5
        dx = action[0] * dv
        dy = action[1] * dv
        dz = action[2] * dv
        f = action[3]
        realAction = [dx, dy, dz, f]
        if self.replayFinished:
            self.start_movement = True
        else:
            if self.replayFrameID == self.start_frame:
                self.start_movement = True
                prYellow('Human manouevring phase...')
        self._arm.action(realAction, self.grasp_quat, self.start_movement, False)
        info = {}
        done = False
        reward = 0.0
        if not (self.replayFinished):
            # Replay phase: just advance the simulation a few steps.
            for _ in range(8):
                p.stepSimulation()
                if self._renders:
                    time.sleep(self._timeStep)
                self._envStepCounter += 1
        else:
            if self.replayFrameID + 1 == len(self.container.trajectory):
                # Let the arm converge onto the last commanded target.
                for _ in range(480):
                    state = p.getLinkState(self._arm.uid, 7)
                    actualEndEffectorPos = np.asarray(state[0])
                    direction = last_target - actualEndEffectorPos
                    direction = direction * 1.5
                    realAction = list(direction) + [0.085]
                    self._arm.action(realAction, self.grasp_quat, self.start_movement, False)
                    p.stepSimulation()
                    if self._renders:
                        time.sleep(self._timeStep)
                    self._envStepCounter += 1
            # re-enable collision between container and table
            for col_objs in self.tables:
                p.setCollisionFilterPair(col_objs, self.container.uid, -1, -1, 1)
            # execute grasp
            realAction = [0.0, 0.0, 0.0, target_gripper_width]
            self._arm.action(realAction, self.grasp_quat, self.start_movement, False)
            for _ in range(100):
                p.stepSimulation()
                if self._renders:
                    time.sleep(self._timeStep)
                self._envStepCounter += 1
            # Release the constraint that pinned the container to the replay.
            p.removeConstraint(self.container.cid)
            # check normal forces on container immediately after grasp executed
            total_normal_force_container_immediate, total_normal_force_hand_immediate = self.checkGraspNormalForce()
            # check actual gripper width
            actual_gripper_width = self.checkGraspActualWidth()
            for _ in range(100):
                p.stepSimulation()
                if self._renders:
                    time.sleep(self._timeStep)
                self._envStepCounter += 1
            # evaluate grasp safety
            closest_l_left = self.checkGraspSafety(self._arm.uid, self.left_hand.uid)
            closest_l_right = self.checkGraspSafety(self._arm.uid, self.right_hand.uid)
            closest_l = min(closest_l_left, closest_l_right)
            # check final normal forces applied on container
            total_normal_force_container, total_normal_force_hand = self.checkGraspNormalForce()
            # remove hand
            p.removeBody(self.left_hand.uid)
            p.removeBody(self.right_hand.uid)
            p.removeAllUserDebugItems()
            # evaluate container delivery
            prYellow('Robot manouevring phase...')
            # slow down arm movement for delivery to prevent slipping
            self._arm.maxVelMultiplier = 0.3
            delivery_z = 0.775
            # Move above the delivery target while holding the container.
            for _ in range(1000):
                delivery_pos = [0.75 - 0.14, 1.52, delivery_z]
                state = p.getLinkState(self._arm.uid, self._arm.endEffectorIndex)
                real_pos = list(state[0])
                # move arm
                dv = 1.0
                dx = (delivery_pos[0] - real_pos[0]) * dv
                dy = (delivery_pos[1] - real_pos[1]) * dv
                dz = (delivery_pos[2] - real_pos[2]) * dv
                f = target_gripper_width
                realAction = [dx, dy, dz, f]
                # NOTE(review): `_` (the loop variable) is passed where the
                # grasp quaternion goes; the arm apparently ignores it when
                # the last flag is True — confirm against ur5.action.
                self._arm.action(realAction, _, True, True)
                p.stepSimulation()
                if self._renders:
                    time.sleep(self._timeStep)
                self._envStepCounter += 1
            # Lower object
            for _ in range(240 * 2):
                delivery_pos = [0.75 - 0.14, 1.52, 0.68]
                state = p.getLinkState(self._arm.uid, self._arm.endEffectorIndex)
                real_pos = list(state[0])
                # move arm
                dv = 1.0
                dx = (delivery_pos[0] - real_pos[0]) * dv
                dy = (delivery_pos[1] - real_pos[1]) * dv
                dz = (delivery_pos[2] - real_pos[2]) * dv
                f = target_gripper_width
                realAction = [dx, dy, dz, f]
                self._arm.action(realAction, _, True, True)
                p.stepSimulation()
                if self._renders:
                    time.sleep(self._timeStep)
                self._envStepCounter += 1
            # Open gripper
            for _ in range(100):
                delivery_pos = [0.75 - 0.14, 1.52, 0.68]
                state = p.getLinkState(self._arm.uid, self._arm.endEffectorIndex)
                real_pos = list(state[0])
                # move arm
                dv = 1.0
                dx = (delivery_pos[0] - real_pos[0]) * dv
                dy = (delivery_pos[1] - real_pos[1]) * dv
                dz = (delivery_pos[2] - real_pos[2]) * dv
                f = 0.085
                realAction = [dx, dy, dz, f]
                self._arm.action(realAction, _, True, True)
                p.stepSimulation()
                if self._renders:
                    time.sleep(self._timeStep)
                self._envStepCounter += 1
            delivery_success, delivery_distance, delivery_beta = self.checkDeliverySuccess()
            done = True
            info = {'closest_l': closest_l, 'delivery_distance': delivery_distance, 'delivery_beta': delivery_beta, \
                'total_normal_force_hand': total_normal_force_hand, 'total_normal_force_container': total_normal_force_container, \
                'total_normal_force_container_immediate': total_normal_force_container_immediate, 'total_normal_force_hand_immediate': total_normal_force_hand_immediate, \
                'actual_gripper_width': actual_gripper_width}
        return np.array(self._observation, dtype=object), reward, done, info
def checkGraspSafety(self, robot_object_id, hand_object_id):
max_l = 1.0
closest_points = p.getClosestPoints(robot_object_id, hand_object_id, max_l)
if len(closest_points) > 0:
min_l = max_l
for cp in closest_points:
if cp[8] < min_l:
min_l = cp[8]
min_l = max(0.0, min_l)
else:
min_l = max_l
return min_l
def checkGraspNormalForce(self):
contact_points_container_robot = p.getContactPoints(self._arm.uid, self.container.uid)
total_normal_force_container = 0.0
for cp in contact_points_container_robot:
total_normal_force_container += abs(cp[9])
contact_points_lefthand_robot = p.getContactPoints(self._arm.uid, self.left_hand.uid)
total_normal_force_lefthand = 0.0
for cp in contact_points_lefthand_robot:
total_normal_force_lefthand += abs(cp[9])
contact_points_righthand_robot = p.getContactPoints(self._arm.uid, self.right_hand.uid)
total_normal_force_righthand = 0.0
for cp in contact_points_righthand_robot:
total_normal_force_righthand += abs(cp[9])
total_normal_force_hand = total_normal_force_lefthand + total_normal_force_righthand
return total_normal_force_container, total_normal_force_hand
def checkGraspActualWidth(self):
js = p.getJointState(self._arm.uid, 14)
# gripper_opening_angle = 0.715 - math.asin((gripper_opening_length - 0.010) / 0.1143)
gripper_opening_length = math.sin(0.715 - js[0]) * 0.1143 + 0.010
return gripper_opening_length
def checkDeliverySuccess(self):
success = False
state = p.getBasePositionAndOrientation(self.container.uid)
pos = state[0]
orn = state[1]
rotation = list(p.getEulerFromQuaternion(orn))
dir_x = math.cos(rotation[2]) * math.sin(rotation[1]) * math.cos(rotation[0]) + math.sin(rotation[2]) * math.sin(rotation[0])
dir_y = math.sin(rotation[2]) * math.sin(rotation[1]) * math.cos(rotation[0]) - math.cos(rotation[2]) * math.sin(rotation[0])
dir_z = math.cos(rotation[1]) * math.cos(rotation[0])
dir_v = np.array([dir_x, dir_y, dir_z])
unit_dir_v = dir_v / np.linalg.norm(dir_v)
delivery_beta = np.arccos(np.clip(np.dot(unit_dir_v, np.array([0.0, 0.0, 1.0])), -1.0, 1.0))
target_pos = [0.75, 1.52, 0.625]
gt_width_file = open('data/meshes/objects/CORSMAL_containers/' + str(self.containerID) + '/width.txt')
next(gt_width_file)
for line in gt_width_file:
line_split = line.split(', ')
if len(line_split) == 3:
gt_cont_minz = float(line_split[1])
break
pos = list(np.asarray(pos)+np.asarray(unit_dir_v)*gt_cont_minz)
delivery_distance = np.linalg.norm(np.array(pos) - np.array(target_pos))
eta = 0.5
if delivery_distance < eta and delivery_beta < math.pi / 18.0:
delivery_success = (1.0 - (delivery_distance / eta))
else:
delivery_success = 0.0
return delivery_success, delivery_distance, delivery_beta | [
"pybullet.getMatrixFromQuaternion",
"pybullet.computeViewMatrix",
"libs.simulation.ur5.ur5",
"pybullet_data.getDataPath",
"pybullet.setTimeStep",
"pybullet.setGravity",
"math.cos",
"numpy.array",
"pybullet.setPhysicsEngineParameter",
"pybullet.disconnect",
"time.sleep",
"numpy.linalg.norm",
... | [((1742, 1827), 'pybullet.computeProjectionMatrixFOV', 'p.computeProjectionMatrixFOV', ([], {'fov': '(90)', 'aspect': '(1.777777778)', 'nearVal': '(0.01)', 'farVal': '(10)'}), '(fov=90, aspect=1.777777778, nearVal=0.01,\n farVal=10)\n', (1770, 1827), True, 'import pybullet as p\n'), ((1882, 1893), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1891, 1893), False, 'import os\n'), ((1930, 1941), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1939, 1941), False, 'import os\n'), ((2149, 2189), 'numpy.zeros', 'np.zeros', (['(row, col, 3)'], {'dtype': '"""float32"""'}), "((row, col, 3), dtype='float32')\n", (2157, 2189), True, 'import numpy as np\n'), ((2476, 2506), 'numpy.asarray', 'np.asarray', (['rgb'], {'dtype': '"""uint8"""'}), "(rgb, dtype='uint8')\n", (2486, 2506), True, 'import numpy as np\n'), ((2718, 2752), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['rotation'], {}), '(rotation)\n', (2742, 2752), True, 'import pybullet as p\n'), ((2770, 2800), 'pybullet.getMatrixFromQuaternion', 'p.getMatrixFromQuaternion', (['ori'], {}), '(ori)\n', (2795, 2800), True, 'import pybullet as p\n'), ((3114, 3176), 'pybullet.computeViewMatrix', 'p.computeViewMatrix', (['pos', '(pos + 0.1 * camera_vector)', 'up_vector'], {}), '(pos, pos + 0.1 * camera_vector, up_vector)\n', (3133, 3176), True, 'import pybullet as p\n'), ((3188, 3292), 'pybullet.getCameraImage', 'p.getCameraImage', (['(360)', '(360)', 'view_matrix_gripper', 'projectionMatrix'], {'shadow': '(0)', 'renderer': 'image_renderer'}), '(360, 360, view_matrix_gripper, projectionMatrix, shadow=0,\n renderer=image_renderer)\n', (3204, 3292), True, 'import pybullet as p\n'), ((3908, 3958), 'pybullet.computeViewMatrix', 'p.computeViewMatrix', (['pos', 'camera_vector', 'up_vector'], {}), '(pos, camera_vector, up_vector)\n', (3927, 3958), True, 'import pybullet as p\n'), ((3970, 4075), 'pybullet.getCameraImage', 'p.getCameraImage', (['(1280)', '(720)', 'view_matrix_sidecam', 'projectionMatrix'], {'shadow': 
'(0)', 'renderer': 'image_renderer'}), '(1280, 720, view_matrix_sidecam, projectionMatrix, shadow=0,\n renderer=image_renderer)\n', (3986, 4075), True, 'import pybullet as p\n'), ((2275, 2305), 'numpy.asarray', 'np.asarray', (['a'], {'dtype': '"""float32"""'}), "(a, dtype='float32')\n", (2285, 2305), True, 'import numpy as np\n'), ((2631, 2660), 'pybullet.getEulerFromQuaternion', 'p.getEulerFromQuaternion', (['ori'], {}), '(ori)\n', (2655, 2660), True, 'import pybullet as p\n'), ((3604, 3623), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (3612, 3623), True, 'import numpy as np\n'), ((5331, 5347), 'numpy.zeros', 'np.zeros', (['[1, 3]'], {}), '([1, 3])\n', (5339, 5347), True, 'import numpy as np\n'), ((6795, 6838), 'numpy.array', 'np.array', (['([self._action_bound] * action_dim)'], {}), '([self._action_bound] * action_dim)\n', (6803, 6838), True, 'import numpy as np\n'), ((6867, 6922), 'gym.spaces.Box', 'spaces.Box', (['(-action_high)', 'action_high'], {'dtype': 'np.float32'}), '(-action_high, action_high, dtype=np.float32)\n', (6877, 6922), False, 'from gym import spaces\n'), ((6957, 7043), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(3, self._height, self._width)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(3, self._height, self._width), dtype=np.\n uint8)\n', (6967, 7043), False, 'from gym import spaces\n'), ((7556, 7575), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (7573, 7575), True, 'import pybullet as p\n'), ((7584, 7673), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'numSolverIterations': '(150)', 'deterministicOverlappingPairs': '(1)'}), '(numSolverIterations=150,\n deterministicOverlappingPairs=1)\n', (7611, 7673), True, 'import pybullet as p\n'), ((7678, 7707), 'pybullet.setTimeStep', 'p.setTimeStep', (['self._timeStep'], {}), '(self._timeStep)\n', (7691, 7707), True, 'import pybullet as p\n'), ((7716, 7739), 'pybullet.setGravity', 
'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (7728, 7739), True, 'import pybullet as p\n'), ((7748, 7774), 'pybullet.setRealTimeSimulation', 'p.setRealTimeSimulation', (['(0)'], {}), '(0)\n', (7771, 7774), True, 'import pybullet as p\n'), ((7884, 7908), 'pybullet.loadURDF', 'p.loadURDF', (['"""plane.urdf"""'], {}), "('plane.urdf')\n", (7894, 7908), True, 'import pybullet as p\n'), ((8419, 8477), 'libs.simulation.ur5.ur5', 'ur5', ([], {'robotStartPos': '[0.22, 1.02, 0.53]', 'maxGripperForce': '(0.1)'}), '(robotStartPos=[0.22, 1.02, 0.53], maxGripperForce=0.1)\n', (8422, 8477), False, 'from libs.simulation.ur5 import ur5\n'), ((8647, 8742), 'libs.simulation.container.Container', 'Container', (['self.container_mesh_path', 'self.container_trajectory', 'self.sim_mass', 'self.tables'], {}), '(self.container_mesh_path, self.container_trajectory, self.\n sim_mass, self.tables)\n', (8656, 8742), False, 'from libs.simulation.container import Container\n'), ((9082, 9100), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (9098, 9100), True, 'import pybullet as p\n'), ((9180, 9221), 'numpy.array', 'np.array', (['self._observation'], {'dtype': 'object'}), '(self._observation, dtype=object)\n', (9188, 9221), True, 'import numpy as np\n'), ((9254, 9268), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (9266, 9268), True, 'import pybullet as p\n'), ((9333, 9356), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (9350, 9356), False, 'from gym.utils import seeding\n'), ((18307, 18365), 'pybullet.getClosestPoints', 'p.getClosestPoints', (['robot_object_id', 'hand_object_id', 'max_l'], {}), '(robot_object_id, hand_object_id, max_l)\n', (18325, 18365), True, 'import pybullet as p\n'), ((18712, 18765), 'pybullet.getContactPoints', 'p.getContactPoints', (['self._arm.uid', 'self.container.uid'], {}), '(self._arm.uid, self.container.uid)\n', (18730, 18765), True, 'import pybullet as p\n'), ((18955, 19008), 
'pybullet.getContactPoints', 'p.getContactPoints', (['self._arm.uid', 'self.left_hand.uid'], {}), '(self._arm.uid, self.left_hand.uid)\n', (18973, 19008), True, 'import pybullet as p\n'), ((19195, 19249), 'pybullet.getContactPoints', 'p.getContactPoints', (['self._arm.uid', 'self.right_hand.uid'], {}), '(self._arm.uid, self.right_hand.uid)\n', (19213, 19249), True, 'import pybullet as p\n'), ((19614, 19648), 'pybullet.getJointState', 'p.getJointState', (['self._arm.uid', '(14)'], {}), '(self._arm.uid, 14)\n', (19629, 19648), True, 'import pybullet as p\n'), ((19934, 19985), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.container.uid'], {}), '(self.container.uid)\n', (19965, 19985), True, 'import pybullet as p\n'), ((20435, 20466), 'numpy.array', 'np.array', (['[dir_x, dir_y, dir_z]'], {}), '([dir_x, dir_y, dir_z])\n', (20443, 20466), True, 'import numpy as np\n'), ((2818, 2838), 'numpy.array', 'np.array', (['rot_matrix'], {}), '(rot_matrix)\n', (2826, 2838), True, 'import numpy as np\n'), ((6370, 6396), 'pybullet.connect', 'p.connect', (['p.SHARED_MEMORY'], {}), '(p.SHARED_MEMORY)\n', (6379, 6396), True, 'import pybullet as p\n'), ((6488, 6507), 'pybullet.connect', 'p.connect', (['p.DIRECT'], {}), '(p.DIRECT)\n', (6497, 6507), True, 'import pybullet as p\n'), ((7837, 7864), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (7862, 7864), False, 'import pybullet_data\n'), ((8273, 8317), 'os.path.join', 'os.path.join', (['meshPath', '"""target/target.urdf"""'], {}), "(meshPath, 'target/target.urdf')\n", (8285, 8317), False, 'import os\n'), ((8340, 8375), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (8364, 8375), True, 'import pybullet as p\n'), ((9586, 9606), 'pickle._Unpickler', 'pickle._Unpickler', (['f'], {}), '(f)\n', (9603, 9606), False, 'import pickle\n'), ((11501, 11533), 'pybullet.getLinkState', 'p.getLinkState', (['self._arm.uid', '(7)'], 
{}), '(self._arm.uid, 7)\n', (11515, 11533), True, 'import pybullet as p\n'), ((11569, 11589), 'numpy.asarray', 'np.asarray', (['state[0]'], {}), '(state[0])\n', (11579, 11589), True, 'import numpy as np\n'), ((13849, 13887), 'pybullet.removeConstraint', 'p.removeConstraint', (['self.container.cid'], {}), '(self.container.cid)\n', (13867, 13887), True, 'import pybullet as p\n'), ((14854, 14886), 'pybullet.removeBody', 'p.removeBody', (['self.left_hand.uid'], {}), '(self.left_hand.uid)\n', (14866, 14886), True, 'import pybullet as p\n'), ((14899, 14932), 'pybullet.removeBody', 'p.removeBody', (['self.right_hand.uid'], {}), '(self.right_hand.uid)\n', (14911, 14932), True, 'import pybullet as p\n'), ((14945, 14972), 'pybullet.removeAllUserDebugItems', 'p.removeAllUserDebugItems', ([], {}), '()\n', (14970, 14972), True, 'import pybullet as p\n'), ((18132, 18173), 'numpy.array', 'np.array', (['self._observation'], {'dtype': 'object'}), '(self._observation, dtype=object)\n', (18140, 18173), True, 'import numpy as np\n'), ((20056, 20085), 'pybullet.getEulerFromQuaternion', 'p.getEulerFromQuaternion', (['orn'], {}), '(orn)\n', (20080, 20085), True, 'import pybullet as p\n'), ((20372, 20393), 'math.cos', 'math.cos', (['rotation[1]'], {}), '(rotation[1])\n', (20380, 20393), False, 'import math\n'), ((20396, 20417), 'math.cos', 'math.cos', (['rotation[0]'], {}), '(rotation[0])\n', (20404, 20417), False, 'import math\n'), ((20496, 20517), 'numpy.linalg.norm', 'np.linalg.norm', (['dir_v'], {}), '(dir_v)\n', (20510, 20517), True, 'import numpy as np\n'), ((6445, 6461), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (6454, 6461), True, 'import pybullet as p\n'), ((7973, 8015), 'os.path.join', 'os.path.join', (['meshPath', '"""table/table.urdf"""'], {}), "(meshPath, 'table/table.urdf')\n", (7985, 8015), False, 'import os\n'), ((8030, 8065), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (8054, 8065), True, 
'import pybullet as p\n'), ((8123, 8165), 'os.path.join', 'os.path.join', (['meshPath', '"""table/table.urdf"""'], {}), "(meshPath, 'table/table.urdf')\n", (8135, 8165), False, 'import os\n'), ((8181, 8228), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, math.pi / 2.0]'], {}), '([0, 0, math.pi / 2.0])\n', (8205, 8228), True, 'import pybullet as p\n'), ((10830, 10878), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, -math.pi / 2.0, 0]'], {}), '([0, -math.pi / 2.0, 0])\n', (10854, 10878), True, 'import pybullet as p\n'), ((11639, 11662), 'numpy.asarray', 'np.asarray', (['action[0:3]'], {}), '(action[0:3])\n', (11649, 11662), True, 'import numpy as np\n'), ((12444, 12462), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (12460, 12462), True, 'import pybullet as p\n'), ((13401, 13466), 'pybullet.setCollisionFilterPair', 'p.setCollisionFilterPair', (['col_objs', 'self.container.uid', '(-1)', '(-1)', '(1)'], {}), '(col_objs, self.container.uid, -1, -1, 1)\n', (13425, 13466), True, 'import pybullet as p\n'), ((13694, 13712), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (13710, 13712), True, 'import pybullet as p\n'), ((14242, 14260), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (14258, 14260), True, 'import pybullet as p\n'), ((15334, 15391), 'pybullet.getLinkState', 'p.getLinkState', (['self._arm.uid', 'self._arm.endEffectorIndex'], {}), '(self._arm.uid, self._arm.endEffectorIndex)\n', (15348, 15391), True, 'import pybullet as p\n'), ((15824, 15842), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (15840, 15842), True, 'import pybullet as p\n'), ((16111, 16168), 'pybullet.getLinkState', 'p.getLinkState', (['self._arm.uid', 'self._arm.endEffectorIndex'], {}), '(self._arm.uid, self._arm.endEffectorIndex)\n', (16125, 16168), True, 'import pybullet as p\n'), ((16601, 16619), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (16617, 16619), 
True, 'import pybullet as p\n'), ((16884, 16941), 'pybullet.getLinkState', 'p.getLinkState', (['self._arm.uid', 'self._arm.endEffectorIndex'], {}), '(self._arm.uid, self._arm.endEffectorIndex)\n', (16898, 16941), True, 'import pybullet as p\n'), ((17359, 17377), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (17375, 17377), True, 'import pybullet as p\n'), ((19777, 19800), 'math.sin', 'math.sin', (['(0.715 - js[0])'], {}), '(0.715 - js[0])\n', (19785, 19800), False, 'import math\n'), ((20152, 20173), 'math.cos', 'math.cos', (['rotation[0]'], {}), '(rotation[0])\n', (20160, 20173), False, 'import math\n'), ((20176, 20197), 'math.sin', 'math.sin', (['rotation[2]'], {}), '(rotation[2])\n', (20184, 20197), False, 'import math\n'), ((20200, 20221), 'math.sin', 'math.sin', (['rotation[0]'], {}), '(rotation[0])\n', (20208, 20221), False, 'import math\n'), ((20286, 20307), 'math.cos', 'math.cos', (['rotation[0]'], {}), '(rotation[0])\n', (20294, 20307), False, 'import math\n'), ((20310, 20331), 'math.cos', 'math.cos', (['rotation[2]'], {}), '(rotation[2])\n', (20318, 20331), False, 'import math\n'), ((20334, 20355), 'math.sin', 'math.sin', (['rotation[0]'], {}), '(rotation[0])\n', (20342, 20355), False, 'import math\n'), ((21005, 21020), 'numpy.asarray', 'np.asarray', (['pos'], {}), '(pos)\n', (21015, 21020), True, 'import numpy as np\n'), ((21101, 21114), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (21109, 21114), True, 'import numpy as np\n'), ((21117, 21137), 'numpy.array', 'np.array', (['target_pos'], {}), '(target_pos)\n', (21125, 21137), True, 'import numpy as np\n'), ((10744, 10800), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[math.pi, 0.0, -math.pi / 2.0]'], {}), '([math.pi, 0.0, -math.pi / 2.0])\n', (10768, 10800), True, 'import pybullet as p\n'), ((12517, 12543), 'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (12527, 12543), False, 'import time\n'), ((12739, 12771), 
'pybullet.getLinkState', 'p.getLinkState', (['self._arm.uid', '(7)'], {}), '(self._arm.uid, 7)\n', (12753, 12771), True, 'import pybullet as p\n'), ((12815, 12835), 'numpy.asarray', 'np.asarray', (['state[0]'], {}), '(state[0])\n', (12825, 12835), True, 'import numpy as np\n'), ((13127, 13145), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (13143, 13145), True, 'import pybullet as p\n'), ((13767, 13793), 'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (13777, 13793), False, 'import time\n'), ((14315, 14341), 'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (14325, 14341), False, 'import time\n'), ((15897, 15923), 'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (15907, 15923), False, 'import time\n'), ((16674, 16700), 'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (16684, 16700), False, 'import time\n'), ((17432, 17458), 'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (17442, 17458), False, 'import time\n'), ((20104, 20125), 'math.cos', 'math.cos', (['rotation[2]'], {}), '(rotation[2])\n', (20112, 20125), False, 'import math\n'), ((20128, 20149), 'math.sin', 'math.sin', (['rotation[1]'], {}), '(rotation[1])\n', (20136, 20149), False, 'import math\n'), ((20238, 20259), 'math.sin', 'math.sin', (['rotation[2]'], {}), '(rotation[2])\n', (20246, 20259), False, 'import math\n'), ((20262, 20283), 'math.sin', 'math.sin', (['rotation[1]'], {}), '(rotation[1])\n', (20270, 20283), False, 'import math\n'), ((20579, 20604), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (20587, 20604), True, 'import numpy as np\n'), ((21021, 21043), 'numpy.asarray', 'np.asarray', (['unit_dir_v'], {}), '(unit_dir_v)\n', (21031, 21043), True, 'import numpy as np\n'), ((6674, 6694), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (6682, 6694), True, 'import numpy as np\n'), ((13208, 13234), 
'time.sleep', 'time.sleep', (['self._timeStep'], {}), '(self._timeStep)\n', (13218, 13234), False, 'import time\n')] |
import os.path as osp
import random
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from mmaction.core import (ActivityNetLocalization,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, mean_average_precision,
mean_class_accuracy, mmit_mean_average_precision,
pairwise_temporal_iou, top_k_accuracy)
from mmaction.core.evaluation.ava_utils import ava_eval
def gt_confusion_matrix(gt_labels, pred_labels, normalize=None):
    """Calculate the ground truth confusion matrix.

    Args:
        gt_labels (list[int]): ground-truth class indices.
        pred_labels (list[int]): predicted class indices, same length.
        normalize (str | None): 'true' normalizes each row (ground truth),
            'pred' each column (predictions), 'all' the whole matrix;
            None returns raw integer counts.

    Returns:
        np.ndarray: confusion matrix with classes absent from both label
        lists removed.
    """
    max_index = max(max(gt_labels), max(pred_labels))
    confusion_mat = np.zeros((max_index + 1, max_index + 1), dtype=np.int64)
    for gt, pred in zip(gt_labels, pred_labels):
        confusion_mat[gt][pred] += 1
    # Drop classes that never occur in either the ground truth or the
    # predictions so the matrix only covers observed classes.
    del_index = []
    for i in range(max_index):
        if sum(confusion_mat[i]) == 0 and sum(confusion_mat[:, i]) == 0:
            del_index.append(i)
    confusion_mat = np.delete(confusion_mat, del_index, axis=0)
    confusion_mat = np.delete(confusion_mat, del_index, axis=1)

    if normalize is not None:
        # `np.float` was removed in NumPy 1.20; use the explicit dtype.
        confusion_mat = np.array(confusion_mat, dtype=np.float64)
        m, n = confusion_mat.shape
        if normalize == 'true':
            for i in range(m):
                s = np.sum(confusion_mat[i], dtype=float)
                if s == 0:
                    continue
                confusion_mat[i, :] = confusion_mat[i, :] / s
        elif normalize == 'pred':
            for i in range(n):
                s = sum(confusion_mat[:, i])
                if s == 0:
                    continue
                confusion_mat[:, i] = confusion_mat[:, i] / s
        elif normalize == 'all':
            s = np.sum(confusion_mat)
            if s != 0:
                confusion_mat /= s
    return confusion_mat
def test_activitynet_localization():
    """Check ActivityNet localization metrics against precomputed values."""
    data_prefix = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/eval_localization'))
    gt_path = osp.join(data_prefix, 'gt.json')
    result_path = osp.join(data_prefix, 'result.json')

    localization = ActivityNetLocalization(gt_path, result_path)
    results = localization.evaluate()

    expected_mAP = np.array([
        0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222,
        0.52083333, 0.52083333, 0.52083333, 0.5
    ])
    expected_average_mAP = 0.6177579365079365

    assert_array_almost_equal(results[0], expected_mAP)
    assert_array_almost_equal(results[1], expected_average_mAP)
def test_ava_detection():
    """Check AVA-style detection mAP on the bundled toy data."""
    data_prefix = osp.normpath(
        osp.join(osp.dirname(__file__), '../data/eval_detection'))
    paths = {
        name: osp.join(data_prefix, fname)
        for name, fname in (('gt', 'gt.csv'), ('pred', 'pred.csv'),
                            ('labels', 'action_list.txt'))
    }
    # Evaluate bounding-box mAP at IoU threshold 0.5.
    detection = ava_eval(paths['pred'], 'mAP', paths['labels'], paths['gt'], None)
    assert_array_almost_equal(detection['mAP@0.5IOU'], 0.09385522)
def test_confusion_matrix():
    """Compare ``confusion_matrix`` with the local reference implementation
    and verify its input validation."""
    num_samples = 100
    gt_labels = [np.int64(random.randint(0, 9)) for _ in range(num_samples)]
    pred_labels = np.random.randint(10, size=num_samples, dtype=np.int64)
    for mode in (None, 'true', 'pred', 'all'):
        assert_array_equal(
            confusion_matrix(pred_labels, gt_labels, mode),
            gt_confusion_matrix(gt_labels, pred_labels, mode))
    # normalize must be one of 'true', 'pred', 'all' or None
    with pytest.raises(ValueError):
        confusion_matrix([1], [1], 'unsupport')
    # y_pred must be list or np.ndarray
    with pytest.raises(TypeError):
        confusion_matrix(0.5, [1])
    # y_real must be list or np.ndarray
    with pytest.raises(TypeError):
        confusion_matrix([1], 0.5)
    # y_pred dtype must be np.int64
    with pytest.raises(TypeError):
        confusion_matrix([0.5], [1])
    # y_real dtype must be np.int64
    with pytest.raises(TypeError):
        confusion_matrix([1], [0.5])
def test_topk():
    """Verify ``top_k_accuracy`` for several k tuples and label sets."""
    scores = [
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
    ]

    # top-1 accuracy: labels chosen so accuracy steps from 0 to 1.
    top1_cases = [
        ([3, 1, 1, 1], [0]),
        ([2, 0, 4, 3], [0.25]),
        ([2, 2, 3, 1], [0.5]),
        ([2, 2, 2, 3], [0.75]),
        ([2, 2, 2, 4], [1.0]),
    ]
    for labels, expected in top1_cases:
        assert top_k_accuracy(scores, labels, (1, )) == expected

    # top-1 and top-2 accuracy together.
    top2_cases = [
        ([3, 1, 1, 1], [0, 1.0]),
        ([3, 1, 2, 3], [0.25, 0.75]),
    ]
    for labels, expected in top2_cases:
        assert top_k_accuracy(scores, labels, (1, 2)) == expected

    # top-1, top-3 and top-5 accuracy together.
    top5_cases = [
        ([1, 0, 3, 2], [0, 0, 1.0]),
        ([1, 3, 4, 0], [0, 0.5, 1.0]),
        ([2, 3, 0, 2], [0.25, 0.75, 1.0]),
    ]
    for labels, expected in top5_cases:
        assert top_k_accuracy(scores, labels, (1, 3, 5)) == expected
def test_mean_class_accuracy():
    """Verify ``mean_class_accuracy`` over a range of expected values."""
    scores = [
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
    ]
    # (labels, expected mean class accuracy) pairs covering
    # 0, 0.25, 1/3, 0.75 and 1.0.
    cases = [
        (np.int64([1, 4, 0, 2]), 0),
        (np.int64([2, 0, 4, 3]), 0.25),
        (np.int64([2, 2, 2, 3]), 1 / 3),
        (np.int64([4, 2, 2, 4]), 0.75),
        (np.int64([2, 2, 2, 4]), 1.0),
    ]
    for labels, expected in cases:
        assert mean_class_accuracy(scores, labels) == expected
def test_mmit_mean_average_precision():
    """Verify ``mmit_mean_average_precision`` on one hand-computed sample."""
    # One sample
    y_true = [np.array([0, 0, 1, 1])]
    y_scores = [np.array([0.1, 0.4, 0.35, 0.8])]
    # Renamed from ``map`` to avoid shadowing the builtin.
    mean_ap = mmit_mean_average_precision(y_scores, y_true)
    precision = [2.0 / 3.0, 0.5, 1., 1.]
    recall = [1., 0.5, 0.5, 0.]
    # AP = -sum(diff(recall) * precision[:-1]), i.e. the area under the
    # precision-recall curve traversed from high to low recall.
    target = -np.sum(np.diff(recall) * np.array(precision)[:-1])
    assert target == mean_ap
def test_pairwise_temporal_iou():
    """Verify ``pairwise_temporal_iou``, including overlap_self and 1-d
    candidate input."""
    # Empty segment arrays are rejected.
    with pytest.raises(ValueError):
        pairwise_temporal_iou(np.array([]), np.array([]))

    targets = np.array([[1, 2], [2, 3]])
    candidates = np.array([[2, 3], [2.5, 3]])

    # Plain temporal IoU.
    temporal_iou = pairwise_temporal_iou(candidates, targets)
    assert_array_equal(temporal_iou, [[0, 0], [1, 0.5]])

    # IoU together with the self-overlap ratio.
    temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
        candidates, targets, calculate_overlap_self=True)
    assert_array_equal(temporal_overlap_self, [[0, 0], [1, 1]])

    # Self-overlap when the candidate segment is one-dimensional.
    temporal_iou, temporal_overlap_self = pairwise_temporal_iou(
        np.array([2.5, 3]), targets, calculate_overlap_self=True)
    assert_array_equal(temporal_overlap_self, [0, 1])
def test_average_recall_at_avg_proposals():
    """Verify AR@AN statistics for matching, disjoint and dense proposals."""
    gt_two_videos = {
        'v_test1': np.array([[0, 1], [1, 2]]),
        'v_test2': np.array([[0, 1], [1, 2]])
    }
    gt_one_video = {'v_test1': np.array([[0, 1]])}
    matching_proposals = {
        'v_test1': np.array([[0, 1, 1], [1, 2, 1]]),
        'v_test2': np.array([[0, 1, 1], [1, 2, 1]])
    }
    disjoint_proposals = {
        'v_test1': np.array([[10, 11, 0.6], [11, 12, 0.4]]),
        'v_test2': np.array([[10, 11, 0.6], [11, 12, 0.4]])
    }
    dense_proposals = {
        'v_test1': np.array([[i, i + 1, 1 / (i + 1)] for i in range(100)])
    }

    # Proposals that exactly match the ground truth.
    recall, avg_recall, proposals_per_video, auc = (
        average_recall_at_avg_proposals(gt_two_videos, matching_proposals, 4))
    assert_array_equal(recall, [[0.] * 49 + [0.5] * 50 + [1.]] * 10)
    assert_array_equal(avg_recall, [0.] * 49 + [0.5] * 50 + [1.])
    assert_array_almost_equal(
        proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
    assert auc == 25.5

    # Proposals that never overlap the ground truth.
    recall, avg_recall, proposals_per_video, auc = (
        average_recall_at_avg_proposals(gt_two_videos, disjoint_proposals, 4))
    assert_array_equal(recall, [[0.] * 100] * 10)
    assert_array_equal(avg_recall, [0.] * 100)
    assert_array_almost_equal(
        proposals_per_video, np.arange(0.02, 2.02, 0.02), decimal=10)
    assert auc == 0

    # Many dense proposals against a single ground truth segment.
    recall, avg_recall, proposals_per_video, auc = (
        average_recall_at_avg_proposals(gt_one_video, dense_proposals, 100))
    assert_array_equal(recall, [[1.] * 100] * 10)
    assert_array_equal(avg_recall, [1.] * 100)
    assert_array_almost_equal(
        proposals_per_video, np.arange(1, 101, 1), decimal=10)
    assert auc == 99.0
def test_get_weighted_score():
    """Verify weighted fusion of score lists by ``get_weighted_score``."""
    score_a = [
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]),
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])
    ]
    # Same arrays as score_a, rotated by one position.
    score_b = [
        np.array([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]),
        np.array([0.0365, 0.5158, 1.1067, -0.9276, -0.2124]),
        np.array([0.6232, 0.9912, -0.8562, 0.0148, 1.6413]),
        np.array([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])
    ]

    # A single score list with weight 1 must come back unchanged.
    fused = get_weighted_score([score_a], [1])
    assert np.all(np.isclose(np.array(score_a), np.array(fused)))

    # Two lists fused with coefficients 2 and 1.
    coeff_a, coeff_b = 2., 1.
    fused = get_weighted_score([score_a, score_b], [coeff_a, coeff_b])
    expected = [a * coeff_a + b * coeff_b for a, b in zip(score_a, score_b)]
    assert np.all(np.isclose(np.array(expected), np.array(fused)))
def test_mean_average_precision():
    """Verify ``mean_average_precision`` against hand-computed mAP values."""

    def _check(scores, labels, expected):
        # mAP for the given multi-label targets must equal the precomputed
        # value exactly.
        assert mean_average_precision(scores, labels) == expected

    scores = [
        np.array([0.1, 0.2, 0.3, 0.4]),
        np.array([0.2, 0.3, 0.4, 0.1]),
        np.array([0.3, 0.4, 0.1, 0.2]),
        np.array([0.4, 0.1, 0.2, 0.3])
    ]
    _check(
        scores,
        np.array([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]]),
        2 / 3)
    _check(
        scores,
        np.array([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]]),
        np.mean([0.5, 0.5833333333333333, 0.8055555555555556, 1.0]))
| [
"mmaction.core.average_recall_at_avg_proposals",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.testing.assert_array_almost_equal",
"numpy.int64",
"mmaction.core.mmit_mean_average_precision",
"numpy.delete",
"mmaction.core.top_k_accuracy",
"mmaction.core.mean_class_accuracy",
"numpy.diff",
... | [((739, 795), 'numpy.zeros', 'np.zeros', (['(max_index + 1, max_index + 1)'], {'dtype': 'np.int64'}), '((max_index + 1, max_index + 1), dtype=np.int64)\n', (747, 795), True, 'import numpy as np\n'), ((1057, 1100), 'numpy.delete', 'np.delete', (['confusion_mat', 'del_index'], {'axis': '(0)'}), '(confusion_mat, del_index, axis=0)\n', (1066, 1100), True, 'import numpy as np\n'), ((1121, 1164), 'numpy.delete', 'np.delete', (['confusion_mat', 'del_index'], {'axis': '(1)'}), '(confusion_mat, del_index, axis=1)\n', (1130, 1164), True, 'import numpy as np\n'), ((2044, 2076), 'os.path.join', 'osp.join', (['data_prefix', '"""gt.json"""'], {}), "(data_prefix, 'gt.json')\n", (2052, 2076), True, 'import os.path as osp\n'), ((2095, 2131), 'os.path.join', 'osp.join', (['data_prefix', '"""result.json"""'], {}), "(data_prefix, 'result.json')\n", (2103, 2131), True, 'import os.path as osp\n'), ((2151, 2196), 'mmaction.core.ActivityNetLocalization', 'ActivityNetLocalization', (['gt_path', 'result_path'], {}), '(gt_path, result_path)\n', (2174, 2196), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((2246, 2366), 'numpy.array', 'np.array', (['[0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222, 0.52083333,\n 0.52083333, 0.52083333, 0.5]'], {}), '([0.71428571, 0.71428571, 0.71428571, 0.6875, 0.6875, 0.59722222, \n 0.52083333, 0.52083333, 0.52083333, 0.5])\n', (2254, 2366), True, 'import numpy as np\n'), ((2426, 2468), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['results[0]', 'mAP'], {}), '(results[0], mAP)\n', (2451, 2468), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((2473, 2523), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['results[1]', 'average_mAP'], {}), '(results[1], 
average_mAP)\n', (2498, 2523), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((2666, 2697), 'os.path.join', 'osp.join', (['data_prefix', '"""gt.csv"""'], {}), "(data_prefix, 'gt.csv')\n", (2674, 2697), True, 'import os.path as osp\n'), ((2716, 2749), 'os.path.join', 'osp.join', (['data_prefix', '"""pred.csv"""'], {}), "(data_prefix, 'pred.csv')\n", (2724, 2749), True, 'import os.path as osp\n'), ((2766, 2806), 'os.path.join', 'osp.join', (['data_prefix', '"""action_list.txt"""'], {}), "(data_prefix, 'action_list.txt')\n", (2774, 2806), True, 'import os.path as osp\n'), ((2840, 2894), 'mmaction.core.evaluation.ava_utils.ava_eval', 'ava_eval', (['result_path', '"""mAP"""', 'label_map', 'gt_path', 'None'], {}), "(result_path, 'mAP', label_map, gt_path, None)\n", (2848, 2894), False, 'from mmaction.core.evaluation.ava_utils import ava_eval\n'), ((2899, 2961), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (["detection['mAP@0.5IOU']", '(0.09385522)'], {}), "(detection['mAP@0.5IOU'], 0.09385522)\n", (2924, 2961), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((3110, 3157), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(100)', 'dtype': 'np.int64'}), '(10, size=100, dtype=np.int64)\n', (3127, 3157), True, 'import numpy as np\n'), ((4499, 4539), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top1_labels_0', 'k'], {}), '(scores, top1_labels_0, k)\n', (4513, 4539), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((4572, 4613), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top1_labels_25', 'k'], {}), '(scores, top1_labels_25, k)\n', (4586, 4613), False, 'from mmaction.core import ActivityNetLocalization, 
average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((4649, 4690), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top1_labels_50', 'k'], {}), '(scores, top1_labels_50, k)\n', (4663, 4690), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((4725, 4766), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top1_labels_75', 'k'], {}), '(scores, top1_labels_75, k)\n', (4739, 4766), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((4802, 4844), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top1_labels_100', 'k'], {}), '(scores, top1_labels_100, k)\n', (4816, 4844), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((4994, 5038), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top2_labels_0_100', 'k'], {}), '(scores, top2_labels_0_100, k)\n', (5008, 5038), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((5076, 5120), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top2_labels_25_75', 'k'], {}), '(scores, top2_labels_25_75, k)\n', (5090, 5120), False, 'from mmaction.core import 
ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((5336, 5382), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top5_labels_0_0_100', 'k'], {}), '(scores, top5_labels_0_0_100, k)\n', (5350, 5382), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((5423, 5470), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top5_labels_0_50_100', 'k'], {}), '(scores, top5_labels_0_50_100, k)\n', (5437, 5470), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((5513, 5561), 'mmaction.core.top_k_accuracy', 'top_k_accuracy', (['scores', 'top5_labels_25_75_100', 'k'], {}), '(scores, top5_labels_25_75_100, k)\n', (5527, 5561), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((5981, 6003), 'numpy.int64', 'np.int64', (['[1, 4, 0, 2]'], {}), '([1, 4, 0, 2])\n', (5989, 6003), True, 'import numpy as np\n'), ((6026, 6048), 'numpy.int64', 'np.int64', (['[2, 0, 4, 3]'], {}), '([2, 0, 4, 3])\n', (6034, 6048), True, 'import numpy as np\n'), ((6071, 6093), 'numpy.int64', 'np.int64', (['[2, 2, 2, 3]'], {}), '([2, 2, 2, 3])\n', (6079, 6093), True, 'import numpy as np\n'), ((6116, 6138), 'numpy.int64', 'np.int64', (['[4, 2, 2, 4]'], {}), '([4, 2, 2, 4])\n', (6124, 6138), True, 'import numpy as np\n'), ((6162, 6184), 
'numpy.int64', 'np.int64', (['[2, 2, 2, 4]'], {}), '([2, 2, 2, 4])\n', (6170, 6184), True, 'import numpy as np\n'), ((6658, 6703), 'mmaction.core.mmit_mean_average_precision', 'mmit_mean_average_precision', (['y_scores', 'y_true'], {}), '(y_scores, y_true)\n', (6685, 6703), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((6926, 6938), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6934, 6938), True, 'import numpy as np\n'), ((6964, 6976), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6972, 6976), True, 'import numpy as np\n'), ((7127, 7153), 'numpy.array', 'np.array', (['[[1, 2], [2, 3]]'], {}), '([[1, 2], [2, 3]])\n', (7135, 7153), True, 'import numpy as np\n'), ((7179, 7207), 'numpy.array', 'np.array', (['[[2, 3], [2.5, 3]]'], {}), '([[2, 3], [2.5, 3]])\n', (7187, 7207), True, 'import numpy as np\n'), ((7227, 7285), 'mmaction.core.pairwise_temporal_iou', 'pairwise_temporal_iou', (['candidate_segments', 'target_segments'], {}), '(candidate_segments, target_segments)\n', (7248, 7285), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((7290, 7342), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['temporal_iou', '[[0, 0], [1, 0.5]]'], {}), '(temporal_iou, [[0, 0], [1, 0.5]])\n', (7308, 7342), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((7399, 7425), 'numpy.array', 'np.array', (['[[1, 2], [2, 3]]'], {}), '([[1, 2], [2, 3]])\n', (7407, 7425), True, 'import numpy as np\n'), ((7451, 7479), 'numpy.array', 'np.array', (['[[2, 3], [2.5, 3]]'], {}), '([[2, 3], [2.5, 3]])\n', (7459, 7479), True, 'import numpy as np\n'), 
((7522, 7613), 'mmaction.core.pairwise_temporal_iou', 'pairwise_temporal_iou', (['candidate_segments', 'target_segments'], {'calculate_overlap_self': '(True)'}), '(candidate_segments, target_segments,\n calculate_overlap_self=True)\n', (7543, 7613), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((7623, 7682), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['temporal_overlap_self', '[[0, 0], [1, 1]]'], {}), '(temporal_overlap_self, [[0, 0], [1, 1]])\n', (7641, 7682), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((7769, 7795), 'numpy.array', 'np.array', (['[[1, 2], [2, 3]]'], {}), '([[1, 2], [2, 3]])\n', (7777, 7795), True, 'import numpy as np\n'), ((7821, 7839), 'numpy.array', 'np.array', (['[2.5, 3]'], {}), '([2.5, 3])\n', (7829, 7839), True, 'import numpy as np\n'), ((7882, 7973), 'mmaction.core.pairwise_temporal_iou', 'pairwise_temporal_iou', (['candidate_segments', 'target_segments'], {'calculate_overlap_self': '(True)'}), '(candidate_segments, target_segments,\n calculate_overlap_self=True)\n', (7903, 7973), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((7983, 8032), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['temporal_overlap_self', '[0, 1]'], {}), '(temporal_overlap_self, [0, 1])\n', (8001, 8032), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((8690, 8751), 'mmaction.core.average_recall_at_avg_proposals', 'average_recall_at_avg_proposals', (['ground_truth1', 'proposals1', '(4)'], {}), '(ground_truth1, proposals1, 4)\n', (8721, 8751), False, 'from 
mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((8757, 8823), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['recall', '([[0.0] * 49 + [0.5] * 50 + [1.0]] * 10)'], {}), '(recall, [[0.0] * 49 + [0.5] * 50 + [1.0]] * 10)\n', (8775, 8823), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((8826, 8889), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['avg_recall', '([0.0] * 49 + [0.5] * 50 + [1.0])'], {}), '(avg_recall, [0.0] * 49 + [0.5] * 50 + [1.0])\n', (8844, 8889), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((9074, 9135), 'mmaction.core.average_recall_at_avg_proposals', 'average_recall_at_avg_proposals', (['ground_truth1', 'proposals2', '(4)'], {}), '(ground_truth1, proposals2, 4)\n', (9105, 9135), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((9141, 9187), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['recall', '([[0.0] * 100] * 10)'], {}), '(recall, [[0.0] * 100] * 10)\n', (9159, 9187), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((9191, 9234), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['avg_recall', '([0.0] * 100)'], {}), '(avg_recall, [0.0] * 100)\n', (9209, 9234), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((9417, 9480), 'mmaction.core.average_recall_at_avg_proposals', 'average_recall_at_avg_proposals', (['ground_truth2', 'proposals3', '(100)'], {}), '(ground_truth2, proposals3, 100)\n', (9448, 9480), False, 'from mmaction.core import ActivityNetLocalization, 
average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((9486, 9532), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['recall', '([[1.0] * 100] * 10)'], {}), '(recall, [[1.0] * 100] * 10)\n', (9504, 9532), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((9536, 9579), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['avg_recall', '([1.0] * 100)'], {}), '(avg_recall, [1.0] * 100)\n', (9554, 9579), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((10288, 10322), 'mmaction.core.get_weighted_score', 'get_weighted_score', (['[score_a]', '[1]'], {}), '([score_a], [1])\n', (10306, 10322), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((10449, 10507), 'mmaction.core.get_weighted_score', 'get_weighted_score', (['[score_a, score_b]', '[coeff_a, coeff_b]'], {}), '([score_a, score_b], [coeff_a, coeff_b])\n', (10467, 10507), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((11050, 11116), 'numpy.array', 'np.array', (['[[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]]'], {}), '([[1, 1, 0, 0], [1, 0, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1]])\n', (11058, 11116), True, 'import numpy as np\n'), ((11150, 11216), 'numpy.array', 'np.array', (['[[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]]'], {}), '([[0, 1, 0, 1], [0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1]])\n', (11158, 11216), True, 'import numpy as np\n'), ((11231, 11290), 'numpy.mean', 'np.mean', (['[0.5, 
0.5833333333333333, 0.8055555555555556, 1.0]'], {}), '([0.5, 0.5833333333333333, 0.8055555555555556, 1.0])\n', (11238, 11290), True, 'import numpy as np\n'), ((1220, 1259), 'numpy.array', 'np.array', (['confusion_mat'], {'dtype': 'np.float'}), '(confusion_mat, dtype=np.float)\n', (1228, 1259), True, 'import numpy as np\n'), ((3228, 3279), 'mmaction.core.confusion_matrix', 'confusion_matrix', (['pred_labels', 'gt_labels', 'normalize'], {}), '(pred_labels, gt_labels, normalize)\n', (3244, 3279), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((3363, 3400), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['cf_mat', 'gt_cf_mat'], {}), '(cf_mat, gt_cf_mat)\n', (3381, 3400), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((3411, 3436), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3424, 3436), False, 'import pytest\n'), ((3507, 3546), 'mmaction.core.confusion_matrix', 'confusion_matrix', (['[1]', '[1]', '"""unsupport"""'], {}), "([1], [1], 'unsupport')\n", (3523, 3546), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((3557, 3581), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3570, 3581), False, 'import pytest\n'), ((3635, 3661), 'mmaction.core.confusion_matrix', 'confusion_matrix', (['(0.5)', '[1]'], {}), '(0.5, [1])\n', (3651, 3661), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, 
top_k_accuracy\n'), ((3672, 3696), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3685, 3696), False, 'import pytest\n'), ((3750, 3776), 'mmaction.core.confusion_matrix', 'confusion_matrix', (['[1]', '(0.5)'], {}), '([1], 0.5)\n', (3766, 3776), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((3787, 3811), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3800, 3811), False, 'import pytest\n'), ((3861, 3889), 'mmaction.core.confusion_matrix', 'confusion_matrix', (['[0.5]', '[1]'], {}), '([0.5], [1])\n', (3877, 3889), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((3900, 3924), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3913, 3924), False, 'import pytest\n'), ((3974, 4002), 'mmaction.core.confusion_matrix', 'confusion_matrix', (['[1]', '[0.5]'], {}), '([1], [0.5])\n', (3990, 4002), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((4045, 4098), 'numpy.array', 'np.array', (['[-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]'], {}), '([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])\n', (4053, 4098), True, 'import numpy as np\n'), ((4108, 4159), 'numpy.array', 'np.array', (['[-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]'], {}), '([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395])\n', (4116, 4159), True, 'import numpy as np\n'), ((4169, 4221), 'numpy.array', 'np.array', (['[0.0365, 0.5158, 1.1067, -0.9276, -0.2124]'], {}), 
'([0.0365, 0.5158, 1.1067, -0.9276, -0.2124])\n', (4177, 4221), True, 'import numpy as np\n'), ((4231, 4282), 'numpy.array', 'np.array', (['[0.6232, 0.9912, -0.8562, 0.0148, 1.6413]'], {}), '([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])\n', (4239, 4282), True, 'import numpy as np\n'), ((5655, 5708), 'numpy.array', 'np.array', (['[-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]'], {}), '([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])\n', (5663, 5708), True, 'import numpy as np\n'), ((5718, 5769), 'numpy.array', 'np.array', (['[-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]'], {}), '([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395])\n', (5726, 5769), True, 'import numpy as np\n'), ((5779, 5831), 'numpy.array', 'np.array', (['[0.0365, 0.5158, 1.1067, -0.9276, -0.2124]'], {}), '([0.0365, 0.5158, 1.1067, -0.9276, -0.2124])\n', (5787, 5831), True, 'import numpy as np\n'), ((5841, 5892), 'numpy.array', 'np.array', (['[0.6232, 0.9912, -0.8562, 0.0148, 1.6413]'], {}), '([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])\n', (5849, 5892), True, 'import numpy as np\n'), ((6196, 6239), 'mmaction.core.mean_class_accuracy', 'mean_class_accuracy', (['scores', 'mean_cls_acc_0'], {}), '(scores, mean_cls_acc_0)\n', (6215, 6239), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((6256, 6300), 'mmaction.core.mean_class_accuracy', 'mean_class_accuracy', (['scores', 'mean_cls_acc_25'], {}), '(scores, mean_cls_acc_25)\n', (6275, 6300), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((6320, 6364), 'mmaction.core.mean_class_accuracy', 'mean_class_accuracy', (['scores', 'mean_cls_acc_33'], {}), '(scores, mean_cls_acc_33)\n', 
(6339, 6364), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((6385, 6429), 'mmaction.core.mean_class_accuracy', 'mean_class_accuracy', (['scores', 'mean_cls_acc_75'], {}), '(scores, mean_cls_acc_75)\n', (6404, 6429), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((6449, 6494), 'mmaction.core.mean_class_accuracy', 'mean_class_accuracy', (['scores', 'mean_cls_acc_100'], {}), '(scores, mean_cls_acc_100)\n', (6468, 6494), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((6575, 6597), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (6583, 6597), True, 'import numpy as np\n'), ((6615, 6646), 'numpy.array', 'np.array', (['[0.1, 0.4, 0.35, 0.8]'], {}), '([0.1, 0.4, 0.35, 0.8])\n', (6623, 6646), True, 'import numpy as np\n'), ((6986, 7011), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6999, 7011), False, 'import pytest\n'), ((7021, 7079), 'mmaction.core.pairwise_temporal_iou', 'pairwise_temporal_iou', (['target_segments', 'candidate_segments'], {}), '(target_segments, candidate_segments)\n', (7042, 7079), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((8120, 8146), 'numpy.array', 'np.array', (['[[0, 1], [1, 2]]'], {}), '([[0, 1], [1, 
2]])\n', (8128, 8146), True, 'import numpy as np\n'), ((8167, 8193), 'numpy.array', 'np.array', (['[[0, 1], [1, 2]]'], {}), '([[0, 1], [1, 2]])\n', (8175, 8193), True, 'import numpy as np\n'), ((8232, 8250), 'numpy.array', 'np.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (8240, 8250), True, 'import numpy as np\n'), ((8290, 8322), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 2, 1]]'], {}), '([[0, 1, 1], [1, 2, 1]])\n', (8298, 8322), True, 'import numpy as np\n'), ((8343, 8375), 'numpy.array', 'np.array', (['[[0, 1, 1], [1, 2, 1]]'], {}), '([[0, 1, 1], [1, 2, 1]])\n', (8351, 8375), True, 'import numpy as np\n'), ((8420, 8460), 'numpy.array', 'np.array', (['[[10, 11, 0.6], [11, 12, 0.4]]'], {}), '([[10, 11, 0.6], [11, 12, 0.4]])\n', (8428, 8460), True, 'import numpy as np\n'), ((8481, 8521), 'numpy.array', 'np.array', (['[[10, 11, 0.6], [11, 12, 0.4]]'], {}), '([[10, 11, 0.6], [11, 12, 0.4]])\n', (8489, 8521), True, 'import numpy as np\n'), ((8948, 8975), 'numpy.arange', 'np.arange', (['(0.02)', '(2.02)', '(0.02)'], {}), '(0.02, 2.02, 0.02)\n', (8957, 8975), True, 'import numpy as np\n'), ((9294, 9321), 'numpy.arange', 'np.arange', (['(0.02)', '(2.02)', '(0.02)'], {}), '(0.02, 2.02, 0.02)\n', (9303, 9321), True, 'import numpy as np\n'), ((9641, 9661), 'numpy.arange', 'np.arange', (['(1)', '(101)', '(1)'], {}), '(1, 101, 1)\n', (9650, 9661), True, 'import numpy as np\n'), ((9755, 9808), 'numpy.array', 'np.array', (['[-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]'], {}), '([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])\n', (9763, 9808), True, 'import numpy as np\n'), ((9818, 9869), 'numpy.array', 'np.array', (['[-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]'], {}), '([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395])\n', (9826, 9869), True, 'import numpy as np\n'), ((9879, 9931), 'numpy.array', 'np.array', (['[0.0365, 0.5158, 1.1067, -0.9276, -0.2124]'], {}), '([0.0365, 0.5158, 1.1067, -0.9276, -0.2124])\n', (9887, 9931), True, 'import numpy as np\n'), ((9941, 9992), 'numpy.array', 
'np.array', (['[0.6232, 0.9912, -0.8562, 0.0148, 1.6413]'], {}), '([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])\n', (9949, 9992), True, 'import numpy as np\n'), ((10023, 10074), 'numpy.array', 'np.array', (['[-0.0413, 0.6366, 1.1155, 0.3484, 0.0395]'], {}), '([-0.0413, 0.6366, 1.1155, 0.3484, 0.0395])\n', (10031, 10074), True, 'import numpy as np\n'), ((10084, 10136), 'numpy.array', 'np.array', (['[0.0365, 0.5158, 1.1067, -0.9276, -0.2124]'], {}), '([0.0365, 0.5158, 1.1067, -0.9276, -0.2124])\n', (10092, 10136), True, 'import numpy as np\n'), ((10146, 10197), 'numpy.array', 'np.array', (['[0.6232, 0.9912, -0.8562, 0.0148, 1.6413]'], {}), '([0.6232, 0.9912, -0.8562, 0.0148, 1.6413])\n', (10154, 10197), True, 'import numpy as np\n'), ((10207, 10260), 'numpy.array', 'np.array', (['[-0.2203, -0.7538, 1.8789, 0.4451, -0.2526]'], {}), '([-0.2203, -0.7538, 1.8789, 0.4451, -0.2526])\n', (10215, 10260), True, 'import numpy as np\n'), ((10788, 10826), 'mmaction.core.mean_average_precision', 'mean_average_precision', (['scores', 'labels'], {}), '(scores, labels)\n', (10810, 10826), False, 'from mmaction.core import ActivityNetLocalization, average_recall_at_avg_proposals, confusion_matrix, get_weighted_score, mean_average_precision, mean_class_accuracy, mmit_mean_average_precision, pairwise_temporal_iou, top_k_accuracy\n'), ((10879, 10909), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4]'], {}), '([0.1, 0.2, 0.3, 0.4])\n', (10887, 10909), True, 'import numpy as np\n'), ((10919, 10949), 'numpy.array', 'np.array', (['[0.2, 0.3, 0.4, 0.1]'], {}), '([0.2, 0.3, 0.4, 0.1])\n', (10927, 10949), True, 'import numpy as np\n'), ((10959, 10989), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.1, 0.2]'], {}), '([0.3, 0.4, 0.1, 0.2])\n', (10967, 10989), True, 'import numpy as np\n'), ((10999, 11029), 'numpy.array', 'np.array', (['[0.4, 0.1, 0.2, 0.3]'], {}), '([0.4, 0.1, 0.2, 0.3])\n', (11007, 11029), True, 'import numpy as np\n'), ((1362, 1399), 'numpy.sum', 'np.sum', (['confusion_mat[i]'], 
{'dtype': 'float'}), '(confusion_mat[i], dtype=float)\n', (1368, 1399), True, 'import numpy as np\n'), ((1976, 1997), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (1987, 1997), True, 'import os.path as osp\n'), ((2601, 2622), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (2612, 2622), True, 'import os.path as osp\n'), ((3049, 3069), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (3063, 3069), False, 'import random\n'), ((10352, 10369), 'numpy.array', 'np.array', (['score_a'], {}), '(score_a)\n', (10360, 10369), True, 'import numpy as np\n'), ((10371, 10395), 'numpy.array', 'np.array', (['weighted_score'], {}), '(weighted_score)\n', (10379, 10395), True, 'import numpy as np\n'), ((10632, 10654), 'numpy.array', 'np.array', (['ground_truth'], {}), '(ground_truth)\n', (10640, 10654), True, 'import numpy as np\n'), ((10656, 10680), 'numpy.array', 'np.array', (['weighted_score'], {}), '(weighted_score)\n', (10664, 10680), True, 'import numpy as np\n'), ((1790, 1811), 'numpy.sum', 'np.sum', (['confusion_mat'], {}), '(confusion_mat)\n', (1796, 1811), True, 'import numpy as np\n'), ((6799, 6814), 'numpy.diff', 'np.diff', (['recall'], {}), '(recall)\n', (6806, 6814), True, 'import numpy as np\n'), ((6817, 6836), 'numpy.array', 'np.array', (['precision'], {}), '(precision)\n', (6825, 6836), True, 'import numpy as np\n')] |
import numpy as np
import sys
import os
import pytest
from knnFeat import _distance
sys.path.append(os.getcwd())
@pytest.mark.success
def test_distance():
    """_distance should return the Euclidean distance between the two points."""
    a = np.array([0, 0])
    b = np.array([3, 4])
    # Fix: the original swapped the names — `expected` is the known answer (5),
    # `actual` is what the function under test returns.
    actual = _distance(a, b)
    expected = 5
    assert actual == expected
| [
"numpy.array",
"knnFeat._distance",
"os.getcwd"
] | [((100, 111), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (109, 111), False, 'import os\n'), ((165, 181), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (173, 181), True, 'import numpy as np\n'), ((190, 206), 'numpy.array', 'np.array', (['[3, 4]'], {}), '([3, 4])\n', (198, 206), True, 'import numpy as np\n'), ((222, 237), 'knnFeat._distance', '_distance', (['a', 'b'], {}), '(a, b)\n', (231, 237), False, 'from knnFeat import _distance\n')] |
import numpy as np
import os
import pdb
import sys
import time
import pickle
from create_pair_set import create
from mediator import train_mediator, test_mediator
def get_hist(cmt):
    """Count occurrences of each index across the nested committee lists.

    Args:
        cmt: iterable of iterables of integer indices; -1 entries are ignored
            (they mark missing neighbors).

    Returns:
        dict mapping index -> number of occurrences.
    """
    hist = {}
    # Iterate the nested structure directly instead of building a flattened
    # intermediate list; dict.get replaces the manual membership check.
    for c in cmt:
        for idx in c:
            if idx != -1:
                hist[idx] = hist.get(idx, 0) + 1
    return hist
def sample_task(i, base, committee, vote_num=7, th=0.7):
    """Select committee-supported neighbor pairs for one base index `i`.

    A pair (i, k) is kept when k is a valid neighbor of i, at least `vote_num`
    committee members also list k, and the base similarity exceeds `th`.
    Returns (pairs, scores) for this index.
    """
    pairs, scores = [], []
    hist = get_hist([c[i][0] for c in committee])
    neighbors = base[i][0]
    similarities = 1.0 - base[i][1]
    for j, k in enumerate(neighbors):
        # Skip padding (-1), self-matches, and weak similarities.
        if k == -1 or k == i or similarities[j] <= th:
            continue
        if k in hist and hist[k] >= vote_num:
            pairs.append(sorted([i, k]))
            scores.append(similarities[j])
    return pairs, scores
def helper(args):
    """Unpack a single argument tuple for pool.imap.

    Fix: the original had no `return`, so the parallel driver collected a list
    of None and `zip(*res)` could not recover (pairs, scores).
    """
    return sample_task(*args)
def sample_parallel(base, committee, vote_num=7, th=0.7):
    """Parallel pair selection: runs `sample_task` per base index over a 16-worker pool.

    NOTE(review): this relies on `helper` returning (pairs, scores) per index — verify.
    NOTE(review): when `committee` is empty, `pairs`/`scores` below are never assigned
    and a NameError is raised — confirm callers always pass a non-empty committee.
    """
    import multiprocessing
    import tqdm
    pool = multiprocessing.Pool(16)
    if len(committee) > 0:
        res = list(tqdm.tqdm(pool.imap(helper, zip(range(len(base)), [base]*len(base), [committee]*len(base), [vote_num]*len(base), [th]*len(base))), total=len(base)))
        pairs, scores = zip(*res)
    pairs = np.array(pairs)
    scores = np.array(scores)
    # De-duplicate pairs; keep the score of each pair's first occurrence.
    pairs, unique_idx = np.unique(pairs, return_index=True, axis=0)
    scores = scores[unique_idx]
    return pairs, scores
def sample(base, committee, vote_num=7, th=0.7):
    """Collect candidate pairs from base kNN results, optionally filtered by committee votes.

    When `committee` is non-empty a pair (i, k) additionally requires at least
    `vote_num` committee members to list k among i's neighbors.
    Returns (unique sorted pairs, scores of their first occurrences).
    """
    pairs = []
    scores = []
    use_committee = len(committee) > 0
    for i in range(len(base)):
        if use_committee:
            hist = get_hist([c[i][0] for c in committee])
        knn = base[i][0]
        simi = 1.0 - base[i][1]
        for j, k in enumerate(knn):
            # Reject padding (-1), self-pairs, and weak similarities.
            if k == -1 or k == i or simi[j] <= th:
                continue
            if use_committee and not (k in hist and hist[k] >= vote_num):
                continue
            pairs.append(sorted([i, k]))
            scores.append(simi[j])
    pairs = np.array(pairs)
    scores = np.array(scores)
    pairs, unique_idx = np.unique(pairs, return_index=True, axis=0)
    scores = scores[unique_idx]
    return pairs, scores
def cdp(args):
    """CDP pair-selection entry point.

    Reads the image list to set args.total_num, then selects candidate pairs
    either by committee voting or by the mediator model, and saves the selected
    pairs and their scores under <exp_root>/output.
    """
    exp_root = './experiment/' + args.data_name
    with open("../data/{}/{}.txt".format(args.data_name, args.data_name), 'r') as f:
        fns = f.readlines()
    args.total_num = len(fns)
    # Output directory name encodes the strategy and its thresholds.
    if args.strategy == "vote":
        output_cdp = '{}/output/{}_accept{}_th{}'.format(exp_root, args.strategy, args.vote['accept_num'], args.vote['threshold'])
    elif args.strategy == "mediator":
        output_cdp = '{}/output/{}_th{}'.format(exp_root, args.strategy, args.mediator['threshold'])
    else:
        raise Exception('No such strategy: {}'.format(args.strategy))
    # output_sub = '{}/sz{}_step{}'.format(output_cdp, args.propagation['max_sz'], args.propagation['step'])
    # print('Output folder: {}'.format(output_sub))
    # outcdp = output_sub + '/cdp.pkl'
    # outpred = output_sub + '/pred.npy'
    # outlist = '{}/list.txt'.format(output_sub)
    # outmeta = '{}/meta.txt'.format(output_sub)
    # if not os.path.isdir(output_sub):
    #     os.makedirs(output_sub)
    # pair selection
    if args.strategy == 'vote':
        pairs, scores = vote(output_cdp, args)
    else:
        if args.mediator['phase'] == 'train':
            # Training phase: build mediator inputs if missing, train, and exit early.
            if not os.path.isfile(output_cdp + "/mediator_input.npy") or not os.path.isfile(output_cdp + "/pair_label.npy"):
                create(exp_root + "/output", args)
            train_mediator(args)
            return
        else:
            pairs, scores = mediator(exp_root + "/output", args)
    print("pair num: {}".format(len(pairs)))
    # NOTE(review): output file names use args.mediator['threshold'] even for the
    # 'vote' strategy — confirm this is intentional.
    np.save(exp_root + "/output/selected_pairs_{}.npy".format(args.mediator['threshold']), pairs)
    np.save(exp_root+ "/output/selected_pairs_scores_{}.npy".format(args.mediator['threshold']), scores)
def vote(output, args):
    """Pair selection by committee voting, cached as .npy files under `output`.

    Loads base/committee kNN pickles, runs `sample`, and stores the resulting
    pairs and scores; subsequent calls reuse the cached files.
    """
    assert args.vote['accept_num'] <= len(args.committee)
    base_knn_fn = 'data/{}/knn/{}.pkl'.format(args.data_name, args.base)
    committee_knn_fn = ['data/{}/knn/{}.pkl'.format(args.data_name, cmt) for cmt in args.committee]
    pairs_fn = output + '/vote_pairs.npy'
    scores_fn = output + '/vote_scores.npy'
    if os.path.isfile(pairs_fn):
        # Cached result from a previous run.
        print('Loading pairs by voting ...')
        return np.load(pairs_fn), np.load(scores_fn)
    print('Extracting pairs by voting ...')
    with open(base_knn_fn, 'rb') as f:
        knn_base = pickle.load(f)
    knn_committee = []
    for cfn in committee_knn_fn:
        with open(cfn, 'rb') as f:
            knn_committee.append(pickle.load(f))
    pairs, scores = sample(knn_base, knn_committee, vote_num=args.vote['accept_num'], th=args.vote['threshold'])
    np.save(pairs_fn, pairs)
    np.save(scores_fn, scores)
    return pairs, scores
def mediator(output, args):
    """Pair selection via the mediator model.

    Builds mediator inputs and predictions on demand, then keeps the raw pairs
    whose predicted score exceeds args.mediator['threshold'].
    Returns (pairs, scores).
    """
    input_fn = output + "/mediator_input.npy"
    pred_fn = output + "/pairs_pred.npy"
    if not os.path.isfile(input_fn):
        create(output, args)
    if not os.path.isfile(pred_fn):
        test_mediator(args, pred_fn)
    raw_pairs = np.load(output + "/pairs.npy")
    pair_pred = np.load(pred_fn)
    keep = np.where(pair_pred > args.mediator['threshold'])[0]
    return raw_pairs[keep, :], pair_pred[keep]
def groundtruth(args):
    """Load ground-truth pair labels and return positive, de-duplicated pairs.

    Reads args.groundtruth['pair_file'] (candidate pairs, shape (n, 2)) and
    args.groundtruth['pair_gt_fn'] (0/1 labels); keeps pairs labeled 1, drops
    self-pairs, sorts each pair, and removes duplicates.

    Returns:
        (pairs, scores) where scores are all 1.0 (float32).
    """
    raw_pairs = np.load(args.groundtruth['pair_file'])
    # Fix: `np.int` was removed in NumPy 1.24 — the builtin `int` is equivalent.
    pair_gt = np.load(args.groundtruth['pair_gt_fn']).astype(int)
    pairs = raw_pairs[np.where(pair_gt == 1)[0], :]
    pairs = pairs[np.where(pairs[:,0] != pairs[:,1])]
    pairs = np.unique(np.sort(pairs, axis=1), axis=0)
    scores = np.ones((pairs.shape[0]), dtype=np.float32)
    return pairs, scores
# NOTE(review): `eval_cluster` is not among this file's visible imports — confirm its origin.
def evaluate_cluster(label, pred):
    """Print precision, recall and FMI of a predicted clustering against labels."""
    prec, recall, fmi = eval_cluster.fowlkes_mallows_score(label, pred)
    print('prec: {}, recall: {}, fmi: {}'.format(prec, recall, fmi)) | [
"numpy.ones",
"numpy.unique",
"numpy.where",
"numpy.sort",
"pickle.load",
"os.path.isfile",
"numpy.array",
"mediator.test_mediator",
"multiprocessing.Pool",
"create_pair_set.create",
"mediator.train_mediator",
"numpy.load",
"numpy.save"
] | [((979, 1003), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(16)'], {}), '(16)\n', (999, 1003), False, 'import multiprocessing\n'), ((1241, 1256), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (1249, 1256), True, 'import numpy as np\n'), ((1270, 1286), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1278, 1286), True, 'import numpy as np\n'), ((1311, 1354), 'numpy.unique', 'np.unique', (['pairs'], {'return_index': '(True)', 'axis': '(0)'}), '(pairs, return_index=True, axis=0)\n', (1320, 1354), True, 'import numpy as np\n'), ((2220, 2235), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (2228, 2235), True, 'import numpy as np\n'), ((2249, 2265), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2257, 2265), True, 'import numpy as np\n'), ((2290, 2333), 'numpy.unique', 'np.unique', (['pairs'], {'return_index': '(True)', 'axis': '(0)'}), '(pairs, return_index=True, axis=0)\n', (2299, 2333), True, 'import numpy as np\n'), ((5403, 5433), 'numpy.load', 'np.load', (["(output + '/pairs.npy')"], {}), "(output + '/pairs.npy')\n", (5410, 5433), True, 'import numpy as np\n'), ((5450, 5485), 'numpy.load', 'np.load', (["(output + '/pairs_pred.npy')"], {}), "(output + '/pairs_pred.npy')\n", (5457, 5485), True, 'import numpy as np\n'), ((5671, 5709), 'numpy.load', 'np.load', (["args.groundtruth['pair_file']"], {}), "(args.groundtruth['pair_file'])\n", (5678, 5709), True, 'import numpy as np\n'), ((5952, 5993), 'numpy.ones', 'np.ones', (['pairs.shape[0]'], {'dtype': 'np.float32'}), '(pairs.shape[0], dtype=np.float32)\n', (5959, 5993), True, 'import numpy as np\n'), ((4376, 4418), 'os.path.isfile', 'os.path.isfile', (["(output + '/vote_pairs.npy')"], {}), "(output + '/vote_pairs.npy')\n", (4390, 4418), False, 'import os\n'), ((4877, 4919), 'numpy.save', 'np.save', (["(output + '/vote_pairs.npy')", 'pairs'], {}), "(output + '/vote_pairs.npy', pairs)\n", (4884, 4919), True, 'import numpy as np\n'), ((4928, 4972), 'numpy.save', 
'np.save', (["(output + '/vote_scores.npy')", 'scores'], {}), "(output + '/vote_scores.npy', scores)\n", (4935, 4972), True, 'import numpy as np\n'), ((5044, 5079), 'numpy.load', 'np.load', (["(output + '/vote_pairs.npy')"], {}), "(output + '/vote_pairs.npy')\n", (5051, 5079), True, 'import numpy as np\n'), ((5097, 5133), 'numpy.load', 'np.load', (["(output + '/vote_scores.npy')"], {}), "(output + '/vote_scores.npy')\n", (5104, 5133), True, 'import numpy as np\n'), ((5199, 5245), 'os.path.isfile', 'os.path.isfile', (["(output + '/mediator_input.npy')"], {}), "(output + '/mediator_input.npy')\n", (5213, 5245), False, 'import os\n'), ((5255, 5275), 'create_pair_set.create', 'create', (['output', 'args'], {}), '(output, args)\n', (5261, 5275), False, 'from create_pair_set import create\n'), ((5287, 5329), 'os.path.isfile', 'os.path.isfile', (["(output + '/pairs_pred.npy')"], {}), "(output + '/pairs_pred.npy')\n", (5301, 5329), False, 'import os\n'), ((5339, 5386), 'mediator.test_mediator', 'test_mediator', (['args', "(output + '/pairs_pred.npy')"], {}), "(args, output + '/pairs_pred.npy')\n", (5352, 5386), False, 'from mediator import train_mediator, test_mediator\n'), ((5496, 5544), 'numpy.where', 'np.where', (["(pair_pred > args.mediator['threshold'])"], {}), "(pair_pred > args.mediator['threshold'])\n", (5504, 5544), True, 'import numpy as np\n'), ((5849, 5885), 'numpy.where', 'np.where', (['(pairs[:, 0] != pairs[:, 1])'], {}), '(pairs[:, 0] != pairs[:, 1])\n', (5857, 5885), True, 'import numpy as np\n'), ((5907, 5929), 'numpy.sort', 'np.sort', (['pairs'], {'axis': '(1)'}), '(pairs, axis=1)\n', (5914, 5929), True, 'import numpy as np\n'), ((3741, 3761), 'mediator.train_mediator', 'train_mediator', (['args'], {}), '(args)\n', (3755, 3761), False, 'from mediator import train_mediator, test_mediator\n'), ((4534, 4548), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4545, 4548), False, 'import pickle\n'), ((5724, 5763), 'numpy.load', 'np.load', 
(["args.groundtruth['pair_gt_fn']"], {}), "(args.groundtruth['pair_gt_fn'])\n", (5731, 5763), True, 'import numpy as np\n'), ((3694, 3728), 'create_pair_set.create', 'create', (["(exp_root + '/output')", 'args'], {}), "(exp_root + '/output', args)\n", (3700, 3728), False, 'from create_pair_set import create\n'), ((4691, 4705), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4702, 4705), False, 'import pickle\n'), ((5801, 5823), 'numpy.where', 'np.where', (['(pair_gt == 1)'], {}), '(pair_gt == 1)\n', (5809, 5823), True, 'import numpy as np\n'), ((3572, 3622), 'os.path.isfile', 'os.path.isfile', (["(output_cdp + '/mediator_input.npy')"], {}), "(output_cdp + '/mediator_input.npy')\n", (3586, 3622), False, 'import os\n'), ((3630, 3676), 'os.path.isfile', 'os.path.isfile', (["(output_cdp + '/pair_label.npy')"], {}), "(output_cdp + '/pair_label.npy')\n", (3644, 3676), False, 'import os\n')] |
from scipy import stats
from scipy import optimize
from scipy.special import logsumexp
from scipy.cluster import hierarchy
import scipy
import warnings
import numpy as np
import sys
import collections
import operator
import itertools
from time import perf_counter
from datetime import timedelta
from enum import Enum
from . import common
from . import variants as variants_
from . import cn_tools
from . import cn_hmm
from . import itree
from .genome import Interval
from .. import __pkg_name__, __version__
# Beta(5, 1) prior parameters for PSV f-values; this density increases towards 1.
_BETA_DISTR_A = 5.0
_BETA_DISTR_B = 1.0
def _get_f_prior(f_values):
    """Log-prior of a set of f-values under the module-level Beta distribution.

    Evaluated as log(CDF(max_f) - CDF(max_f - eps)), i.e. the log-probability of
    a narrow interval just below the largest f-value.
    """
    EPSILON = 1e-6
    top = np.max(f_values)
    log_cdf_hi = stats.beta.logcdf(top, _BETA_DISTR_A, _BETA_DISTR_B)
    log_cdf_lo = stats.beta.logcdf(top - EPSILON, _BETA_DISTR_A, _BETA_DISTR_B)
    # Log-space difference of the two CDF values.
    return logsumexp((log_cdf_hi, log_cdf_lo), b=(1, -1))
def _e_step(psv_infos, psv_f_values, genotype_priors, n_samples):
    """
    Expectation step over all PSVs currently in the EM set.

    Returns
        - total ln likelihood,
        - matrix of P(sample_genotype | allele_counts, psvs), size = (n_samples, n_sample_genotypes).
    """
    n_psvs = len(psv_infos)
    n_sample_genotypes = len(genotype_priors)
    prob_matrix = np.zeros((n_samples, n_sample_genotypes))
    # Fix: the `np.bool` alias was removed in NumPy 1.24 — use the builtin `bool`.
    usable_samples = np.zeros(n_samples, dtype=bool)
    total_f_prior = 0
    for psv_info in psv_infos:
        if not psv_info.in_em:
            continue
        sample_ids = psv_info.sample_ids
        usable_samples[sample_ids] = True
        precomp_data = psv_info.precomp_data_ref_cn
        psv_gt_probs = psv_info.em_psv_gt_probs
        exponent = psv_info.info_content
        psv_ix = psv_info.psv_ix
        f_values = psv_f_values[psv_ix]
        total_f_prior += _get_f_prior(f_values)
        f_pow_combs = precomp_data.f_power_combinations(f_values)
        all_psv_gt_coefs = precomp_data.eval_poly_matrices(f_pow_combs)
        for sample_gt_ix, psv_gt_coefs in enumerate(all_psv_gt_coefs):
            # Each PSV's contribution is down-weighted by its information content.
            curr_probs = exponent * logsumexp(psv_gt_coefs + psv_gt_probs, axis=1)
            prob_matrix[sample_ids, sample_gt_ix] += curr_probs
    with np.errstate(invalid='ignore'):
        prob_matrix += genotype_priors[np.newaxis, :]
        # Samples without any usable PSV observation get NaN rows.
        prob_matrix[~usable_samples] = np.nan
        row_sums = logsumexp(prob_matrix, axis=1)
        total_lik = np.sum(row_sums[usable_samples]) + total_f_prior
        prob_matrix -= row_sums[:, np.newaxis]
    return total_lik, prob_matrix
def _minus_lik_fn(gt_probs_regular, precomp_data, psv_gt_probs):
    """Build the negative log-likelihood function of PSV f-values, minimized in the M-step.

    gt_probs_regular: regular-space (exponentiated) sample genotype probabilities,
        shape (n_samples, n_genotypes), restricted to samples used by this PSV.
    Returns a callable fn(f_values) -> float suitable for scipy.optimize.minimize.
    """
    n_samples, n_genotypes = gt_probs_regular.shape
    def fn(f_values):
        # Negative Beta log-prior; infinite outside the open interval (0, 1).
        beta_prior = -_get_f_prior(f_values)
        if np.isinf(beta_prior):
            return beta_prior
        inner_term = np.full((n_samples, n_genotypes), np.nan)
        f_pow_combs = precomp_data.f_power_combinations(f_values)
        all_psv_gt_coefs = precomp_data.eval_poly_matrices(f_pow_combs)
        for sample_gt_ix, psv_gt_coefs in enumerate(all_psv_gt_coefs):
            inner_term[:, sample_gt_ix] = logsumexp(psv_gt_coefs + psv_gt_probs, axis=1)
        # Expected negative log-likelihood under the current genotype posteriors.
        return -np.sum(inner_term * gt_probs_regular) + beta_prior
    return fn
def _m_step(sample_gt_probs, psv_infos, psv_ixs, prev_psv_f_values):
    """
    Maximization step: re-optimize f-values for every PSV in `psv_ixs`.

    Returns
        - matrix with f-values (n_psvs x n_copies),
        - total_likelihood.
    """
    n_copies = prev_psv_f_values.shape[1]
    new_f_values = np.full_like(prev_psv_f_values, np.nan)
    opt_options = dict(maxiter=50)
    # Stay strictly inside (0, 1): the Beta prior is infinite at the boundaries,
    # which would break the optimization.
    opt_bounds = ((1e-6, 1 - 1e-6),) * n_copies
    gt_probs_regular = np.exp(sample_gt_probs)
    total_lik = 0
    for psv_ix in psv_ixs:
        info = psv_infos[psv_ix]
        assert len(info.sample_ids) > 0
        minus_lik = _minus_lik_fn(
            gt_probs_regular[info.sample_ids], info.precomp_data_ref_cn, info.em_psv_gt_probs)
        sol = optimize.minimize(minus_lik, x0=prev_psv_f_values[psv_ix],
            bounds=opt_bounds, options=opt_options, method='L-BFGS-B')
        total_lik -= sol.fun
        new_f_values[psv_ix] = sol.x
    return new_f_values, total_lik
def _match_psv_alleles(psv, regions2, genome):
    """Map each paralogous region to the PSV allele index observed there.

    Returns a tuple of allele indices, one per copy (index 0 is the main region
    with the reference allele 0). Raises RuntimeError when a pos2 entry cannot
    be placed in any region.
    """
    pos2 = psv.info['pos2']
    if len(pos2) == 1:
        # Single paralogous position: trivially (ref, alt).
        return (0, 1)
    res = [None] * (len(regions2) + 1)
    res[0] = 0
    for entry in pos2:
        # Entry format: "chrom:pos:strand:allele" (pos is 1-based).
        chrom, pos, strand, allele = entry.split(':')
        chrom_id = genome.chrom_id(chrom)
        pos = int(pos) - 1
        strand = strand == '+'
        allele = int(allele)
        if not strand:
            # On the reverse strand the entry points at the other end of the allele.
            pos += len(psv.alleles[allele]) - 1
        for i, (region2, strand2) in enumerate(regions2):
            if res[i + 1] is None and strand == strand2 and region2.contains_point(chrom_id, pos):
                res[i + 1] = allele
                break
        else:
            # No region matched this entry — report context and abort.
            s = 'Cannot match PSV alleles:\n'
            for i, entry2 in enumerate(pos2, start=1):
                s += '    {} {}{}\n'.format(i, entry2, ' !!!' if entry == entry2 else '')
            s += 'With regions:\n    '
            s += cn_tools.regions2_str(regions2, genome, use_comma=True, sep='\n    ')
            common.log(s)
            raise RuntimeError('Cannot match PSV alleles')
    return tuple(res)
def _select_psv_sample_pairs(region_group_extra, samples, outp, min_samples):
    """Select, for every PSV, the samples usable in the EM algorithm.

    A sample is usable for a PSV when it has good genotype observations and its
    reliable region (agCN = refCN) fully covers the PSV. PSVs with fewer than
    `min_samples` usable samples are excluded from the EM. Writes a per-PSV
    summary to `outp`.
    """
    group = region_group_extra.region_group
    psv_infos = region_group_extra.psv_infos
    sample_reliable_regions = region_group_extra.sample_reliable_regions
    group_name = group.name
    n_psvs = len(psv_infos)
    n_samples = len(samples)
    outp.write('# Group {}. Sample regions with agCN = refCN = {}:\n'.format(group_name, group.cn))
    reliable_regions = [None] * n_samples
    n_reliable = 0
    for sample_id, sample in enumerate(samples):
        sample_region = sample_reliable_regions[sample_id]
        if sample_region is None:
            continue
        outp.write('# {}: {:,}-{:,}\n'.format(sample, sample_region.start + 1, sample_region.end))
        n_reliable += 1
    if n_reliable < min_samples:
        outp.write('# Too few samples ({} < {}).\n'.format(n_reliable, min_samples))
        return
    for psv_ix, psv_info in enumerate(psv_infos):
        info_str = ''
        # Fix: the `np.bool` alias was removed in NumPy 1.24 — use the builtin `bool`.
        use_samples = np.zeros(n_samples, dtype=bool)
        for sample_id, sample_region in enumerate(sample_reliable_regions):
            good_obs = psv_info.psv_gt_probs[sample_id] is not None
            is_ref = sample_region is not None \
                and sample_region.start <= psv_info.start and psv_info.end <= sample_region.end
            info_str += '\t{}{}'.format('+' if good_obs else '-', '+' if is_ref else '-')
            use_samples[sample_id] = good_obs and is_ref
        psv_info.set_use_samples(use_samples)
        psv_info.in_em = psv_info.n_used_samples >= min_samples
        outp.write('{}\t{}:{}\t{}{}\n'.format(group_name, psv_info.chrom, psv_info.start + 1,
            psv_info.n_used_samples, info_str))
class _PsvInfo:
    """Per-PSV bookkeeping for the reliable-PSV EM algorithm."""
    def __init__(self, psv_ix, psv, region_group, n_samples, genome):
        self.psv_ix = psv_ix
        self.psv = psv
        self.chrom = psv.chrom
        self.start = psv.start
        self.end = psv.start + len(psv.ref)
        # Indel if not all alleles have equal length.
        self.is_indel = len(set(map(len, psv.alleles))) != 1
        self.allele_corresp = _match_psv_alleles(psv, region_group.regions2, genome)
        self.ref_cn = region_group.cn
        self.info_content = np.nan
        self.n_used_samples = 0
        # Whether this PSV participates in the EM algorithm.
        self.in_em = False
        self.sample_ids = None
        # keys: agCN values.
        self.precomp_datas = {}
        self.sample_cns = [None] * n_samples
        self.psv_gt_probs = [None] * n_samples
        self.em_psv_gt_probs = None
        self.precomp_data_ref_cn = None
        self.support_matrix = None
    def set_use_samples(self, use_samples):
        """Store indices of samples usable for this PSV (from a boolean mask)."""
        self.sample_ids = np.where(use_samples)[0]
        self.n_used_samples = len(self.sample_ids)
    def distance(self, other):
        """Gap in base pairs between this PSV and another one."""
        if other.psv_ix < self.psv_ix:
            return self.start - other.end
        return other.start - self.end
    def __str__(self):
        return '{}:{}'.format(self.chrom, self.start + 1)
    def str_ext(self):
        """Extended description: position, variant type and information content."""
        return '{} ({} information content {:.4f})'.format(
            self, 'indel,' if self.is_indel else 'snp, ', self.info_content)
    def __lt__(self, other):
        """
        Used in sort, so better PSV would have lower key.
        """
        if not other.in_em:
            return True
        if not self.in_em:
            return False
        # Prefer higher information content when the difference is meaningful.
        if (self.info_content < 0.9 or other.info_content < 0.9) \
                and (abs(self.info_content - other.info_content) >= 0.01):
            return self.info_content > other.info_content
        # Prefer SNPs over indels; fall back to genomic order.
        if self.is_indel != other.is_indel:
            return other.is_indel
        return self.start < other.start
    def check_complicated_pos(self, region, region_seq, homopolymer_len=5, distance_to_edge=10):
        """Exclude the PSV from the EM if it lies near a region edge or before a homopolymer."""
        if self.start - region.start < distance_to_edge or region.end - self.end < distance_to_edge:
            self.in_em = False
            return
        if self.end == region.end:
            return
        after_psv = region_seq[self.end - region.start : self.end + homopolymer_len - region.start]
        # A single repeated base right after the PSV indicates a homopolymer run.
        if len(set(after_psv)) == 1:
            self.in_em = False
    def create_em_psv_gt_probs(self):
        """Stack genotype probabilities of the usable samples into one EM matrix."""
        if not self.n_used_samples:
            return
        self.precomp_data_ref_cn = self.precomp_datas[self.ref_cn]
        self.em_psv_gt_probs = np.zeros((self.n_used_samples, self.precomp_data_ref_cn.n_psv_genotypes))
        for i, sample_id in enumerate(self.sample_ids):
            assert self.psv_gt_probs[sample_id] is not None
            self.em_psv_gt_probs[i] = self.psv_gt_probs[sample_id]
def _calculate_psv_info_content(group_name, psv_infos, min_samples, outp):
    """Compute the information content of every PSV and log a summary to `outp`.

    Information content averages, over the usable samples, the expected number
    of non-reference alleles in the PSV genotype, normalized to [0, 1]; PSVs
    with content below 1e-6 are excluded from the EM.
    """
    outp.write('Region group {}\n'.format(group_name))
    for psv_info in psv_infos:
        if not psv_info.n_used_samples:
            outp.write('{} no applicable samples, skipping.\n'.format(psv_info))
            continue
        precomp_data = psv_info.precomp_data_ref_cn
        psv_gt_probs = psv_info.em_psv_gt_probs
        n_alleles = precomp_data.n_alleles
        # Multiplier per genotype: number of distinct non-reference alleles minus one.
        gt_mult = np.fromiter((n_alleles - gt.count(0) - 1 for gt in precomp_data.psv_genotypes),
            np.int16, len(precomp_data.psv_genotypes))
        sum_info_content = 0.0
        inform_samples = 0
        for i in range(psv_info.n_used_samples):
            curr_info_content = np.sum(np.exp(psv_gt_probs[i]) * gt_mult)
            sum_info_content += curr_info_content
            if curr_info_content >= 0.8:
                inform_samples += 1
        outp.write('{} informative: {}/{} samples. '.format(psv_info, inform_samples, psv_info.n_used_samples))
        psv_info.info_content = sum_info_content / psv_info.n_used_samples / (n_alleles - 1)
        outp.write('Information content: {:.4f}\n'.format(psv_info.info_content))
        if psv_info.info_content < 1e-6:
            psv_info.in_em = False
def _filter_close_psvs(psv_infos, outp, close_psv_dist):
    """Drop from the EM any PSV lying within `close_psv_dist` bp of a better PSV.

    PSVs are visited best-first (see _PsvInfo.__lt__); each kept PSV disables
    its close neighbors in both directions. A summary is written to `outp`.
    """
    outp.write('\nFiltering closeby PSVs\n')
    n_psvs = len(psv_infos)
    removed_a = sum(not psv_info.in_em for psv_info in psv_infos)
    for psv_info in sorted(psv_infos):
        psv_ix = psv_info.psv_ix
        if not psv_info.in_em:
            continue
        # Scan left (-1) and right (+1) until the distance grows past the cutoff.
        for step in (-1, 1):
            for j in itertools.count(psv_ix + step, step):
                if j < 0 or j >= n_psvs:
                    break
                oth_info = psv_infos[j]
                dist = oth_info.distance(psv_info)
                if dist > close_psv_dist:
                    break
                if oth_info.in_em:
                    oth_info.in_em = False
                    outp.write('Removing PSV {} - close to PSV {}, distance {}\n'
                        .format(oth_info.str_ext(), psv_info.str_ext(), dist))
    removed_b = sum(not psv_info.in_em for psv_info in psv_infos)
    outp.write('Total {:5} PSVs\n'.format(n_psvs))
    outp.write(' - {:5} uninformative PSVs\n'.format(removed_a))
    outp.write(' - {:5} closeby PSVs\n'.format(removed_b - removed_a))
    outp.write(' = {:5} retained PSVs\n'.format(n_psvs - removed_b))
def _define_sample_gt_priors(n_copies, sample_genotypes):
# Sample priors are distributed by distance to the reference genotypes.
# Minimal prior is for (4,0) or (6,0,0) ..., and it is 1e-6.
MIN_PRIOR = -6 * np.log(10)
ref_gt = np.full(n_copies, 2)
max_dist = 2 * (n_copies - 1)
priors = np.full(len(sample_genotypes), np.nan)
for i, gt in enumerate(sample_genotypes):
dist_to_ref = np.sum(np.abs(ref_gt - gt)) // 2
priors[i] = MIN_PRIOR * dist_to_ref / max_dist
priors -= logsumexp(priors)
return priors
def _cluster_psvs(psv_infos, psv_counts, n_samples):
    """Group PSVs into clusters by correlation of their per-sample reference fractions.

    Builds a pairwise correlation matrix over the PSVs, converts it into a
    distance matrix, and runs complete-linkage hierarchical clustering with two
    clusters. Returns a list of arrays of PSV indices: the first entry contains
    all EM-usable PSVs (dropped if one cluster covers almost everything),
    followed by each sufficiently large cluster.
    """
    n_psvs = len(psv_infos)
    ref_fractions = np.full((n_psvs, n_samples), np.nan)
    for psv_info in psv_infos:
        allele_corresp = psv_info.allele_corresp
        # Scale by the fraction of copies carrying the reference allele.
        mult = len(allele_corresp) / allele_corresp.count(0)
        for sample_id in psv_info.sample_ids:
            allele_counts = psv_counts[psv_info.psv_ix][sample_id].allele_counts
            ref_fractions[psv_info.psv_ix, sample_id] = allele_counts[0] / sum(allele_counts) * mult
    cor_matrix = np.full((n_psvs, n_psvs), np.nan)
    np.fill_diagonal(cor_matrix, 1.0)
    with warnings.catch_warnings():
        # NOTE(review): `stats.PearsonRConstantInputWarning` was renamed in newer
        # SciPy releases (ConstantInputWarning) — confirm against the pinned version.
        warnings.filterwarnings('ignore', category=stats.PearsonRConstantInputWarning)
        for psv_i in range(n_psvs):
            for psv_j in range(psv_i + 1, n_psvs):
                # Samples with present observations for both PSVs.
                mask = ~np.logical_or(np.isnan(ref_fractions[psv_i]), np.isnan(ref_fractions[psv_j]))
                if np.sum(mask) < 5:
                    continue
                cor, _ = stats.pearsonr(ref_fractions[psv_i, mask], ref_fractions[psv_j, mask])
                cor_matrix[psv_i, psv_j] = cor_matrix[psv_j, psv_i] = cor
    dist_matrix = np.full((n_psvs, n_psvs), np.nan)
    np.fill_diagonal(dist_matrix, 0)
    # Fix: the `np.bool` alias was removed in NumPy 1.24 — use the builtin `bool`.
    use_psvs = np.ones(n_psvs, dtype=bool)
    for psv_i in range(n_psvs):
        if not use_psvs[psv_i]:
            continue
        for psv_j in range(psv_i + 1, n_psvs):
            if not use_psvs[psv_j]:
                continue
            mask_i = ~np.isnan(cor_matrix[psv_i])
            mask_j = ~np.isnan(cor_matrix[psv_j])
            mask = np.logical_and(mask_i, mask_j)
            mask_size = np.sum(mask)
            if not mask_size:
                # No shared observations: discard the PSV with fewer of them.
                if np.sum(mask_i) > np.sum(mask_j):
                    use_psvs[psv_j] = False
                else:
                    use_psvs[psv_i] = False
                    break
            else:
                dist_matrix[psv_i, psv_j] = dist_matrix[psv_j, psv_i] = \
                    scipy.spatial.distance.pdist((cor_matrix[psv_i, mask], cor_matrix[psv_j, mask])) / mask_size
    all_usable = np.array([psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em])
    MIN_CLUSTER_SIZE = 5
    psv_ixs = np.where(use_psvs)[0]
    if len(psv_ixs) < MIN_CLUSTER_SIZE * 2:
        return [all_usable]
    condensed_dist = scipy.spatial.distance.squareform(dist_matrix[psv_ixs[:, None], psv_ixs])
    linkage = hierarchy.linkage(condensed_dist, method='complete')
    clusters = hierarchy.fcluster(linkage, 2, criterion='maxclust')
    cluster_sizes = np.bincount(clusters)
    res = [all_usable]
    if len(cluster_sizes) <= 1 or np.max(cluster_sizes) < MIN_CLUSTER_SIZE:
        return res
    for cluster, count in enumerate(cluster_sizes):
        if count < MIN_CLUSTER_SIZE:
            continue
        res.append(psv_ixs[clusters == cluster])
        # If one cluster covers almost all PSVs, drop the "all usable" entry.
        if len(clusters) - count < MIN_CLUSTER_SIZE:
            res[0] = None
    if res[0] is None:
        del res[0]
    return res
# Result of one EM run on a PSV cluster; used to keep the best-scoring cluster.
_BestCluster = collections.namedtuple('_BestCluster',
    'cluster_i n_reliable info_content likelihood psv_f_values sample_gt_probs')
# Minimal number of gene copies (refCN // 2) for which reliable-PSV search is attempted.
_SMALLEST_COPY_NUM = 2
def write_headers(out, samples, args):
    """Write header lines for all output streams managed by `out`.

    Each `checked_write` call targets one named output file; headers include
    per-sample columns (tab-separated) where applicable.
    """
    samples_str = '\t'.join(samples) + '\n'
    out.checked_write('use_psv_sample',
        '# Each element stores two booleans: first shows if the PSV has good observations at the sample,\n',
        '# Second shows if the sample has the reference copy number at the PSV.\n',
        '# If PSV has less than {} "good" samples, it is not used.\n'.format(args.min_samples),
        'region_group\tpsv\tgood\t' + samples_str)
    # One "copyN" column per gene copy, bounded by both max_ref_cn and pscn_bound.
    max_ref_cn = min(args.max_ref_cn, args.pscn_bound[0])
    col_copies = '\t'.join(map('copy{}'.format, range(1, max_ref_cn // 2 + 1))) + '\n'
    out.checked_write('interm_psv_f_values', 'region_group\tcluster\titeration\tpsv\tinfo_content\t' + col_copies)
    out.checked_write('psv_f_values', 'region_group\tpsv\tn_samples\tuse_in_em\tinfo_content\t' + col_copies)
    out.checked_write('em_likelihoods',
        'region_group\tcluster\titeration\ttime\tlikelihood\tn_reliable\treliable_info\n')
    out.checked_write('em_sample_gts', 'region_group\tcluster\titeration\tgenotype\tprior\t' + samples_str)
    out.checked_write('paralog_cn', 'region_group\tsample\tregion1\tgenotypes\tmarginal_probs\n')
    out.checked_write('gene_conversion',
        '#chrom\tstart\tend\tsample\tregion_group\tmain_gt\treplacement_gt\tqual\tn_psvs\n')
def create_psv_infos(psvs, region_group, n_samples, genome):
    """Create a _PsvInfo wrapper for every PSV in `psvs`, preserving order."""
    infos = []
    for psv_ix, psv in enumerate(psvs):
        infos.append(_PsvInfo(psv_ix, psv, region_group, n_samples, genome))
    return infos
def find_reliable_psvs(region_group_extra, samples, genome, out, min_samples,
        reliable_threshold, max_agcn):
    """Run the EM algorithm to estimate PSV f-values and find reliable PSVs.

    Tries each PSV cluster as an EM starting point, keeps the run with the best
    likelihood, and stores the resulting f-values on `region_group_extra`.
    Intermediate and final values are logged through the `out` streams.
    """
    # ===== Setting up variables =====
    psvs = region_group_extra.psvs
    n_psvs = len(psvs)
    region_group = region_group_extra.region_group
    group_name = region_group.name
    cn = region_group.cn
    n_copies = cn // 2
    n_samples = len(samples)
    # Nothing to do for too few PSVs or copy numbers outside the supported range.
    if not n_psvs or n_copies < _SMALLEST_COPY_NUM or n_copies > max_agcn // 2:
        return
    # ===== Selecting a set of PSVs used in the EM algorithm =====
    _select_psv_sample_pairs(region_group_extra, samples, out.use_psv_sample, min_samples)
    psv_infos = region_group_extra.psv_infos
    region_sequence = region_group.region1.get_sequence(genome)
    for psv_info in psv_infos:
        psv_info.check_complicated_pos(region_group.region1, region_sequence)
        psv_info.create_em_psv_gt_probs()
    if not any(psv_info.in_em for psv_info in psv_infos):
        return
    timer_start = perf_counter()
    _calculate_psv_info_content(group_name, psv_infos, min_samples, out.psv_filtering)
    _filter_close_psvs(psv_infos, out.psv_filtering, close_psv_dist=100)
    em_psv_ixs = np.array([psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em])
    if not len(em_psv_ixs):
        return
    common.log('    Searching for reliable PSVs')
    sample_genotypes = variants_.all_gt_counts(n_copies, cn)
    sample_genotypes_str = [','.join(map(str, gt)) for gt in sample_genotypes]
    sample_genotype_priors = _define_sample_gt_priors(n_copies, sample_genotypes)
    # ===== EM iterations, try several clusters =====
    best_cluster = None
    psv_clusters = _cluster_psvs(psv_infos, region_group_extra.psv_read_counts, n_samples)
    for cluster_i, cluster in enumerate(psv_clusters, start=1):
        # Initialize f-values: 0.9 for PSVs in the current cluster, 0.5 elsewhere.
        psv_f_values = np.full((n_psvs, n_copies), 0.5)
        psv_f_values[cluster] = 0.9
        total_lik = -np.inf
        for iteration in range(1, 101):
            if iteration > 1:
                psv_f_values, _ = _m_step(sample_gt_probs, psv_infos, em_psv_ixs, psv_f_values)
            for psv_ix in em_psv_ixs:
                psv_info = psv_infos[psv_ix]
                out.interm_psv_f_values.write('{}\t{}\t{}\t{}:{}\t{:.6f}\t{}\n'.format(group_name, cluster_i,
                    iteration, psv_info.chrom, psv_info.start + 1, psv_info.info_content,
                    '\t'.join(map('{:.6f}'.format, psv_f_values[psv_ix]))))
            old_lik = total_lik
            total_lik, sample_gt_probs = _e_step(psv_infos, psv_f_values, sample_genotype_priors, n_samples)
            for gt_i, gt in enumerate(sample_genotypes_str):
                out.em_sample_gts.write('{}\t{}\t{}\t{}\t{:.3f}\t'.format(group_name, cluster_i, iteration, gt,
                    sample_genotype_priors[gt_i] / common.LOG10))
                out.em_sample_gts.write('\t'.join(map('{:.3f}'.format, sample_gt_probs[:, gt_i] / common.LOG10)))
                out.em_sample_gts.write('\n')
            common.log('    Cluster {}, iteration {:2}, EM likelihood: {:,.3f}'.format(
                cluster_i, iteration, total_lik / common.LOG10))
            with np.errstate(invalid='ignore'):
                # A PSV is reliable when every copy's f-value passes the threshold.
                curr_reliable = np.where(np.all(psv_f_values >= reliable_threshold, axis=1))[0]
            n_reliable = len(curr_reliable)
            mean_info_content = np.mean([psv_infos[i].info_content for i in curr_reliable]) if n_reliable else 0.0
            out.em_likelihoods.write('{}\t{}\t{}\t{}\t{:.7f}\t{}\t{:.6f}\n'.format(group_name, cluster_i, iteration,
                str(timedelta(seconds=perf_counter() - timer_start))[:-5], total_lik / common.LOG10,
                n_reliable, mean_info_content))
            # Stop when the likelihood improvement falls below the tolerance.
            if total_lik < old_lik + 0.01:
                break
        if best_cluster is None or best_cluster.likelihood < total_lik:
            best_cluster = _BestCluster(cluster_i, n_reliable, mean_info_content, total_lik,
                psv_f_values, sample_gt_probs)
    # ===== Save results from the last cluster =====
    psv_f_values = best_cluster.psv_f_values
    sample_gt_probs = best_cluster.sample_gt_probs
    if len(psv_clusters) > 1:
        common.log('    === Best cluster {}: likelihood {:.3f}, {} reliable PSVs with mean information content {:.3f}'
            .format(best_cluster.cluster_i, best_cluster.likelihood / common.LOG10,
            best_cluster.n_reliable, best_cluster.info_content))
    if best_cluster.n_reliable and best_cluster.info_content < 0.8:
        common.log('WARN: Many reliable PSVs have low information content.')
    # Also estimate f-values for PSVs that were excluded from the EM but had samples.
    discarded_psvs = np.array([psv_info.psv_ix for psv_info in psv_infos
        if not psv_info.in_em and psv_info.n_used_samples > 0])
    if len(discarded_psvs):
        oth_f_values, _ = _m_step(sample_gt_probs, psv_infos, discarded_psvs, np.full((n_psvs, n_copies), 0.5))
        psv_f_values[discarded_psvs, :] = oth_f_values[discarded_psvs, :]
    for psv_info in psv_infos:
        out.psv_f_values.write('{}\t{}:{}\t{}\t{}\t{:.6f}\t{}\n'.format(group_name, psv_info.chrom,
            psv_info.start + 1, psv_info.n_used_samples, 'T' if psv_info.in_em else 'F', psv_info.info_content,
            '\t'.join(map('{:.6f}'.format, psv_f_values[psv_info.psv_ix]))))
    region_group_extra.set_f_values(psv_f_values)
def _single_sample_e_step(sample_id, sample_cn, psv_infos, reliable_psv_ixs):
"""
Returns sample genotype probabilities.
"""
n_sample_genotypes = len(psv_infos[reliable_psv_ixs[0]].support_matrix[sample_id])
res = np.zeros(n_sample_genotypes)
for psv_ix in reliable_psv_ixs:
psv_info = psv_infos[psv_ix]
assert sample_cn == psv_info.sample_cns[sample_id]
res += psv_info.support_matrix[sample_id]
return res
def calculate_marginal_probs(genotypes, gt_probs, n_copies, cn):
    """
    Returns
        - marginal probabilities (n_copies x cn + 1), matrix[x, y] represents log probability of the CN x at copy y,
        - paralog-specific CN (n_copies),
        - paralog-specific CN qual (n_copies).

    NOTE: `gt_probs` is normalized in place; callers observe the normalized
    log-probabilities through the same array afterwards.
    """
    # Log-probability sentinel. Every cell is assigned in the loop below;
    # the original used `-np.nan`, which is simply nan (the negation is a
    # no-op) — -inf is the meaningful "log of zero" placeholder.
    marginal_probs = np.full((n_copies, cn + 1), -np.inf)
    gt_probs -= logsumexp(gt_probs)
    for copy in range(n_copies):
        for curr_copy_cn in range(cn + 1):
            # All genotypes that assign `curr_copy_cn` to this copy.
            ixs = [i for i, gt in enumerate(genotypes) if gt[copy] == curr_copy_cn]
            marginal_probs[copy, curr_copy_cn] = logsumexp(gt_probs[ixs])
    paralog_cn = np.zeros(n_copies, dtype=np.int8)
    paralog_qual = np.zeros(n_copies)
    for copy in range(n_copies):
        best_cn = np.argmax(marginal_probs[copy])
        paralog_cn[copy] = best_cn
        # Phred-scale quality of the most likely per-copy CN.
        paralog_qual[copy] = common.phred_qual(marginal_probs[copy], best_cn)
    return marginal_probs, paralog_cn, paralog_qual
def paralog_cn_str(paralog_cn, paralog_qual, min_qual_value=5):
    """Format paralog-specific copy numbers with their qualities.

    Copies whose quality falls below `min_qual_value` are masked: the CN is
    rendered as '?' and the quality becomes 0. Remaining qualities are
    truncated to integers.

    Returns
        - paralog CN: comma-separated string,
        - paralog qual: tuple of integers,
        - any_known: bool (at least one copy passed the threshold).
    """
    cn_fields = []
    int_quals = []
    any_known = False
    for copy_cn, copy_qual in zip(paralog_cn, paralog_qual):
        passed = copy_qual >= min_qual_value
        cn_fields.append(str(copy_cn) if passed else '?')
        int_quals.append(int(copy_qual) if passed else 0)
        any_known = any_known or passed
    return ','.join(cn_fields), tuple(int_quals), any_known
def _add_paralog_filter(results, filt):
for res in results:
res.paralog_filter.add(filt)
class GeneConversionHmm(cn_hmm.HmmModel):
    """Single-sample HMM over PSV observations used to find gene-conversion tracts.

    States are sample genotypes; switching away from the overall best
    genotype `best_gt` is penalized, so only well-supported runs of another
    genotype survive Viterbi decoding.
    """
    def __init__(self, best_gt, n_gts, n_observations, stay_prob=0.99, initial_best_prob=0.5):
        # A single HMM chain ("sample") is analyzed at a time.
        n_samples = 1
        super().__init__(n_samples, n_gts, n_observations, max_state_dist=n_gts * 2)
        # Log transition matrix; disallowed transitions stay at -inf.
        transition = np.full((n_gts, n_gts), -np.inf)
        stay_log = np.log(stay_prob)
        single_trans = np.log1p(-stay_prob)
        mult_trans = np.log((1 - stay_prob) / (n_gts - 1))
        for state in range(n_gts):
            if state == best_gt:
                # From best_gt: equal low log-probability to every state.
                # NOTE(review): this row is not normalized (sums to roughly
                # 1 - stay_prob) and gives best_gt no elevated self-loop —
                # possibly intentional, but worth confirming.
                transition[state] = mult_trans
            else:
                # From any other genotype: either stay, or return to best_gt;
                # direct jumps between two non-best states are impossible.
                transition[state, best_gt] = single_trans
                transition[state, state] = stay_log
        self.set_transition(transition)
        # Initial distribution: initial_best_prob on best_gt, the remainder
        # spread uniformly over the other genotypes.
        initial = np.full(n_gts, np.log((1 - initial_best_prob) / (n_gts - 1)))
        initial[best_gt] = np.log(initial_best_prob)
        self.set_initial(initial)
# One detected gene-conversion event: start/end coordinates of the tract,
# the sample's main genotype, the replacement genotype within the tract,
# the event quality, and the number of PSVs spanning it.
GeneConversion = collections.namedtuple('GeneConversion', 'start end main_gt replacement_gt qual n_psvs')
def _detect_gene_conversion(sample_id, genotypes_str, sample_gt_probs, psv_infos, semirel_psv_ixs):
    """Search for gene-conversion tracts in a single sample.

    Decodes a `GeneConversionHmm` over the semi-reliable PSVs and reports
    every multi-PSV segment whose genotype differs from the overall best
    genotype. Returns a list of `GeneConversion` tuples.
    """
    n_psvs = len(semirel_psv_ixs)
    n_genotypes = len(genotypes_str)
    best_gt = np.argmax(sample_gt_probs)
    model = GeneConversionHmm(best_gt, n_genotypes, n_psvs)
    # Emission matrix shape: (1 HMM sample, genotype states, PSV observations).
    emission_matrix = np.zeros((1, n_genotypes, n_psvs))
    HMM_SAMPLE_ID = 0
    for i, psv_ix in enumerate(semirel_psv_ixs):
        emission_matrix[HMM_SAMPLE_ID, :, i] = psv_infos[psv_ix].support_matrix[sample_id]
    model.set_emission_matrices(emission_matrix)
    prob, states_vec = model.viterbi(HMM_SAMPLE_ID)
    model.run_forward_backward()
    res = []
    for segment in cn_hmm.get_simple_path(states_vec):
        # Skip segments in the best genotype as well as single-PSV segments.
        if segment.state == best_gt or segment.end_ix == segment.start_ix + 1:
            continue
        # Quality: compare the decoded segment against the null path that
        # keeps best_gt over the same interval.
        segment0 = cn_hmm.SimpleSegment(segment.start_ix, segment.end_ix, best_gt)
        probs = np.array((
            model.path_likelihood(HMM_SAMPLE_ID, (segment0,)),
            model.path_likelihood(HMM_SAMPLE_ID, (segment,))))
        probs -= logsumexp(probs)
        qual = common.phred_qual(probs, best_ix=1)
        start_psv = psv_infos[semirel_psv_ixs[segment.start_ix]].psv
        end_psv = psv_infos[semirel_psv_ixs[segment.end_ix - 1]].psv
        res.append(GeneConversion(start_psv.start, end_psv.start + len(end_psv.ref),
            genotypes_str[best_gt], genotypes_str[segment.state], qual, segment.end_ix - segment.start_ix))
    return res
def _create_sample_results_from_agcn(sample_id, region_group_extra):
    """Build one ResultEntry per constant region of the sample.

    Returns a pair:
        - list of ResultEntry objects (one per sample constant region),
        - list of [start, end) index ranges, where consecutive entries with
          the same predicted agCN are merged into a single range.
    """
    entries = []
    links = []
    group_name = region_group_extra.region_group.name
    psv_searcher = region_group_extra.psv_searcher
    for const_region in region_group_extra.sample_const_regions[sample_id]:
        entry = ResultEntry(sample_id, const_region)
        info = entry.info
        info['group'] = group_name
        info.update(const_region.info)
        start = const_region.region1.start
        end = const_region.region1.end
        info['n_windows'] = region_group_extra.group_windows_searcher.overlap_size(start, end)
        info['hmm_windows'] = region_group_extra.hmm_windows_searcher.overlap_size(start, end)
        psv_a, psv_b = psv_searcher.contained_ixs(start, end)
        info['n_psvs'] = psv_b - psv_a
        info['rel_psvs'] = np.sum(region_group_extra.psv_is_reliable[psv_a : psv_b])
        ix = len(entries)
        # Extend the previous linked range when the agCN does not change.
        if entries and entries[-1].pred_cn == entry.pred_cn:
            links[-1][1] = ix + 1
        else:
            links.append([ix, ix + 1])
        entries.append(entry)
    return entries, links
def _genotypes_str(sample_genotypes, genotypes_str_cache):
"""
Returns
- string representations of sample genotypes,
- string representations of various marginal probabilities in form (0??, 1??, ..., ?0?, ...).
"""
n_copies = len(sample_genotypes[0])
sample_cn = sum(sample_genotypes[0])
if sample_cn in genotypes_str_cache:
return genotypes_str_cache[sample_cn]
sample_genotypes_str = [','.join(map(str, gt)) for gt in sample_genotypes]
marginal_str = []
if n_copies > 2:
gt_str = ['?'] * n_copies
for copy in range(n_copies):
for curr_copy_cn in range(sample_cn + 1):
gt_str[copy] = str(curr_copy_cn)
marginal_str.append(''.join(gt_str))
gt_str[copy] = '?'
res = (sample_genotypes_str, marginal_str)
genotypes_str_cache[sample_cn] = res
return res
def _single_sample_pscn(sample_id, sample_name, sample_results, linked_ranges, region_group_extra, genome,
        out, genotypes_str_cache, max_genotypes):
    """Estimate paralog-specific CN for one sample.

    For every linked range of entries sharing the same agCN, re-runs the
    E-step over the reliable PSVs, computes marginal psCN probabilities,
    optionally detects gene conversion, writes the `paralog_cn` /
    `gene_conversion` output files, and stores psCN and filters back into the
    affected ResultEntry objects. Returns None (mutates `sample_results`).
    """
    # ====== Defining useful variables ======
    psv_infos = region_group_extra.psv_infos
    n_psvs = len(psv_infos)
    region_group = region_group_extra.region_group
    group_name = region_group.name
    n_copies = region_group.cn // 2
    outp = out.paralog_cn
    region_chrom = region_group.region1.chrom_name(genome)
    psv_searcher = region_group_extra.psv_searcher
    # ====== Calculate psCN for a set of consecutive regions with the same agCN ======
    for link_ix, (start_ix, end_ix) in enumerate(linked_ranges):
        # ===== Check if psCN can be calculated =====
        curr_results = sample_results[start_ix:end_ix]
        if not curr_results[0].sample_const_region.cn_is_known:
            _add_paralog_filter(curr_results, Filter.UncertainCN)
            continue
        # Collect PSVs contained in the range that have genotype probabilities
        # for this sample.
        psv_ixs = []
        for subresults in curr_results:
            for psv_ix in range(*psv_searcher.contained_ixs(subresults.region1.start, subresults.region1.end)):
                if psv_infos[psv_ix].psv_gt_probs[sample_id] is not None:
                    psv_ixs.append(psv_ix)
        sample_cn = curr_results[0].pred_cn
        # agCN of zero makes psCN trivial; handled by ResultEntry.paralog_to_str().
        if sample_cn == 0:
            continue
        psv_ixs = np.array(psv_ixs)
        if len(psv_ixs) == 0:
            _add_paralog_filter(curr_results, Filter.NoPSVs)
            continue
        reliable_psv_ixs = psv_ixs[region_group_extra.psv_is_reliable[psv_ixs]]
        if len(reliable_psv_ixs) == 0:
            _add_paralog_filter(curr_results, Filter.NoReliable)
            continue
        sample_genotypes = variants_.all_gt_counts(n_copies, sample_cn)
        # Too many genotype combinations: skip to keep the run tractable.
        if len(sample_genotypes) > max_genotypes:
            _add_paralog_filter(curr_results, Filter.HighCN)
            continue
        # ===== Run E-step once again to calculate psCN =====
        sample_gt_probs = _single_sample_e_step(sample_id, sample_cn, psv_infos, reliable_psv_ixs)
        assert len(sample_gt_probs) == len(sample_genotypes)
        # NOTE: calculate_marginal_probs normalizes sample_gt_probs in place.
        marginal_probs, paralog_cn, paralog_qual = calculate_marginal_probs(sample_genotypes, sample_gt_probs,
            n_copies, sample_cn)
        sample_genotypes_str, marginal_str = _genotypes_str(sample_genotypes, genotypes_str_cache)
        # ===== Detect gene conversion =====
        GENE_CONV_QUAL_THRESHOLD = 20
        MIN_SEMIREL_PSVS = 3
        semirel_psv_ixs = psv_ixs[region_group_extra.psv_is_semirel[psv_ixs]]
        # Only look for gene conversion when psCN is confident and there are
        # enough semi-reliable PSVs to span an event.
        if np.all(paralog_qual >= GENE_CONV_QUAL_THRESHOLD) and len(semirel_psv_ixs) >= MIN_SEMIREL_PSVS:
            gene_conv = _detect_gene_conversion(sample_id, sample_genotypes_str, sample_gt_probs,
                psv_infos, semirel_psv_ixs)
            for entry in gene_conv:
                out.gene_conversion.write('{}\t{}\t{}\t'.format(region_chrom, entry.start, entry.end))
                out.gene_conversion.write('{}\t{}\t{}\t{}\t{:.1f}\t{}\n'.format(sample_name, group_name,
                    entry.main_gt, entry.replacement_gt, entry.qual, entry.n_psvs))
        else:
            gene_conv = None
        # ===== Write the paralog_cn output line for this linked range =====
        region1 = Interval(curr_results[0].region1.chrom_id,
            curr_results[0].region1.start, curr_results[-1].region1.end).to_str(genome)
        outp.write('{}\t{}\t{}\t'.format(group_name, sample_name, region1))
        outp.write(' '.join(map('%s=%.1f'.__mod__,
            zip(sample_genotypes_str, np.abs(sample_gt_probs / common.LOG10)))))
        outp.write('\t')
        if n_copies > 2:
            outp.write(' '.join(map('%s=%.1f'.__mod__,
                zip(marginal_str, np.abs(marginal_probs.flatten() / common.LOG10)))))
        else:
            outp.write('*')
        outp.write('\n')
        # ===== Attach quality filters and info fields to the entries =====
        mean_info = np.mean([psv_infos[psv_ix].info_content for psv_ix in reliable_psv_ixs])
        max_f_value = np.max(np.min(region_group_extra.psv_f_values[reliable_psv_ixs], axis=1))
        if mean_info < 0.9:
            _add_paralog_filter(curr_results, Filter.LowInfoContent)
        if max_f_value < 0.99:
            _add_paralog_filter(curr_results, Filter.NoComplReliable)
        if len(reliable_psv_ixs) < 3:
            _add_paralog_filter(curr_results, Filter.FewReliable)
        info_update = dict(
            n_psvs=len(psv_ixs),
            rel_psvs=len(reliable_psv_ixs),
            semirel_psvs=len(semirel_psv_ixs) - len(reliable_psv_ixs),
            psv_info='{:.3f}'.format(mean_info),
            max_f_value='{:.3f}'.format(max_f_value),
            gene_conv='T' if gene_conv else 'F')
        # Mark multi-entry ranges so the entries can be re-linked downstream.
        if end_ix > start_ix + 1:
            info_update['link'] = link_ix
        for res_entry in curr_results:
            res_entry.paralog_cn = paralog_cn
            res_entry.paralog_qual = paralog_qual
            res_entry.info.update(info_update)
def estimate_paralog_cn(region_group_extra, samples, genome, out, *, max_agcn, max_genotypes):
    """Compute paralog-specific CN profiles for every sample of the group.

    Returns a flat list of ResultEntry objects across all samples. When the
    group has no reliable PSVs at all, only the appropriate filters are
    attached and the per-sample psCN estimation is skipped.
    """
    common.log('    Calculating paralog-specific copy number profiles')
    # ===== Defining useful variables =====
    region_group = region_group_extra.region_group
    n_copies = region_group.cn // 2
    genotypes_str_cache = {}
    any_reliable = np.sum(region_group_extra.psv_is_reliable) > 0
    all_results = []
    for sample_id, sample in enumerate(samples):
        sample_results, linked_ranges = _create_sample_results_from_agcn(sample_id, region_group_extra)
        all_results.extend(sample_results)
        if not any_reliable:
            # No reliable PSVs anywhere in the group: flag and move on.
            if n_copies > max_agcn // 2:
                _add_paralog_filter(sample_results, Filter.HighCN)
            elif n_copies > 1:
                _add_paralog_filter(sample_results, Filter.NoReliable)
            continue
        _single_sample_pscn(sample_id, sample, sample_results, linked_ranges, region_group_extra,
            genome, out, genotypes_str_cache, max_genotypes)
    return all_results
class Filter(Enum):
    """Filters attached to agCN/psCN calls; serialized into output tables."""
    Pass = 0
    HighCN = 11
    NoReliable = 20
    FewReliable = 21
    NoComplReliable = 22
    LowInfoContent = 23
    UncertainCN = 24
    NoPSVs = 25

    def __str__(self):
        # Serialized under the member name, except for the two aliases below.
        if self is Filter.Pass:
            return 'PASS'
        if self is Filter.LowInfoContent:
            return 'LowInfoCont'
        return self.name

    @classmethod
    def from_str(cls, s):
        """Inverse of __str__; unknown strings yield None."""
        mapping = {
            'PASS': cls.Pass,
            'HighCN': cls.HighCN,
            'NoReliable': cls.NoReliable,
            'FewReliable': cls.FewReliable,
            'NoComplReliable': cls.NoComplReliable,
            'LowInfoCont': cls.LowInfoContent,
            'UncertainCN': cls.UncertainCN,
            'NoPSVs': cls.NoPSVs,
        }
        return mapping.get(s)
class Filters:
    """Lazily-allocated set of filters attached to a single result entry.

    `filters` stays None until the first add(); an absent/empty set renders
    as 'PASS' (or '*' when the associated value itself is undefined).
    """
    def __init__(self):
        # None — not an empty set — until the first add() call.
        self.filters = None

    def add(self, filt):
        """Record `filt`, allocating the underlying set on first use."""
        if self.filters is None:
            self.filters = {filt}
        else:
            self.filters.add(filt)

    def to_str(self, value_defined):
        """Semicolon-joined filter strings; 'PASS' or '*' when none are set."""
        if self.filters:
            return ';'.join(str(f) for f in self.filters)
        return 'PASS' if value_defined else '*'

    def to_tuple(self):
        """Tuple of filter strings; ('PASS',) when none are set."""
        if self.filters:
            return tuple(str(f) for f in self.filters)
        return ('PASS',)

    def __len__(self):
        return len(self.filters) if self.filters is not None else 0

    def __bool__(self):
        return bool(self.filters)

    def copy(self):
        """Return a copy whose filter set (if any) is independent."""
        duplicate = Filters()
        if self.filters:
            duplicate.filters = set(self.filters)
        return duplicate
class ResultEntry:
    """One output row: agCN and psCN call of one sample over one region."""
    def __init__(self, sample_id, sample_const_region):
        self.sample_id = sample_id
        self.sample_const_region = sample_const_region
        # Number of repeat copies; `cn` is halved here, consistent with the
        # `region_group.cn // 2` usage elsewhere in this module.
        self.n_copies = sample_const_region.cn // 2
        self.agcn_filter = Filters()
        self.paralog_filter = Filters()
        # Filled later by psCN estimation (or by paralog_to_str() fallbacks).
        self.paralog_cn = None
        self.paralog_qual = None
        self.info = {}
    @property
    def region1(self):
        """Main region of the underlying constant region."""
        return self.sample_const_region.region1
    @property
    def pred_cn(self):
        """Predicted aggregate CN (may be None when unknown)."""
        return self.sample_const_region.pred_cn
    def copy_num_to_str(self):
        """Return a tuple (agCN filter, agCN string, agCN quality string)."""
        if self.pred_cn is None:
            return (self.agcn_filter.to_str(False), '?', '*')
        return (self.agcn_filter.to_str(True), self.sample_const_region.pred_cn_str,
            '{:.2f}'.format(self.sample_const_region.qual))
    def paralog_to_str(self):
        """Return a tuple (psCN filter, psCN string, psCN quality string).

        When psCN was not estimated, it is derived from agCN for the trivial
        cases (single copy, or total CN of zero); otherwise '?'/0 are used.
        """
        if self.paralog_cn is None:
            if self.n_copies == 1:
                # With a single copy, psCN equals the agCN itself.
                self.paralog_cn = (self.sample_const_region.pred_cn_str,)
                self.paralog_qual = (self.sample_const_region.qual,)
            elif self.pred_cn == 0:
                # Zero total CN forces zero CN on every copy.
                self.paralog_cn = (0,) * self.n_copies
                self.paralog_qual = (self.sample_const_region.qual,) * self.n_copies
        paralog_filter = self.paralog_filter.to_str(self.paralog_cn is not None)
        if self.paralog_cn is None:
            return paralog_filter, ','.join('?' * self.n_copies), ','.join('0' * self.n_copies)
        pscn, pscn_qual, _ = paralog_cn_str(self.paralog_cn, self.paralog_qual)
        return paralog_filter, pscn, ','.join(map(str, pscn_qual))
    def to_str(self, region_name, genome, samples):
        """Format the entry as a single tab-separated output line (no newline)."""
        res = '{}\t{}\t{}\t'.format(self.sample_const_region.region1.to_bed(genome), region_name,
            samples[self.sample_id])
        res += '{}\t{}\t{}\t'.format(*self.copy_num_to_str())
        res += '{}\t{}\t{}\t'.format(*self.paralog_to_str())
        if self.info:
            res += ';'.join(map('%s=%s'.__mod__, self.info.items()))
        else:
            res += '*'
        res += '\t'
        res += self.sample_const_region.regions2_str(genome)
        return res
    def __lt__(self, other):
        # Entries sort by their main region.
        return self.region1.__lt__(other.region1)
    @classmethod
    def from_dict(cls, row, genome, samples):
        """Reconstruct a ResultEntry from a parsed output-table row (dict of strings)."""
        sample_id = samples.id(row['sample'])
        region1 = Interval(genome.chrom_id(row['chrom']), int(row['start']), int(row['end']))
        hom_regions = row['homologous_regions']
        regions2 = None
        if hom_regions != '*':
            regions2 = []
            for entry in hom_regions.split(','):
                regions2.append(Interval.parse_with_strand(entry, genome))
        pred_cn_str = row['agCN']
        # agCN may be non-numeric (e.g. '?' or a bound string).
        pred_cn = int(pred_cn_str) if pred_cn_str.isdigit() else None
        pred_cn_qual = float(row['agCN_qual'])
        sample_const_region = cn_tools.CopyNumPrediction(region1, regions2, pred_cn, pred_cn_str, pred_cn_qual)
        res = cls(sample_id, sample_const_region)
        pscn = row['psCN'].split(',')
        pscn_qual = row['psCN_qual'].split(',')
        for i in range(len(pscn)):
            # '?' marks an unknown per-copy CN.
            pscn[i] = None if pscn[i] == '?' else int(pscn[i])
            pscn_qual[i] = int(pscn_qual[i])
        res.paralog_cn = pscn
        res.paralog_qual = pscn_qual
        for entry in row['agCN_filter'].split(';'):
            res.agcn_filter.add(Filter.from_str(entry))
        for entry in row['psCN_filter'].split(';'):
            res.paralog_filter.add(Filter.from_str(entry))
        info = res.info
        row_info = row['info']
        if row_info != '*':
            for entry in row_info.split(';'):
                key, val = entry.split('=')
                info[key] = val
        return res
def _process_sample_entries(start, end, by_sample, searchers):
# Order of ResultEntry.copy_num_to_str() + entry.paralog_to_str()
CNF = 0
CN = 1
CNQ = 2
PCNF = 3
PCN = 4
PCNQ = 5
all_copy_nums = collections.Counter()
all_paralog_cns = collections.Counter()
sample_results = ''
for sample_entries, searcher in zip(by_sample, searchers):
start_ix, end_ix = searcher.overlap_ixs(start, end)
sample_results += '\t'
if end_ix <= start_ix:
sample_results += '*'
continue
info_sets = [set() for _ in range(6)]
coverage = 0
# Convert to int because islice does not work with numpy.int.
for entry in itertools.islice(sample_entries, int(start_ix), int(end_ix)):
curr_info = entry.copy_num_to_str() + entry.paralog_to_str()
for i, value in enumerate(curr_info):
info_sets[i].add(value)
curr_cov = min(end, entry.region1.end) - max(start, entry.region1.start)
all_copy_nums[curr_info[CN]] += curr_cov
all_paralog_cns[curr_info[PCN]] += curr_cov
coverage += curr_cov
coverage = 100 * coverage / (end - start)
if len(info_sets[1]) != 1:
sample_results += '! ! ! ! ! ! {:.1f}'.format(coverage)
continue
# Replace with constants.
for i in (CN, CNF, CNQ, PCN, PCNF, PCNQ):
if len(info_sets[i]) == 1:
sample_results += '{} '.format(info_sets[i].pop())
else:
sample_results += '! '
sample_results += '{:.1f}'.format(coverage)
return all_copy_nums, all_paralog_cns, sample_results
def write_matrix_summary(results, region_name, genome, samples, out):
    """Write the matrix-style summary table.

    One row per unique (start, end) event across all samples, with aggregate
    agCN/psCN frequency columns followed by one column per sample (see header
    comments written below). Does nothing when `results` is empty.
    """
    if not results:
        return
    # Header: command line, package version and format description.
    out.write('## {}\n'.format(' '.join(sys.argv)))
    out.write('## {} {}\n'.format(__pkg_name__, __version__))
    out.write('## For each sample 7 values are stored: agCN, agCN_filter, agCN_qual; '
        'psCN, psCN_filter, psCN_qual; overlap.\n')
    out.write('## overlap - percentage of the region covered by the sample entries.\n')
    out.write('## Entries for sample can contain "!", that means that several entries '
        'cover the region and have different values.\n')
    out.write('#chrom\tstart\tend\tlocus\trefCN\tagCN_freq\tpsCN_freq\tinfo\thomologous_regions\t')
    out.write('\t'.join(samples))
    out.write('\n')
    # Group entries per sample and per unique (start, end) interval.
    by_sample = [[] for _ in range(len(samples))]
    unique_events = collections.defaultdict(list)
    for entry in results:
        by_sample[entry.sample_id].append(entry)
        key = (entry.region1.start, entry.region1.end)
        unique_events[key].append(entry)
    # One overlap-search structure per sample, used by _process_sample_entries.
    searchers = []
    for sample_entries in by_sample:
        searchers.append(itree.NonOverlTree(sample_entries, itree.region1_start, itree.region1_end))
    for (start, end) in sorted(unique_events.keys()):
        templates = unique_events[(start, end)]
        # Any entry of the event can provide region/refCN information.
        template = templates[0]
        out.write('{}\t{}\t{}\t'.format(template.region1.to_bed(genome), region_name,
            template.sample_const_region.cn))
        all_copy_nums, all_paralog_cns, sample_results = _process_sample_entries(start, end, by_sample, searchers)
        # agCN frequency column: numeric CNs sort naturally, bounds like
        # '<x' / '>x' are pushed to the ends; other values are skipped.
        copy_num_freqs = []
        for copy_num, freq in all_copy_nums.items():
            if copy_num.isdigit():
                # Store sorting key, copy number, and frequency.
                copy_num_freqs.append((int(copy_num), copy_num, freq))
            elif copy_num.startswith('<'):
                copy_num_freqs.append((-1, copy_num, freq))
            elif copy_num.startswith('>'):
                copy_num_freqs.append((1000, copy_num, freq))
            # else: ignore
        copy_num_freqs.sort()
        copy_num_freq_sum = sum(map(operator.itemgetter(2), copy_num_freqs))
        out.write(' '.join('{}={:.5g}'.format(copy_num, freq / copy_num_freq_sum)
            for _, copy_num, freq in copy_num_freqs))
        out.write('\t')
        # psCN frequency column, most frequent first.
        paralog_freq_sum = sum(all_paralog_cns.values())
        out.write(' '.join('{}={:.5g}'.format(paralog, freq / paralog_freq_sum)
            for paralog, freq in all_paralog_cns.most_common()))
        # Info column: event length and up to 10 sample names.
        info = 'len={:.1f}kb;samples={}:{}{}'.format((end - start) / 1000, len(templates),
            ','.join(samples[entry.sample_id] for entry in itertools.islice(templates, 0, 10)),
            ',...' if len(templates) > 10 else '')
        out.write('\t{}\t'.format(info))
        out.write(template.sample_const_region.regions2_str(genome))
        out.write(sample_results)
        out.write('\n')
| [
"numpy.log",
"scipy.stats.beta.logcdf",
"numpy.array",
"scipy.stats.pearsonr",
"operator.itemgetter",
"scipy.special.logsumexp",
"scipy.cluster.hierarchy.fcluster",
"numpy.mean",
"numpy.full_like",
"numpy.where",
"time.perf_counter",
"numpy.max",
"numpy.exp",
"scipy.cluster.hierarchy.linka... | [((16173, 16297), 'collections.namedtuple', 'collections.namedtuple', (['"""_BestCluster"""', '"""cluster_i n_reliable info_content likelihood psv_f_values sample_gt_probs"""'], {}), "('_BestCluster',\n 'cluster_i n_reliable info_content likelihood psv_f_values sample_gt_probs'\n )\n", (16195, 16297), False, 'import collections\n'), ((26396, 26488), 'collections.namedtuple', 'collections.namedtuple', (['"""GeneConversion"""', '"""start end main_gt replacement_gt qual n_psvs"""'], {}), "('GeneConversion',\n 'start end main_gt replacement_gt qual n_psvs')\n", (26418, 26488), False, 'import collections\n'), ((617, 633), 'numpy.max', 'np.max', (['f_values'], {}), '(f_values)\n', (623, 633), True, 'import numpy as np\n'), ((650, 739), 'scipy.stats.beta.logcdf', 'stats.beta.logcdf', (['(max_f_value, max_f_value - EPSILON)', '_BETA_DISTR_A', '_BETA_DISTR_B'], {}), '((max_f_value, max_f_value - EPSILON), _BETA_DISTR_A,\n _BETA_DISTR_B)\n', (667, 739), False, 'from scipy import stats\n'), ((747, 778), 'scipy.special.logsumexp', 'logsumexp', (['beta_cdfs'], {'b': '(1, -1)'}), '(beta_cdfs, b=(1, -1))\n', (756, 778), False, 'from scipy.special import logsumexp\n'), ((1100, 1141), 'numpy.zeros', 'np.zeros', (['(n_samples, n_sample_genotypes)'], {}), '((n_samples, n_sample_genotypes))\n', (1108, 1141), True, 'import numpy as np\n'), ((1163, 1197), 'numpy.zeros', 'np.zeros', (['n_samples'], {'dtype': 'np.bool'}), '(n_samples, dtype=np.bool)\n', (1171, 1197), True, 'import numpy as np\n'), ((3362, 3401), 'numpy.full_like', 'np.full_like', (['prev_psv_f_values', 'np.nan'], {}), '(prev_psv_f_values, np.nan)\n', (3374, 3401), True, 'import numpy as np\n'), ((3649, 3672), 'numpy.exp', 'np.exp', (['sample_gt_probs'], {}), '(sample_gt_probs)\n', (3655, 3672), True, 'import numpy as np\n'), ((12737, 12757), 'numpy.full', 'np.full', (['n_copies', '(2)'], {}), '(n_copies, 2)\n', (12744, 12757), True, 'import numpy as np\n'), ((13014, 13031), 
'scipy.special.logsumexp', 'logsumexp', (['priors'], {}), '(priors)\n', (13023, 13031), False, 'from scipy.special import logsumexp\n'), ((13154, 13190), 'numpy.full', 'np.full', (['(n_psvs, n_samples)', 'np.nan'], {}), '((n_psvs, n_samples), np.nan)\n', (13161, 13190), True, 'import numpy as np\n'), ((13578, 13611), 'numpy.full', 'np.full', (['(n_psvs, n_psvs)', 'np.nan'], {}), '((n_psvs, n_psvs), np.nan)\n', (13585, 13611), True, 'import numpy as np\n'), ((13616, 13649), 'numpy.fill_diagonal', 'np.fill_diagonal', (['cor_matrix', '(1.0)'], {}), '(cor_matrix, 1.0)\n', (13632, 13649), True, 'import numpy as np\n'), ((14284, 14317), 'numpy.full', 'np.full', (['(n_psvs, n_psvs)', 'np.nan'], {}), '((n_psvs, n_psvs), np.nan)\n', (14291, 14317), True, 'import numpy as np\n'), ((14322, 14354), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dist_matrix', '(0)'], {}), '(dist_matrix, 0)\n', (14338, 14354), True, 'import numpy as np\n'), ((14370, 14400), 'numpy.ones', 'np.ones', (['n_psvs'], {'dtype': 'np.bool'}), '(n_psvs, dtype=np.bool)\n', (14377, 14400), True, 'import numpy as np\n'), ((15221, 15292), 'numpy.array', 'np.array', (['[psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em]'], {}), '([psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em])\n', (15229, 15292), True, 'import numpy as np\n'), ((15468, 15541), 'scipy.spatial.distance.squareform', 'scipy.spatial.distance.squareform', (['dist_matrix[psv_ixs[:, None], psv_ixs]'], {}), '(dist_matrix[psv_ixs[:, None], psv_ixs])\n', (15501, 15541), False, 'import scipy\n'), ((15556, 15608), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['condensed_dist'], {'method': '"""complete"""'}), "(condensed_dist, method='complete')\n", (15573, 15608), False, 'from scipy.cluster import hierarchy\n'), ((15625, 15677), 'scipy.cluster.hierarchy.fcluster', 'hierarchy.fcluster', (['linkage', '(2)'], {'criterion': '"""maxclust"""'}), "(linkage, 2, criterion='maxclust')\n", (15643, 15677), False, 'from scipy.cluster 
import hierarchy\n'), ((15698, 15719), 'numpy.bincount', 'np.bincount', (['clusters'], {}), '(clusters)\n', (15709, 15719), True, 'import numpy as np\n'), ((18779, 18793), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (18791, 18793), False, 'from time import perf_counter\n'), ((18971, 19042), 'numpy.array', 'np.array', (['[psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em]'], {}), '([psv_info.psv_ix for psv_info in psv_infos if psv_info.in_em])\n', (18979, 19042), True, 'import numpy as np\n'), ((22402, 22513), 'numpy.array', 'np.array', (['[psv_info.psv_ix for psv_info in psv_infos if not psv_info.in_em and \n psv_info.n_used_samples > 0]'], {}), '([psv_info.psv_ix for psv_info in psv_infos if not psv_info.in_em and\n psv_info.n_used_samples > 0])\n', (22410, 22513), True, 'import numpy as np\n'), ((23338, 23366), 'numpy.zeros', 'np.zeros', (['n_sample_genotypes'], {}), '(n_sample_genotypes)\n', (23346, 23366), True, 'import numpy as np\n'), ((23891, 23927), 'numpy.full', 'np.full', (['(n_copies, cn + 1)', '(-np.nan)'], {}), '((n_copies, cn + 1), -np.nan)\n', (23898, 23927), True, 'import numpy as np\n'), ((23944, 23963), 'scipy.special.logsumexp', 'logsumexp', (['gt_probs'], {}), '(gt_probs)\n', (23953, 23963), False, 'from scipy.special import logsumexp\n'), ((24216, 24249), 'numpy.zeros', 'np.zeros', (['n_copies'], {'dtype': 'np.int8'}), '(n_copies, dtype=np.int8)\n', (24224, 24249), True, 'import numpy as np\n'), ((24269, 24287), 'numpy.zeros', 'np.zeros', (['n_copies'], {}), '(n_copies)\n', (24277, 24287), True, 'import numpy as np\n'), ((26673, 26699), 'numpy.argmax', 'np.argmax', (['sample_gt_probs'], {}), '(sample_gt_probs)\n', (26682, 26699), True, 'import numpy as np\n'), ((26783, 26817), 'numpy.zeros', 'np.zeros', (['(1, n_genotypes, n_psvs)'], {}), '((1, n_genotypes, n_psvs))\n', (26791, 26817), True, 'import numpy as np\n'), ((42157, 42178), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (42176, 42178), False, 
'import collections\n'), ((42201, 42222), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (42220, 42222), False, 'import collections\n'), ((44450, 44479), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (44473, 44479), False, 'import collections\n'), ((2018, 2047), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (2029, 2047), True, 'import numpy as np\n'), ((2168, 2198), 'scipy.special.logsumexp', 'logsumexp', (['prob_matrix'], {'axis': '(1)'}), '(prob_matrix, axis=1)\n', (2177, 2198), False, 'from scipy.special import logsumexp\n'), ((2547, 2567), 'numpy.isinf', 'np.isinf', (['beta_prior'], {}), '(beta_prior)\n', (2555, 2567), True, 'import numpy as np\n'), ((2621, 2662), 'numpy.full', 'np.full', (['(n_samples, n_genotypes)', 'np.nan'], {}), '((n_samples, n_genotypes), np.nan)\n', (2628, 2662), True, 'import numpy as np\n'), ((4152, 4246), 'scipy.optimize.minimize', 'optimize.minimize', (['minus_lik'], {'x0': 'prev_f_values', 'bounds': 'bounds', 'options': 'OPTS', 'method': 'METHOD'}), '(minus_lik, x0=prev_f_values, bounds=bounds, options=OPTS,\n method=METHOD)\n', (4169, 4246), False, 'from scipy import optimize\n'), ((6460, 6494), 'numpy.zeros', 'np.zeros', (['n_samples'], {'dtype': 'np.bool'}), '(n_samples, dtype=np.bool)\n', (6468, 6494), True, 'import numpy as np\n'), ((9745, 9818), 'numpy.zeros', 'np.zeros', (['(self.n_used_samples, self.precomp_data_ref_cn.n_psv_genotypes)'], {}), '((self.n_used_samples, self.precomp_data_ref_cn.n_psv_genotypes))\n', (9753, 9818), True, 'import numpy as np\n'), ((12712, 12722), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (12718, 12722), True, 'import numpy as np\n'), ((13659, 13684), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (13682, 13684), False, 'import warnings\n'), ((13694, 13772), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 
'stats.PearsonRConstantInputWarning'}), "('ignore', category=stats.PearsonRConstantInputWarning)\n", (13717, 13772), False, 'import warnings\n'), ((15352, 15370), 'numpy.where', 'np.where', (['use_psvs'], {}), '(use_psvs)\n', (15360, 15370), True, 'import numpy as np\n'), ((19616, 19648), 'numpy.full', 'np.full', (['(n_psvs, n_copies)', '(0.5)'], {}), '((n_psvs, n_copies), 0.5)\n', (19623, 19648), True, 'import numpy as np\n'), ((24339, 24370), 'numpy.argmax', 'np.argmax', (['marginal_probs[copy]'], {}), '(marginal_probs[copy])\n', (24348, 24370), True, 'import numpy as np\n'), ((25757, 25789), 'numpy.full', 'np.full', (['(n_gts, n_gts)', '(-np.inf)'], {}), '((n_gts, n_gts), -np.inf)\n', (25764, 25789), True, 'import numpy as np\n'), ((25809, 25826), 'numpy.log', 'np.log', (['stay_prob'], {}), '(stay_prob)\n', (25815, 25826), True, 'import numpy as np\n'), ((25850, 25870), 'numpy.log1p', 'np.log1p', (['(-stay_prob)'], {}), '(-stay_prob)\n', (25858, 25870), True, 'import numpy as np\n'), ((25892, 25929), 'numpy.log', 'np.log', (['((1 - stay_prob) / (n_gts - 1))'], {}), '((1 - stay_prob) / (n_gts - 1))\n', (25898, 25929), True, 'import numpy as np\n'), ((26317, 26342), 'numpy.log', 'np.log', (['initial_best_prob'], {}), '(initial_best_prob)\n', (26323, 26342), True, 'import numpy as np\n'), ((27536, 27552), 'scipy.special.logsumexp', 'logsumexp', (['probs'], {}), '(probs)\n', (27545, 27552), False, 'from scipy.special import logsumexp\n'), ((28875, 28942), 'numpy.sum', 'np.sum', (['region_group_extra.psv_is_reliable[psv_start_ix:psv_end_ix]'], {}), '(region_group_extra.psv_is_reliable[psv_start_ix:psv_end_ix])\n', (28881, 28942), True, 'import numpy as np\n'), ((31517, 31534), 'numpy.array', 'np.array', (['psv_ixs'], {}), '(psv_ixs)\n', (31525, 31534), True, 'import numpy as np\n'), ((33972, 34044), 'numpy.mean', 'np.mean', (['[psv_infos[psv_ix].info_content for psv_ix in reliable_psv_ixs]'], {}), '([psv_infos[psv_ix].info_content for psv_ix in reliable_psv_ixs])\n', 
(33979, 34044), True, 'import numpy as np\n'), ((35384, 35426), 'numpy.sum', 'np.sum', (['region_group_extra.psv_is_reliable'], {}), '(region_group_extra.psv_is_reliable)\n', (35390, 35426), True, 'import numpy as np\n'), ((2219, 2251), 'numpy.sum', 'np.sum', (['row_sums[usable_samples]'], {}), '(row_sums[usable_samples])\n', (2225, 2251), True, 'import numpy as np\n'), ((2915, 2961), 'scipy.special.logsumexp', 'logsumexp', (['(psv_gt_coefs + psv_gt_probs)'], {'axis': '(1)'}), '(psv_gt_coefs + psv_gt_probs, axis=1)\n', (2924, 2961), False, 'from scipy.special import logsumexp\n'), ((8076, 8097), 'numpy.where', 'np.where', (['use_samples'], {}), '(use_samples)\n', (8084, 8097), True, 'import numpy as np\n'), ((11655, 11691), 'itertools.count', 'itertools.count', (['(psv_ix + step)', 'step'], {}), '(psv_ix + step, step)\n', (11670, 11691), False, 'import itertools\n'), ((14713, 14743), 'numpy.logical_and', 'np.logical_and', (['mask_i', 'mask_j'], {}), '(mask_i, mask_j)\n', (14727, 14743), True, 'import numpy as np\n'), ((14768, 14780), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (14774, 14780), True, 'import numpy as np\n'), ((15778, 15799), 'numpy.max', 'np.max', (['cluster_sizes'], {}), '(cluster_sizes)\n', (15784, 15799), True, 'import numpy as np\n'), ((22624, 22656), 'numpy.full', 'np.full', (['(n_psvs, n_copies)', '(0.5)'], {}), '((n_psvs, n_copies), 0.5)\n', (22631, 22656), True, 'import numpy as np\n'), ((24173, 24197), 'scipy.special.logsumexp', 'logsumexp', (['gt_probs[ixs]'], {}), '(gt_probs[ixs])\n', (24182, 24197), False, 'from scipy.special import logsumexp\n'), ((26243, 26288), 'numpy.log', 'np.log', (['((1 - initial_best_prob) / (n_gts - 1))'], {}), '((1 - initial_best_prob) / (n_gts - 1))\n', (26249, 26288), True, 'import numpy as np\n'), ((32725, 32773), 'numpy.all', 'np.all', (['(paralog_qual >= GENE_CONV_QUAL_THRESHOLD)'], {}), '(paralog_qual >= GENE_CONV_QUAL_THRESHOLD)\n', (32731, 32773), True, 'import numpy as np\n'), ((34074, 34139), 
'numpy.min', 'np.min', (['region_group_extra.psv_f_values[reliable_psv_ixs]'], {'axis': '(1)'}), '(region_group_extra.psv_f_values[reliable_psv_ixs], axis=1)\n', (34080, 34139), True, 'import numpy as np\n'), ((1897, 1943), 'scipy.special.logsumexp', 'logsumexp', (['(psv_gt_coefs + psv_gt_probs)'], {'axis': '(1)'}), '(psv_gt_coefs + psv_gt_probs, axis=1)\n', (1906, 1943), False, 'from scipy.special import logsumexp\n'), ((2978, 3015), 'numpy.sum', 'np.sum', (['(inner_term * gt_probs_regular)'], {}), '(inner_term * gt_probs_regular)\n', (2984, 3015), True, 'import numpy as np\n'), ((12919, 12938), 'numpy.abs', 'np.abs', (['(ref_gt - gt)'], {}), '(ref_gt - gt)\n', (12925, 12938), True, 'import numpy as np\n'), ((14120, 14190), 'scipy.stats.pearsonr', 'stats.pearsonr', (['ref_fractions[psv_i, mask]', 'ref_fractions[psv_j, mask]'], {}), '(ref_fractions[psv_i, mask], ref_fractions[psv_j, mask])\n', (14134, 14190), False, 'from scipy import stats\n'), ((14616, 14643), 'numpy.isnan', 'np.isnan', (['cor_matrix[psv_i]'], {}), '(cor_matrix[psv_i])\n', (14624, 14643), True, 'import numpy as np\n'), ((14666, 14693), 'numpy.isnan', 'np.isnan', (['cor_matrix[psv_j]'], {}), '(cor_matrix[psv_j])\n', (14674, 14693), True, 'import numpy as np\n'), ((20956, 20985), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""'}), "(invalid='ignore')\n", (20967, 20985), True, 'import numpy as np\n'), ((21160, 21219), 'numpy.mean', 'np.mean', (['[psv_infos[i].info_content for i in curr_reliable]'], {}), '([psv_infos[i].info_content for i in curr_reliable])\n', (21167, 21219), True, 'import numpy as np\n'), ((45745, 45767), 'operator.itemgetter', 'operator.itemgetter', (['(2)'], {}), '(2)\n', (45764, 45767), False, 'import operator\n'), ((10753, 10776), 'numpy.exp', 'np.exp', (['psv_gt_probs[i]'], {}), '(psv_gt_probs[i])\n', (10759, 10776), True, 'import numpy as np\n'), ((14048, 14060), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (14054, 14060), True, 'import numpy as np\n'), 
((14830, 14844), 'numpy.sum', 'np.sum', (['mask_i'], {}), '(mask_i)\n', (14836, 14844), True, 'import numpy as np\n'), ((14847, 14861), 'numpy.sum', 'np.sum', (['mask_j'], {}), '(mask_j)\n', (14853, 14861), True, 'import numpy as np\n'), ((15111, 15196), 'scipy.spatial.distance.pdist', 'scipy.spatial.distance.pdist', (['(cor_matrix[psv_i, mask], cor_matrix[psv_j, mask])'], {}), '((cor_matrix[psv_i, mask], cor_matrix[psv_j, mask])\n )\n', (15139, 15196), False, 'import scipy\n'), ((13965, 13995), 'numpy.isnan', 'np.isnan', (['ref_fractions[psv_i]'], {}), '(ref_fractions[psv_i])\n', (13973, 13995), True, 'import numpy as np\n'), ((13997, 14027), 'numpy.isnan', 'np.isnan', (['ref_fractions[psv_j]'], {}), '(ref_fractions[psv_j])\n', (14005, 14027), True, 'import numpy as np\n'), ((21028, 21078), 'numpy.all', 'np.all', (['(psv_f_values >= reliable_threshold)'], {'axis': '(1)'}), '(psv_f_values >= reliable_threshold, axis=1)\n', (21034, 21078), True, 'import numpy as np\n'), ((33649, 33687), 'numpy.abs', 'np.abs', (['(sample_gt_probs / common.LOG10)'], {}), '(sample_gt_probs / common.LOG10)\n', (33655, 33687), True, 'import numpy as np\n'), ((46300, 46334), 'itertools.islice', 'itertools.islice', (['templates', '(0)', '(10)'], {}), '(templates, 0, 10)\n', (46316, 46334), False, 'import itertools\n'), ((21398, 21412), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (21410, 21412), False, 'from time import perf_counter\n')] |
import matplotlib, os
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from flarestack.analyses.ccsn.necker_2019.ccsn_helpers import (
updated_sn_catalogue_name,
sn_cats,
sn_times,
pdf_names,
raw_output_dir,
)
import logging
from flarestack.shared import plot_output_dir
# Output directory for the catalogue-comparison plots; created on demand.
plot_dir = plot_output_dir(
    raw_output_dir + "/catalogue_visualization/difference_stasik/"
)
# exist_ok avoids the isdir-then-makedirs race and is the idiomatic form.
os.makedirs(plot_dir, exist_ok=True)
def autolabel(rects, axis):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for bar in rects:
        bar_height = bar.get_height()
        label_x = bar.get_x() + bar.get_width() / 2
        axis.annotate(
            "{}".format(bar_height),
            xy=(label_x, bar_height),
            xytext=(0, 2),  # 2 points vertical offset
            textcoords="offset points",
            ha="center",
            va="bottom",
        )
def plot_difference_tot(filename):
    """Plot total object counts per SN type for the old ("flagged") and new
    ("unflagged") catalogues, stacking the previously-missed objects on top
    of the new-catalogue bars.

    :param filename: output path for the saved figure
    """
    # Number of objects per SN type, keyed by catalogue version.
    N = {}
    for flagged in [True, False]:
        key = "flagged" if flagged else "unflagged"
        N[key] = [
            len(np.load(updated_sn_catalogue_name(cat, flagged=flagged)))
            for cat in sn_cats
        ]
    # Objects missed by the old catalogue. This does not depend on the
    # flagged/unflagged distinction, so it is computed only once (the
    # original recomputed it inside the loop above).
    Nnew = [
        len(np.load(updated_sn_catalogue_name(cat, pdf_name="missed_objects")))
        for cat in sn_cats
    ]

    fig, ax = plt.subplots()
    x = np.arange(len(sn_cats))
    width = 0.4
    r1 = ax.bar(x + width / 2, N["unflagged"], width, label="new catalogue")
    r2 = ax.bar(x - width / 2, N["flagged"], width, label="old catalogue")
    # Stack the "previously not included" fraction on top of the new bars,
    # re-using the new-catalogue bar colour with a hatch to distinguish it.
    ax.bar(
        x + width / 2,
        Nnew,
        width,
        bottom=np.array(N["unflagged"]) - np.array(Nnew),
        hatch="//",
        color=r1[0].get_facecolor(),
        label="previously not included",
    )

    ax.set_ylabel("number of objects")
    ax.set_xlabel("SN types")
    ax.set_xticks(x)
    ax.set_xticklabels(sn_cats)
    # 10% headroom so the bar labels do not clip at the top of the axes.
    ax.set_ylim(np.array(ax.get_ylim()) * [1, 1.1])
    ax.set_title("number of objects in catalogues")
    ax.legend()

    autolabel(r1, ax)
    autolabel(r2, ax)
    fig.savefig(filename)
    plt.close()
def plot_difference_individual(sn_types, filename):
    """Plot, one subplot per SN type, the per-PDF object counts of the old
    ("flagged") and new ("unflagged") catalogues side by side.

    :param sn_types: SN type names to plot (one subplot each)
    :param filename: output path for the saved figure
    """
    fig, axs = plt.subplots(len(sn_types))
    for subplot_i, sn_type in enumerate(sn_types):
        ax = axs[subplot_i]
        # All time-PDF catalogue names for this SN type.
        cats = [
            pdf_names(pdf_type, pdf_time)
            for pdf_type in sn_times[sn_type]
            for pdf_time in sn_times[sn_type][pdf_type]
        ]
        counts = {}
        for flagged in [True, False]:
            key = "flagged" if flagged else "unflagged"
            counts[key] = []
            for cat in cats:
                catalogue = np.load(
                    updated_sn_catalogue_name(sn_type, pdf_name=cat, flagged=flagged)
                )
                counts[key].append(len(catalogue))
        positions = np.arange(len(cats))
        bar_width = 0.4
        new_bars = ax.bar(
            positions + bar_width / 2, counts["unflagged"], bar_width, label="new"
        )
        old_bars = ax.bar(
            positions - bar_width / 2, counts["flagged"], bar_width, label="old"
        )
        ax.set_ylabel(f"SN type {sn_type}")
        ax.set_xticks(positions)
        ax.set_xticklabels(cats)
        # 10% headroom so bar labels do not clip.
        ax.set_ylim(np.array(ax.get_ylim()) * [1, 1.1])
        autolabel(new_bars, ax)
        autolabel(old_bars, ax)
    axs[-1].set_xlabel("PDF type")
    axs[0].set_title("catalogues with wrong classifications")
    axs[-1].legend()
    fig.savefig(filename)
    plt.close()
# Generate both comparison figures on execution (this module runs as a script).
plot_difference_tot(plot_dir + "total.pdf")
plot_difference_individual(["Ibc", "IIn"], plot_dir + "individual.pdf")
| [
"flarestack.analyses.ccsn.necker_2019.ccsn_helpers.updated_sn_catalogue_name",
"os.makedirs",
"matplotlib.use",
"flarestack.shared.plot_output_dir",
"matplotlib.pyplot.close",
"numpy.array",
"os.path.isdir",
"flarestack.analyses.ccsn.necker_2019.ccsn_helpers.pdf_names",
"matplotlib.pyplot.subplots"
... | [((23, 44), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (37, 44), False, 'import matplotlib, os\n'), ((329, 408), 'flarestack.shared.plot_output_dir', 'plot_output_dir', (["(raw_output_dir + '/catalogue_visualization/difference_stasik/')"], {}), "(raw_output_dir + '/catalogue_visualization/difference_stasik/')\n", (344, 408), False, 'from flarestack.shared import plot_output_dir\n'), ((423, 446), 'os.path.isdir', 'os.path.isdir', (['plot_dir'], {}), '(plot_dir)\n', (436, 446), False, 'import matplotlib, os\n'), ((452, 473), 'os.makedirs', 'os.makedirs', (['plot_dir'], {}), '(plot_dir)\n', (463, 473), False, 'import matplotlib, os\n'), ((1406, 1420), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1418, 1420), True, 'import matplotlib.pyplot as plt\n'), ((2176, 2187), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2185, 2187), True, 'import matplotlib.pyplot as plt\n'), ((3442, 3453), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3451, 3453), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1190), 'flarestack.analyses.ccsn.necker_2019.ccsn_helpers.updated_sn_catalogue_name', 'updated_sn_catalogue_name', (['cat'], {'flagged': 'flagged'}), '(cat, flagged=flagged)\n', (1168, 1190), False, 'from flarestack.analyses.ccsn.necker_2019.ccsn_helpers import updated_sn_catalogue_name, sn_cats, sn_times, pdf_names, raw_output_dir\n'), ((1295, 1352), 'flarestack.analyses.ccsn.necker_2019.ccsn_helpers.updated_sn_catalogue_name', 'updated_sn_catalogue_name', (['cat'], {'pdf_name': '"""missed_objects"""'}), "(cat, pdf_name='missed_objects')\n", (1320, 1352), False, 'from flarestack.analyses.ccsn.necker_2019.ccsn_helpers import updated_sn_catalogue_name, sn_cats, sn_times, pdf_names, raw_output_dir\n'), ((1710, 1734), 'numpy.array', 'np.array', (["N['unflagged']"], {}), "(N['unflagged'])\n", (1718, 1734), True, 'import numpy as np\n'), ((1737, 1751), 'numpy.array', 'np.array', (['Nnew'], {}), '(Nnew)\n', 
(1745, 1751), True, 'import numpy as np\n'), ((2512, 2541), 'flarestack.analyses.ccsn.necker_2019.ccsn_helpers.pdf_names', 'pdf_names', (['pdf_type', 'pdf_time'], {}), '(pdf_type, pdf_time)\n', (2521, 2541), False, 'from flarestack.analyses.ccsn.necker_2019.ccsn_helpers import updated_sn_catalogue_name, sn_cats, sn_times, pdf_names, raw_output_dir\n'), ((2724, 2789), 'flarestack.analyses.ccsn.necker_2019.ccsn_helpers.updated_sn_catalogue_name', 'updated_sn_catalogue_name', (['sn_type'], {'pdf_name': 'cat', 'flagged': 'flagged'}), '(sn_type, pdf_name=cat, flagged=flagged)\n', (2749, 2789), False, 'from flarestack.analyses.ccsn.necker_2019.ccsn_helpers import updated_sn_catalogue_name, sn_cats, sn_times, pdf_names, raw_output_dir\n')] |
import numpy as np
import pytest
from ephysiopy.common import utils
def test_smooth():
    """Exercise utils.smooth: input validation plus the supported windows."""
    data = list(np.random.rand(100))
    # 2D input is rejected.
    with pytest.raises(ValueError):
        utils.smooth(np.atleast_2d(data))
    # A window longer than the signal is rejected.
    with pytest.raises(ValueError):
        utils.smooth(data, window_len=len(data) + 1)
    utils.smooth(data, window_len=2)
    utils.smooth(data, window_len=6)
    # An unknown window name is rejected.
    with pytest.raises(ValueError):
        utils.smooth(data, window='deliberate_error')
    utils.smooth(data, window='flat')
    smoothed = utils.smooth(data, window='hamming')
    assert isinstance(smoothed, np.ndarray)
def test_blur_image(basic_ratemap):
    """blurImage accepts 1D, 2D and 3D maps with both filter types."""
    maps = [
        basic_ratemap[0, :],
        basic_ratemap,
        np.atleast_3d(basic_ratemap),
    ]
    blurred = utils.blurImage(basic_ratemap, 3, 4)
    for ftype in ['box', 'gaussian']:
        for rmap in maps:
            blurred = utils.blurImage(rmap, 3, ftype=ftype)
            assert isinstance(blurred, np.ndarray)
def test_count_to():
    """count_to rejects 2D input and returns an ndarray for 1D input."""
    counts = np.array([0, 0, 3, 0, 0, 2, 0, 2, 1])
    with pytest.raises(Exception):
        utils.count_to(np.atleast_2d(counts))
    result = utils.count_to(counts)
    assert isinstance(result, np.ndarray)
def test_repeat_ind():
    """repeat_ind rejects 2D input and returns an ndarray for 1D input."""
    counts = np.array([0, 0, 3, 0, 0, 2, 0, 2, 1])
    with pytest.raises(Exception):
        utils.repeat_ind(np.atleast_2d(counts))
    result = utils.repeat_ind(counts)
    assert isinstance(result, np.ndarray)
def test_rect():
    """rect converts polar coordinates to cartesian, in radians or degrees."""
    from numpy.random import default_rng
    rng = default_rng()
    angles_a = rng.vonmises(0, 0.1, 100)
    angles_b = rng.vonmises(0, 0.1, 100)
    utils.rect(angles_a, angles_b)
    r, _ = utils.rect(np.rad2deg(angles_a), np.rad2deg(angles_b), deg=True)
    assert isinstance(r, np.ndarray)
def test_polar():
    """polar converts cartesian coordinates to polar, in radians or degrees."""
    xs = np.random.randint(0, 10, 20)
    ys = np.random.randint(0, 10, 20)
    radial, _ = utils.polar(xs, ys)
    assert isinstance(radial, np.ndarray)
    radial, _ = utils.polar(xs, ys, deg=True)
    assert isinstance(radial, np.ndarray)
def test_bwperim(basic_ratemap):
    """bwperim accepts only 4- or 8-connectivity."""
    with pytest.raises(ValueError):
        utils.bwperim(basic_ratemap, n=2)
    perimeter = utils.bwperim(basic_ratemap, n=8)
    assert isinstance(perimeter, np.ndarray)
| [
"ephysiopy.common.utils.polar",
"ephysiopy.common.utils.bwperim",
"numpy.atleast_2d",
"numpy.random.rand",
"numpy.random.default_rng",
"ephysiopy.common.utils.blurImage",
"ephysiopy.common.utils.smooth",
"ephysiopy.common.utils.repeat_ind",
"numpy.array",
"numpy.random.randint",
"pytest.raises",... | [((97, 116), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (111, 116), True, 'import numpy as np\n'), ((293, 322), 'ephysiopy.common.utils.smooth', 'utils.smooth', (['x'], {'window_len': '(2)'}), '(x, window_len=2)\n', (305, 322), False, 'from ephysiopy.common import utils\n'), ((327, 356), 'ephysiopy.common.utils.smooth', 'utils.smooth', (['x'], {'window_len': '(6)'}), '(x, window_len=6)\n', (339, 356), False, 'from ephysiopy.common import utils\n'), ((448, 478), 'ephysiopy.common.utils.smooth', 'utils.smooth', (['x'], {'window': '"""flat"""'}), "(x, window='flat')\n", (460, 478), False, 'from ephysiopy.common import utils\n'), ((487, 520), 'ephysiopy.common.utils.smooth', 'utils.smooth', (['x'], {'window': '"""hamming"""'}), "(x, window='hamming')\n", (499, 520), False, 'from ephysiopy.common import utils\n'), ((701, 722), 'numpy.atleast_3d', 'np.atleast_3d', (['rmap2D'], {}), '(rmap2D)\n', (714, 722), True, 'import numpy as np\n'), ((768, 797), 'ephysiopy.common.utils.blurImage', 'utils.blurImage', (['rmap2D', '(3)', '(4)'], {}), '(rmap2D, 3, 4)\n', (783, 797), False, 'from ephysiopy.common import utils\n'), ((1007, 1018), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (1015, 1018), True, 'import numpy as np\n'), ((1103, 1120), 'ephysiopy.common.utils.count_to', 'utils.count_to', (['n'], {}), '(n)\n', (1117, 1120), False, 'from ephysiopy.common import utils\n'), ((1228, 1239), 'numpy.array', 'np.array', (['n'], {}), '(n)\n', (1236, 1239), True, 'import numpy as np\n'), ((1328, 1347), 'ephysiopy.common.utils.repeat_ind', 'utils.repeat_ind', (['n'], {}), '(n)\n', (1344, 1347), False, 'from ephysiopy.common import utils\n'), ((1458, 1471), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (1469, 1471), False, 'from numpy.random import default_rng\n'), ((1544, 1560), 'ephysiopy.common.utils.rect', 'utils.rect', (['x', 'y'], {}), '(x, y)\n', (1554, 1560), False, 'from ephysiopy.common import utils\n'), ((1714, 
1742), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (1731, 1742), True, 'import numpy as np\n'), ((1751, 1779), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(20)'], {}), '(0, 10, 20)\n', (1768, 1779), True, 'import numpy as np\n'), ((1791, 1808), 'ephysiopy.common.utils.polar', 'utils.polar', (['x', 'y'], {}), '(x, y)\n', (1802, 1808), False, 'from ephysiopy.common import utils\n'), ((1858, 1885), 'ephysiopy.common.utils.polar', 'utils.polar', (['x', 'y'], {'deg': '(True)'}), '(x, y, deg=True)\n', (1869, 1885), False, 'from ephysiopy.common import utils\n'), ((2047, 2080), 'ephysiopy.common.utils.bwperim', 'utils.bwperim', (['basic_ratemap'], {'n': '(8)'}), '(basic_ratemap, n=8)\n', (2060, 2080), False, 'from ephysiopy.common import utils\n'), ((142, 167), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (155, 167), False, 'import pytest\n'), ((217, 242), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (230, 242), False, 'import pytest\n'), ((366, 391), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (379, 391), False, 'import pytest\n'), ((401, 443), 'ephysiopy.common.utils.smooth', 'utils.smooth', (['x'], {'window': '"""deliberate_error"""'}), "(x, window='deliberate_error')\n", (413, 443), False, 'from ephysiopy.common import utils\n'), ((1028, 1052), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1041, 1052), False, 'import pytest\n'), ((1249, 1273), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (1262, 1273), False, 'import pytest\n'), ((1592, 1605), 'numpy.rad2deg', 'np.rad2deg', (['x'], {}), '(x)\n', (1602, 1605), True, 'import numpy as np\n'), ((1615, 1628), 'numpy.rad2deg', 'np.rad2deg', (['y'], {}), '(y)\n', (1625, 1628), True, 'import numpy as np\n'), ((1968, 1993), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1981, 1993), False, 
'import pytest\n'), ((2003, 2036), 'ephysiopy.common.utils.bwperim', 'utils.bwperim', (['basic_ratemap'], {'n': '(2)'}), '(basic_ratemap, n=2)\n', (2016, 2036), False, 'from ephysiopy.common import utils\n'), ((190, 206), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (203, 206), True, 'import numpy as np\n'), ((860, 893), 'ephysiopy.common.utils.blurImage', 'utils.blurImage', (['rmap', '(3)'], {'ftype': 'f'}), '(rmap, 3, ftype=f)\n', (875, 893), False, 'from ephysiopy.common import utils\n'), ((1077, 1093), 'numpy.atleast_2d', 'np.atleast_2d', (['n'], {}), '(n)\n', (1090, 1093), True, 'import numpy as np\n'), ((1300, 1316), 'numpy.atleast_2d', 'np.atleast_2d', (['n'], {}), '(n)\n', (1313, 1316), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.