code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os
import numpy as np
import h5py
import multiprocessing
import argparse
def calc_syncParam(file):
    """Compute a Kuramoto-style synchronization order parameter for one run.

    Intended as the worker function handed to a multiprocessing pool.

    file : path to an HDF5 results file containing data/t_points,
        data/trajs and params/{nCompartments, nSim}.

    Returns (phases, syncParam) where
        phases    : (nSim, n_kept_times, nCompartments) phase angle of each
                    compartment at each kept time point,
        syncParam : (nSim, n_kept_times) magnitude of the mean phasor over
                    compartments (1 = fully synchronized).
    Both arrays are also written back into the file under /data,
    replacing any results from a previous run.
    """
    print('Reading {f}'.format(f=file.split(os.path.sep)[-2]))
    # NOTE(review): no mode is passed, so the dataset writes below rely on
    # h5py's old default append mode; recent h5py versions open read-only by
    # default -- confirm the h5py version or pass mode='r+' explicitly.
    with h5py.File(file) as d:
        t_points = d['data']['t_points'][:]
        # Discard the transient: keep only time points after t = 10.
        t_epr = np.where(t_points > 10)[0]
        nCompartments = d['params']['nCompartments'][()]
        nSim = d['params']['nSim'][()]
        phases = np.zeros((nSim, len(t_epr), nCompartments))
        syncParam = np.zeros((nSim, len(t_epr)))
        # s = np.zeros(nSim)
        # nt, nx = d['data']['trajs'][0, 0, t_epr, :].shape
        # rhos = np.zeros((nSim, nt - (nt + 1) % 2, nx - (nx + 1) % 2))
        for traj_index, traj in enumerate(d['data']['trajs'][..., t_epr, :]):
            # Displacement of each compartment from its own time-mean
            # position (traj[0]/traj[1] are presumably x/y -- the
            # delta_x/delta_y names suggest so; confirm with the writer).
            delta_x = traj[0] - traj[0].mean()
            delta_y = traj[1] - traj[1].mean()
            # phases[traj_index] = np.unwrap(np.arctan2(delta_y, delta_x), axis=0)
            phases[traj_index] = np.arctan2(delta_y, delta_x)
            # |<exp(i*phase)>| over compartments: the order parameter.
            syncParam[traj_index] = np.abs(np.mean(np.exp(1j * phases[traj_index]), axis=1))
        # Delete stale datasets before re-creating them (h5py cannot
        # overwrite an existing dataset with create_dataset).
        if '/data/phases' in d:
            del d['data']['phases']
        d['data'].create_dataset('phases', data=phases)
        if '/data/syncParam' in d:
            del d['data']['syncParam']
        d['data'].create_dataset('syncParam', data=syncParam)
    return phases, syncParam
def main():
    """Parse command-line arguments and compute the synchronization order
    parameter for each given HDF5 file, fanning out over a process pool.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('files', type=str, nargs='+',
                        help='files to calculate the synchronization order parameter for')
    files = parser.parse_args().files
    # Cap the pool at 4 workers; never spawn more workers than files.
    nProcesses = min(len(files), 4)
    print('Calculating synchronization order parameters...')
    with multiprocessing.Pool(processes=nProcesses) as pool:
        result = pool.map(calc_syncParam, files)
    print('Done.')


# Guarding the entry point is required for multiprocessing on spawn-based
# platforms (Windows/macOS): workers re-import this module, and without the
# guard they would recursively re-run the argument parsing and pool setup.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"numpy.where",
"h5py.File",
"numpy.exp",
"numpy.arctan2",
"multiprocessing.Pool"
] | [((1477, 1502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1500, 1502), False, 'import argparse\n'), ((1795, 1837), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'nProcesses'}), '(processes=nProcesses)\n', (1815, 1837), False, 'import multiprocessing\n'), ((285, 300), 'h5py.File', 'h5py.File', (['file'], {}), '(file)\n', (294, 300), False, 'import h5py\n'), ((367, 390), 'numpy.where', 'np.where', (['(t_points > 10)'], {}), '(t_points > 10)\n', (375, 390), True, 'import numpy as np\n'), ((1052, 1080), 'numpy.arctan2', 'np.arctan2', (['delta_y', 'delta_x'], {}), '(delta_y, delta_x)\n', (1062, 1080), True, 'import numpy as np\n'), ((1132, 1165), 'numpy.exp', 'np.exp', (['(1.0j * phases[traj_index])'], {}), '(1.0j * phases[traj_index])\n', (1138, 1165), True, 'import numpy as np\n')] |
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from scipy import optimize, stats
def impulse(t, h0, h1p, h2, t1, t2, beta):
    """Two-sigmoid impulse model (see the ImpulseDE2 paper).

    A rising sigmoid (transition at t1, from baseline h0 toward peak h1p)
    is multiplied by a falling sigmoid (transition at t2, from h1p toward
    final level h2) and scaled by 1 / (h1p + 1).  `beta` is the shared
    log-steepness of both transitions.
    """
    slope = np.exp(beta)
    onset = h0 + (h1p - h0) / (1. + np.exp(-slope * (t - t1)))
    offset = h2 + (h1p - h2) / (1. + np.exp(slope * (t - t2)))
    return 1. / (h1p + 1) * onset * offset
def make_p0(run_data):
    """Reasonable parameter initialization for the impulse model.

    Parameters
    ----------
    run_data : pd.DataFrame
        One row per sample with 'hour' and 'expr' columns (as produced
        by `make_run_data`, whose index carries the sample labels).

    Returns
    -------
    tuple
        (h0, h1, h2, t1, t2, beta) start values for `optimize.leastsq`.
    """
    # Use positional indexing throughout.  `Series.argmax`/`argmin` return
    # *positions*, but the original fed them to label-based `.loc`, which
    # raises KeyError whenever run_data's index is not 0..n-1 (it is the
    # expression matrix's sample labels in practice).
    peak = run_data.iloc[run_data['expr'].values.argmax()]
    h1_init = peak['expr']
    # Onset/offset times start halfway between the first/last sampled hour
    # and the time of peak expression.
    t1_init = (run_data['hour'].min() + peak['hour']) / 2
    t2_init = (run_data['hour'].max() + peak['hour']) / 2
    beta_init = -1.
    # h0 and h2 start at 0; the fit recovers the baseline levels itself
    # (the original computed endpoint expressions but never used them).
    return (0., h1_init, 0., t1_init, t2_init, beta_init)
def make_run_data(gene, t, expression_matrix):
    """Build the per-gene fitting table.

    Pairs each time point in `t` with the expression values of `gene`
    (one row of `expression_matrix`) in a two-column DataFrame with
    'hour' and 'expr' columns.
    """
    columns = {
        'hour': t,
        'expr': expression_matrix.loc[gene],
    }
    return pd.DataFrame(columns)
# Helpers for the optimize.leastsq function
def func(p, x, y):
    """Impulse-model prediction for parameter vector p at times x.

    `y` is not used here -- presumably kept so `func` and `residuals`
    share the (p, x, y) signature.
    """
    return impulse(x, *p)
def residuals(p, x, y):
    """Signed residuals (prediction minus data) for optimize.leastsq."""
    prediction = func(p, x, y)
    return prediction - y
def impulse_tests(t, expression_matrix, maxfev=50):
    """Least-squares version of the ImpulseDE2 differential-expression test.

    t : Series or vector of time values, one per sample.
    expression_matrix : DataFrame indexed by gene.
        NOTE(review): the original docstring said "columns are genes and
        rows are samples", but the code indexes `expression_matrix.loc[gene]`
        with gene names and uses `shape[1]` as the sample count, i.e. rows
        are genes and columns are samples -- confirm with callers.
    maxfev : maximum number of function evaluations per gene; higher values
        give better accuracy at the cost of speed. Default is 50.

    Returns a DataFrame (one row per gene) with the fitted impulse
    parameters, residual sums, log-likelihood ratio and chi-squared p-value.
    """
    t = np.array(t)
    # n = number of samples, used in the Gaussian log-likelihoods below.
    n = expression_matrix.shape[1]
    gene_params = pd.DataFrame(index=expression_matrix.index,
                               columns=['h0', 'h1', 'h2', 't1', 't2', 'beta', \
                                        'llr', 'pval', 'res_alt', 'res_null'])
    # leastsq emits RuntimeWarnings (overflow in exp etc.) for poorly
    # conditioned genes; silence them for the whole fitting loop.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', category=RuntimeWarning)
        for gene in tqdm(expression_matrix.index):
            run_data = make_run_data(gene, t, expression_matrix)
            # Alternative model: full impulse fit starting from make_p0.
            opt_res = optimize.leastsq(residuals,
                                       make_p0(run_data),
                                       args=(t, run_data.expr.values),
                                       maxfev=maxfev)
            gene_params.loc[gene, ['h0', 'h1', 'h2', 't1', 't2', 'beta']] = opt_res[0]
            res_alt = np.sum(np.square(residuals(opt_res[0], t, run_data.expr.values)))
            gene_params.loc[gene, 'res_alt'] = res_alt
            # Gaussian log-likelihood with the MLE variance res/n plugged in.
            ll_alt = -n / 2. * np.log(2 * np.pi) - n / 2. * np.log(res_alt / n) - n / 2.
            # Null model: constant expression at the sample mean.
            res_null = np.sum(np.square(run_data.expr.values.mean() - run_data.expr.values))
            gene_params.loc[gene, 'res_null'] = res_null
            ll_null = -n / 2. * np.log(2 * np.pi) - n / 2. * np.log(res_null / n) - n / 2.
            gene_params.loc[gene, 'llr'] = ll_alt - ll_null
    # A perfect null fit makes ll_null infinite; clamp those (and NaNs) to 0
    # so the chi-squared survival function below stays defined.
    gene_params['llr'] = gene_params['llr'].replace(-np.inf, 0).fillna(0)
    # Likelihood-ratio test: 2*llr ~ chi2 with df = 5 (six impulse
    # parameters versus the one-parameter constant model).
    gene_params['pval'] = stats.chi2.sf(2 * gene_params['llr'], df=5)
    return gene_params
| [
"tqdm.tqdm",
"warnings.catch_warnings",
"numpy.log",
"numpy.exp",
"numpy.array",
"scipy.stats.chi2.sf",
"warnings.simplefilter",
"pandas.DataFrame"
] | [((1044, 1106), 'pandas.DataFrame', 'pd.DataFrame', (["{'hour': t, 'expr': expression_matrix.loc[gene]}"], {}), "({'hour': t, 'expr': expression_matrix.loc[gene]})\n", (1056, 1106), True, 'import pandas as pd\n'), ((1674, 1685), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (1682, 1685), True, 'import numpy as np\n'), ((1740, 1873), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'expression_matrix.index', 'columns': "['h0', 'h1', 'h2', 't1', 't2', 'beta', 'llr', 'pval', 'res_alt', 'res_null']"}), "(index=expression_matrix.index, columns=['h0', 'h1', 'h2', 't1',\n 't2', 'beta', 'llr', 'pval', 'res_alt', 'res_null'])\n", (1752, 1873), True, 'import pandas as pd\n'), ((3133, 3176), 'scipy.stats.chi2.sf', 'stats.chi2.sf', (["(2 * gene_params['llr'])"], {'df': '(5)'}), "(2 * gene_params['llr'], df=5)\n", (3146, 3176), False, 'from scipy import optimize, stats\n'), ((1953, 1978), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1976, 1978), False, 'import warnings\n'), ((1988, 2044), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (2009, 2044), False, 'import warnings\n'), ((2065, 2094), 'tqdm.tqdm', 'tqdm', (['expression_matrix.index'], {}), '(expression_matrix.index)\n', (2069, 2094), False, 'from tqdm import tqdm\n'), ((2658, 2675), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2664, 2675), True, 'import numpy as np\n'), ((2687, 2706), 'numpy.log', 'np.log', (['(res_alt / n)'], {}), '(res_alt / n)\n', (2693, 2706), True, 'import numpy as np\n'), ((2911, 2928), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2917, 2928), True, 'import numpy as np\n'), ((2940, 2960), 'numpy.log', 'np.log', (['(res_null / n)'], {}), '(res_null / n)\n', (2946, 2960), True, 'import numpy as np\n'), ((414, 426), 'numpy.exp', 'np.exp', (['beta'], {}), '(beta)\n', (420, 426), True, 'import numpy as np\n'), ((339, 351), 'numpy.exp', 
'np.exp', (['beta'], {}), '(beta)\n', (345, 351), True, 'import numpy as np\n')] |
from univariate._base import Base
import matplotlib.pyplot as plt
import numpy as np
def plot(Distribution: Base, x: np.ndarray) -> None:
    """Render the distribution's PDF over `x` and save it under docs/img/.

    The output filename is derived from the distribution's class name,
    e.g. ``GaussianPDF.png``.  (Original TODO: plot several
    parameterizations at once with annotated legend values.)
    """
    density = Distribution.pdf(x)
    plt.plot(x, density)
    out_path = '../docs/img/' + type(Distribution).__name__ + 'PDF.png'
    plt.savefig(out_path)
# Smoke test: instantiate a standard Gaussian and plot its PDF.
# (Original note: unpack arguments and set constructor values on tuple.)
if __name__ == "__main__":
    from univariate.Infinite import Gaussian
    N = Gaussian()
plot(N, np.linspace(-1,1,1000)) | [
"univariate.Infinite.Gaussian",
"numpy.linspace"
] | [((556, 566), 'univariate.Infinite.Gaussian', 'Gaussian', ([], {}), '()\n', (564, 566), False, 'from univariate.Infinite import Gaussian\n'), ((580, 604), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (591, 604), True, 'import numpy as np\n')] |
# Copyright 2020 FZI Forschungszentrum Informatik
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
import random
class RandomAgent(object):
    """Basic random agent for DeepMind Lab.

    Each step picks one action uniformly at random -- skipping any action
    whose name appears in ``forbidden_actions`` -- and sets it to a random
    amount within that action's [min, max] range.
    """

    def __init__(self, action_spec, forbidden_actions=None):
        """action_spec: sequence of dicts with 'name', 'min' and 'max' keys."""
        self.action_spec = action_spec
        self.action_count = len(action_spec)
        # Fix: the original used a mutable default argument ([]), which is
        # shared across all instances; default to a fresh list instead.
        self.forbidden_actions = [] if forbidden_actions is None else forbidden_actions
        self.prev_action = None

    def step(self):
        """Choose a random amount of a randomly selected action."""
        # NOTE(review): this loops forever if every action is forbidden.
        action_choice = None
        while action_choice is None:
            action_choice = random.randint(0, self.action_count - 1)
            if self.action_spec[action_choice]['name'] in self.forbidden_actions:
                action_choice = None
        action_amount = random.randint(self.action_spec[action_choice]['min'],
                                       self.action_spec[action_choice]['max'])
        action = np.zeros([self.action_count], dtype=np.intc)
        action[action_choice] = action_amount
        return action

    def reset(self):
        self.prev_action = None
class RodentDynamicModel(object):
    """Toy positional model: positions are drawn from a 2-D Gaussian
    centered on the arena midpoint, with independent x/y axes whose
    3-sigma range spans half the arena in each dimension.
    """

    def __init__(self, min_x, min_y, max_x, max_y):
        self.size_x = max_x - min_x
        self.size_y = max_y - min_y
        # Center of the arena.
        self.mu_x = min_x + self.size_x / 2.
        self.mu_y = min_y + self.size_y / 2.
        # Diagonal covariance: sigma = size / 3 per axis.
        self.cov = np.diag([(self.size_x / 3.) ** 2.0, (self.size_y / 3.) ** 2.])

    def step(self):
        """Draw and return one (x, y) position sample.

        Fixes the original, which passed size=(1, 2) -- producing a
        (1, 2, 2) array that cannot be unpacked into two scalars -- and
        then discarded the sample.  A single draw yields the intended
        (x, y) pair, which is now returned.
        """
        x, y = np.random.multivariate_normal([self.mu_x, self.mu_y], self.cov)
        return x, y
class GaussRandomAgent(object):
    """Agent whose speed is sampled categorically and whose turning is
    Gaussian noise smoothed by a small momentum term.
    """

    def __init__(self, action_spec, forbidden_actions=None):
        self.action_spec = action_spec
        self.action_count = len(action_spec)
        # Fix: avoid the shared mutable default argument ([]); each
        # instance gets its own list.
        self.forbidden_actions = [] if forbidden_actions is None else forbidden_actions
        self.max_speed = 1.
        self.max_rotation = 512
        self.reset()

    def step(self, ego_vel_trans, ego_vel_rot):
        """Sample the next action array.

        ego_vel_trans / ego_vel_rot are currently unused; a
        velocity-coupled rotation model was sketched in the original
        (see the commented-out von Mises draw) but left disabled.
        """
        action = np.zeros([self.action_count], dtype=np.intc)
        # Forward speed: mostly forward (p=0.7), sometimes idle, rarely back.
        speed = np.random.choice([-1, 0, 1], p=[0.05, 0.25, 0.7])
        action[3] = speed
        # Gaussian turn command, clipped to +/- max_rotation pixels.
        left_right_pixel = max(min(self.max_rotation * np.random.normal(0.0, 0.2),
                                   self.max_rotation), -self.max_rotation)
        # left_right_pixel = self.max_rotation * np.random.vonmises(
        #     ego_vel_trans * self.max_rotation, 1.)
        # Momentum: carry 20% of the previous turn into this one.
        action[0] = left_right_pixel + 0.2 * self.prev_action[0]
        self.prev_action_count += 1
        self.prev_action = action
        return action

    def reset(self):
        self.prev_action_count = 0
        self.prev_action = np.zeros([self.action_count], dtype=np.intc)
        self.prev_amount = None
| [
"numpy.random.normal",
"numpy.random.multivariate_normal",
"numpy.random.choice",
"numpy.diag",
"numpy.zeros",
"random.randint"
] | [((1407, 1506), 'random.randint', 'random.randint', (["self.action_spec[action_choice]['min']", "self.action_spec[action_choice]['max']"], {}), "(self.action_spec[action_choice]['min'], self.action_spec[\n action_choice]['max'])\n", (1421, 1506), False, 'import random\n'), ((1558, 1602), 'numpy.zeros', 'np.zeros', (['[self.action_count]'], {'dtype': 'np.intc'}), '([self.action_count], dtype=np.intc)\n', (1566, 1602), True, 'import numpy as np\n'), ((1990, 2055), 'numpy.diag', 'np.diag', (['[(self.size_x / 3.0) ** 2.0, (self.size_y / 3.0) ** 2.0]'], {}), '([(self.size_x / 3.0) ** 2.0, (self.size_y / 3.0) ** 2.0])\n', (1997, 2055), True, 'import numpy as np\n'), ((2081, 2152), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['[self.mu_x, self.mu_y]', 'self.cov', '(1, 2)'], {}), '([self.mu_x, self.mu_y], self.cov, (1, 2))\n', (2110, 2152), True, 'import numpy as np\n'), ((2629, 2673), 'numpy.zeros', 'np.zeros', (['[self.action_count]'], {'dtype': 'np.intc'}), '([self.action_count], dtype=np.intc)\n', (2637, 2673), True, 'import numpy as np\n'), ((2690, 2739), 'numpy.random.choice', 'np.random.choice', (['[-1, 0, 1]'], {'p': '[0.05, 0.25, 0.7]'}), '([-1, 0, 1], p=[0.05, 0.25, 0.7])\n', (2706, 2739), True, 'import numpy as np\n'), ((3255, 3299), 'numpy.zeros', 'np.zeros', (['[self.action_count]'], {'dtype': 'np.intc'}), '([self.action_count], dtype=np.intc)\n', (3263, 3299), True, 'import numpy as np\n'), ((1223, 1263), 'random.randint', 'random.randint', (['(0)', '(self.action_count - 1)'], {}), '(0, self.action_count - 1)\n', (1237, 1263), False, 'import random\n'), ((2823, 2849), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.2)'], {}), '(0.0, 0.2)\n', (2839, 2849), True, 'import numpy as np\n')] |
#!python3
import numpy as np
import matplotlib.pyplot as plt
def set_aspect(ax, ratio):
xleft, xright = ax.get_xlim()
ybottom, ytop = ax.get_ylim()
ax.set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
def main():
hour = np.arange(24)
power = np.array([1.24,1.82,1.32,1.57,1.72,1.84,1.99,5.3,6.49,3.98,3.35,2.98,2.09,4.76,2.51,2.46,3.47,6.02,7.52,7.08,5,4.08,1.63,0.41])
plt.grid(True)
plt.plot(hour, power, '.:')
plt.xticks(np.arange(0, 23+1, 2.0))
plt.xlabel("Time of day [hour]")
plt.ylabel("Average electric power [kW]")
set_aspect(plt.gca(), 0.5)
plt.savefig('hourly_power.png', dpi=300, bbox_inches='tight')
plt.show()
if __name__ == '__main__':
main() | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((230, 243), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (239, 243), True, 'import numpy as np\n'), ((253, 408), 'numpy.array', 'np.array', (['[1.24, 1.82, 1.32, 1.57, 1.72, 1.84, 1.99, 5.3, 6.49, 3.98, 3.35, 2.98, \n 2.09, 4.76, 2.51, 2.46, 3.47, 6.02, 7.52, 7.08, 5, 4.08, 1.63, 0.41]'], {}), '([1.24, 1.82, 1.32, 1.57, 1.72, 1.84, 1.99, 5.3, 6.49, 3.98, 3.35, \n 2.98, 2.09, 4.76, 2.51, 2.46, 3.47, 6.02, 7.52, 7.08, 5, 4.08, 1.63, 0.41])\n', (261, 408), True, 'import numpy as np\n'), ((382, 396), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (390, 396), True, 'import matplotlib.pyplot as plt\n'), ((398, 425), 'matplotlib.pyplot.plot', 'plt.plot', (['hour', 'power', '""".:"""'], {}), "(hour, power, '.:')\n", (406, 425), True, 'import matplotlib.pyplot as plt\n'), ((464, 496), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time of day [hour]"""'], {}), "('Time of day [hour]')\n", (474, 496), True, 'import matplotlib.pyplot as plt\n'), ((498, 539), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Average electric power [kW]"""'], {}), "('Average electric power [kW]')\n", (508, 539), True, 'import matplotlib.pyplot as plt\n'), ((569, 630), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""hourly_power.png"""'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('hourly_power.png', dpi=300, bbox_inches='tight')\n", (580, 630), True, 'import matplotlib.pyplot as plt\n'), ((632, 642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (640, 642), True, 'import matplotlib.pyplot as plt\n'), ((438, 463), 'numpy.arange', 'np.arange', (['(0)', '(23 + 1)', '(2.0)'], {}), '(0, 23 + 1, 2.0)\n', (447, 463), True, 'import numpy as np\n'), ((552, 561), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (559, 561), True, 'import matplotlib.pyplot as plt\n')] |
import typing as t
import numpy as np
from gym.spaces import Box
def box_action_scaler(action_space: Box) -> t.Callable[[np.ndarray], np.ndarray]:
    """Build a squashing function that maps unbounded action vectors into
    the given Box: a sigmoid sends each component into (0, 1), which is
    then affinely rescaled to (low, high).
    """
    low = action_space.low
    span = action_space.high - action_space.low

    def _squash(action: np.ndarray) -> np.ndarray:
        return span / (1.0 + np.exp(-action)) + low

    return _squash
| [
"numpy.exp"
] | [((269, 284), 'numpy.exp', 'np.exp', (['(-action)'], {}), '(-action)\n', (275, 284), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Compare automatic block id using "training" set from human transcription
#
import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser
from glob import glob
from pathlib import Path
from psycopg2.extras import RealDictCursor
from time import localtime, strftime
from fuzzywuzzy import fuzz
import pandas as pd
from datetime import date
from tqdm import tqdm
import numpy as np
from multiprocessing import Pool
ver = "0.2.1"
##Import settings from settings.py file
import settings
############################################
# Logging
############################################
# One timestamped DEBUG-level log file per run under logs/, plus an
# INFO-level echo to the console.
if not os.path.exists('logs'):
    os.makedirs('logs')
current_time = strftime("%Y%m%d%H%M%S", localtime())
logfile_name = 'comparison_{}.log'.format(current_time)
logfile = 'logs/{logfile_name}'.format(logfile_name = logfile_name)
# from http://stackoverflow.com/a/9321890
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M:%S',
                    filename=logfile,
                    filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logger1 = logging.getLogger("compare")
logging.getLogger('compare').addHandler(console)
logger1.info("compare version {}".format(ver))
############################################
#OCR Database
# Autocommit connection to the OCR results database; credentials come from
# settings.py.  RealDictCursor makes fetchall() return dict rows, which feed
# straight into pd.DataFrame below.
conn = psycopg2.connect(host = settings.ocr_host, database = settings.ocr_db, user = settings.ocr_user, password = settings.ocr_password, connect_timeout = 60)
conn.autocommit = True
db_cursor = conn.cursor(cursor_factory=RealDictCursor)
# SQL: gather the distinct human-transcribed values for each label field of
# interest (collector, dates, localities, ...).  Each UNION arm pulls one
# field from the *reference* partition of the sample (ref_or_test = 'ref')
# at the given reference_size, tagging every value with its source field.
query_transcription = """
SELECT
DISTINCT collector as data,
'collector' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT verbatim_date as data,
'verbatim_date' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT verbatim_locality as data,
'verbatim_locality' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT country as data,
'country' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT state_territory as data,
'state_territory' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT district_county as data,
'district_county' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT precise_locality as data,
'precise_locality' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
UNION
SELECT
DISTINCT elevation as data,
'elevation' as field
FROM
ocr_transcription_ento
WHERE
replace(filename, '.jpg', '') IN (
SELECT
filename
FROM
ocr_auto_sample
WHERE
reference_size = %(refsize)s AND
ref_or_test = 'ref'
)
"""
# SQL: reassemble each high-confidence (>= 0.85) OCR block of the *test*
# partition into a single space-joined text string, ordered by page, block,
# line and word position.
query_test = """
SELECT
b.document_id,
replace(d.filename, '.jpg', '') as filename,
b.block::text,
string_agg(a.word_text, ' ') as block_text
FROM
ocr_blocks b,
ocr_documents d,
(
SELECT
document_id,
block,
word_line,
word,
word_text
FROM
ocr_entries
WHERE
document_id IN
(
SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
(
SELECT
filename
FROM
ocr_auto_sample
WHERE
ref_or_test = 'test' AND
reference_size = %(refsize)s
)
)
ORDER BY
page, block, word_line, word
) a
WHERE
d.document_id = b.document_id AND
a.document_id = b.document_id AND
a.block = b.block AND
b.confidence >= 0.85
GROUP BY
b.document_id,
b.block,
d.filename
"""
# db_cursor.execute("DELETE FROM ocr_transcription_ento_auto")
# db_cursor.execute("VACUUM ocr_transcription_ento_auto")
# #for refsize in ['0.05', '0.1', '0.2', '0.3', '0.4', '0.5']:
# for refsize in ['0.05', '0.1', '0.2']:
# print(refsize)
# db_cursor.execute(query_transcription, {'refsize': refsize})
# logger1.debug(db_cursor.query.decode("utf-8"))
# transcription_data = pd.DataFrame(db_cursor.fetchall())
# db_cursor.execute(query_test, {'refsize': refsize})
# logger1.debug(db_cursor.query.decode("utf-8"))
# test_data = pd.DataFrame(db_cursor.fetchall())
# for data_type in transcription_data['field'].unique():
# print("Processing {}...\n".format(data_type))
# for index, record in test_data.iterrows():
# #split string into all possible sequences
# logger1.debug(record['block_text'])
# block_text = record['block_text'].split(' ')
# len_block_text = len(block_text)
# text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# for i in range(len_block_text-1):
# for j in range(i+1, len_block_text):
# #print(i, j)
# this_text = ' '.join(block_text[i:j])
# #Get alpha chars only
# alpha_block = re.sub(r'\W+ ,-/', '', this_text)
# #Add space after periods
# alpha_block = ' '.join(alpha_block.split()).replace(' .', '.').replace('.', '. ').strip()
# if len(alpha_block) > 3:
# #print(this_text)
# text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# logger1.debug(this_text)
# results_df = pd.DataFrame(columns=('data', 'field', 'text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# for ind, rcrd in text_to_test.iterrows():
# #tr_data = transcription_data.copy()
# tr_data = transcription_data[transcription_data.field == data_type].copy()
# tr_data['score1'] = tr_data.apply(lambda row : fuzz.partial_ratio(rcrd['text'].lower(), row['data'].lower()), axis = 1)
# tr_data['score2'] = tr_data.apply(lambda row : fuzz.ratio(rcrd['text'].lower(), row['data'].lower()), axis = 1)
# tr_data['score'] = tr_data.apply(lambda row : row['score1'] + row['score2'], axis = 1).astype(int)
# tr_data['score3'] = tr_data.apply(lambda row : fuzz.token_set_ratio(rcrd['text'].lower(), row['data'].lower()), axis = 1)
# tr_data['text'] = rcrd['text']
# tr_data['string_len'] = rcrd['string_len']
# results_df = results_df.append(tr_data)
# results_df['score'] = pd.to_numeric(results_df['score'])
# results_df['score3'] = pd.to_numeric(results_df['score3'])
# results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# res = results_df.nlargest(1, ['score', 'string_len'])
# if res.shape[0] > 0:
# if res.iloc[0]['score'] > settings.insert_min:
# db_cursor.execute("INSERT INTO ocr_transcription_ento_auto (filename, {field}, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = res.iloc[0]['field']), {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize})
# logger1.info(db_cursor.query.decode("utf-8"))
# else:
# #Check for token_set_ratio
# max_score = results_df['score3'].max()
# res_top = results_df[results_df.score3 == max_score]
# #Choose string with the least number of words that has the max score
# res = results_df.nsmallest(1, 'string_len')
# if res.shape[0] > 0:
# if res.iloc[0]['score3'] > settings.token_set_ratio_min:
# db_cursor.execute("INSERT INTO ocr_transcription_ento_auto (filename, {field}, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = res.iloc[0]['field']), {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize})
# logger1.info(db_cursor.query.decode("utf-8"))
# #Cleanup
# for refsize in ['0.05', '0.1', '0.2']:
# db_cursor.execute(query_transcription, {'refsize': refsize})
# transcription_data = pd.DataFrame(db_cursor.fetchall())
# for data_type in transcription_data['field'].unique():
# db_cursor.execute("UPDATE ocr_transcription_ento_auto SET {field} = REPLACE({field}, '. , ', '., ')".format(field = data_type))
# logger1.info(db_cursor.query.decode("utf-8"))
##################
#GIS database
# Connection to the GIS (GADM) gazetteer database; used only to load the
# country list below, then closed again.
conn2 = psycopg2.connect(host = settings.gis_host, database = settings.gis_db, user = settings.gis_user, password = settings.gis_password, connect_timeout = 60)
db_cursor2 = conn2.cursor(cursor_factory=RealDictCursor)
# #Get state/provinces from GIS database
# db_cursor2.execute("SELECT name_1 as name, name_0 as country, 'locality:state' as name_type, uid FROM gadm1")
# states = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# #Get countries from GIS database
# Country gazetteer: one row per GADM level-0 unit; consumed as a module
# global by match_country().
db_cursor2.execute("SELECT name_0 as name, 'locality:country' as name_type, uid FROM gadm0")
countries = pd.DataFrame(db_cursor2.fetchall())
logger1.debug(db_cursor2.query.decode("utf-8"))
# #Get counties, state
# db_cursor2.execute("SELECT name_2 || ' Co., ' || name_1 as name, 'locality:county' as name_type, uid FROM gadm2 WHERE name_0 = 'United States' AND type_2 = 'County'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = pd.DataFrame(counties)
# db_cursor2.execute("SELECT name_2 || ' ' || type_2 || ', ' || name_1 as name, 'locality:county' as name_type, uid FROM gadm2 WHERE name_0 = 'United States'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = counties_list.append(counties, ignore_index=True)
# db_cursor2.execute("SELECT DISTINCT g.name_2 || ', ' || s.abbreviation as name, 'locality:county' as name_type, g.uid FROM gadm2 g, us_state_abbreviations s WHERE g.name_1 = s.state AND g.name_0 = 'United States'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = counties_list.append(counties, ignore_index=True)
# db_cursor2.execute("SELECT DISTINCT g.name_2 || ' Co., ' || s.abbreviation as name, 'locality:county' as name_type, g.name_1 AS state, g.name_0 as country, g.uid FROM gadm2 g, us_state_abbreviations s WHERE g.name_1 = s.state AND g.name_0 = 'United States'")
# counties = pd.DataFrame(db_cursor2.fetchall())
# logger1.debug(db_cursor2.query.decode("utf-8"))
# counties_list = counties_list.append(counties, ignore_index=True)
# #Close GIS database connection
# Gazetteer loaded; release the GIS connection (the OCR connection stays open).
db_cursor2.close()
conn2.close()
# ##################
# db_cursor.execute("DROP TABLE ocr_transcription_ento_auto_geo")
# db_cursor.execute("CREATE TABLE ocr_transcription_ento_auto_geo AS SELECT * FROM ocr_transcription_ento_auto")
# db_cursor.execute("ALTER TABLE ocr_transcription_ento_auto_geo ADD CONSTRAINT ocr_tra_ento_auto_geo_c UNIQUE (filename, reference_size)")
# #country
# SQL: identical to query_test -- space-joined text of every high-confidence
# OCR block in the test partition; named separately for the country-matching
# pass below.
query_country = """
SELECT
b.document_id,
replace(d.filename, '.jpg', '') as filename,
b.block::text,
string_agg(a.word_text, ' ') as block_text
FROM
ocr_blocks b,
ocr_documents d,
(
SELECT
document_id,
block,
word_line,
word,
word_text
FROM
ocr_entries
WHERE
document_id IN
(
SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
(
SELECT
filename
FROM
ocr_auto_sample
WHERE
ref_or_test = 'test' AND
reference_size = %(refsize)s
)
)
ORDER BY
page, block, word_line, word
) a
WHERE
d.document_id = b.document_id AND
a.document_id = b.document_id AND
a.block = b.block AND
b.confidence >= 0.85
GROUP BY
b.document_id,
b.block,
d.filename
"""
# query_state = """
# SELECT
# b.document_id,
# replace(d.filename, '.jpg', '') as filename,
# b.block::text,
# string_agg(a.word_text, ' ') as block_text
# FROM
# ocr_blocks b,
# ocr_documents d,
# (
# SELECT
# document_id,
# block,
# word_line,
# word,
# word_text
# FROM
# ocr_entries
# WHERE
# document_id IN
# (
# SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
# (
# SELECT
# filename
# FROM
# ocr_auto_sample
# WHERE
# ref_or_test = 'test' AND
# reference_size = %(refsize)s
# )
# )
# ORDER BY
# page, block, word_line, word
# ) a
# WHERE
# d.document_id = b.document_id AND
# a.document_id = b.document_id AND
# a.block = b.block AND
# b.confidence >= 0.85
# GROUP BY
# b.document_id,
# b.block,
# d.filename
# """
# query_county = """
# SELECT
# b.document_id,
# replace(d.filename, '.jpg', '') as filename,
# b.block::text,
# string_agg(a.word_text, ' ') as block_text
# FROM
# ocr_blocks b,
# ocr_documents d,
# (
# SELECT
# document_id,
# block,
# word_line,
# word,
# word_text
# FROM
# ocr_entries
# WHERE
# document_id IN
# (
# SELECT document_id FROM ocr_documents WHERE replace(filename, '.jpg', '') IN
# (
# SELECT
# filename
# FROM
# ocr_auto_sample
# WHERE
# ref_or_test = 'test' AND
# reference_size = %(refsize)s
# )
# )
# ORDER BY
# page, block, word_line, word
# ) a
# WHERE
# d.document_id = b.document_id AND
# a.document_id = b.document_id AND
# a.block = b.block AND
# b.confidence >= 0.85
# GROUP BY
# b.document_id,
# b.block,
# d.filename
# """
def match_country(this_record):
    """Fuzzy-match the country name contained in one OCR'd text block.

    this_record : a DataFrame chunk (from np.array_split) whose first row
        has document_id, filename, block and block_text columns; chunks
        may be empty, in which case None is returned.

    Every contiguous sub-phrase of the block text (longer than 3 chars
    once cleaned) is scored against the GADM country gazetteer with
    fuzz.ratio; if the best score exceeds settings.geo_min the matched
    country name is upserted into ocr_transcription_ento_auto.

    Relies on module-level globals: countries, db_cursor, settings,
    logger1 and the loop variable refsize.

    Returns the 1-row DataFrame with the best-scoring candidate (or None
    for an empty chunk).
    """
    try:
        record = this_record.iloc[0]
    # NOTE(review): bare except treats *any* failure as "empty chunk";
    # narrowing to IndexError would avoid masking real bugs.
    except:
        return
    logger1.debug(record['block_text'])
    block_text = record['block_text'].split(' ')
    len_block_text = len(block_text)
    # Candidate sub-phrases with the length of their cleaned-up text.
    text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
    for i in range(len_block_text-1):
        for j in range(i+1, len_block_text):
            #print(i, j)
            #this_text = ' '.join(block_text[i:j])
            this_text = ' '.join(map(str, block_text[i:j]))
            # NOTE(review): this pattern matches the literal sequence
            # "\W+ ,-/" -- a character class like [\W ,-/] may have been
            # intended; confirm before changing, since scores depend on it.
            alpha_block = re.sub(r'\W+ ,-/', '', this_text)
            #Add space after periods
            alpha_block = ' '.join(alpha_block.split()).replace(' .', '.').replace('.', '. ').strip()
            if len(alpha_block) > 3:
                #print(this_text)
                # NOTE(review): DataFrame.append is removed in pandas 2.x;
                # accumulating dicts in a list and building one DataFrame at
                # the end would be faster and forward-compatible.
                text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
                logger1.debug(this_text)
    results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
    results_df['score2'] = pd.to_numeric(results_df['score2'])
    results_df['string_len'] = pd.to_numeric(results_df['string_len'])
    for idx, rcrd in text_to_test.iterrows():
        tr_data = countries[['name', 'uid']].copy()
        # Case-insensitive similarity of the candidate phrase to each country name.
        tr_data['score2'] = tr_data.apply(lambda row : fuzz.ratio(rcrd['text'].lower(), row['name'].lower()), axis = 1)
        tr_data['text'] = rcrd['text']
        tr_data['string_len'] = rcrd['string_len']
        results_df = results_df.append(tr_data)
    # Best match: highest similarity; ties broken by the longer candidate.
    res = results_df.nlargest(1, ['score2', 'string_len'])
    if res.shape[0] > 0:
        logger1.info(res)
        if res.iloc[0]['score2'] > settings.geo_min:
            # Upsert the gazetteer name (not the raw OCR text) for this file.
            db_cursor.execute("INSERT INTO ocr_transcription_ento_auto (filename, {field}, reference_size) VALUES (%(filename)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = 'country'), {'filename': record['filename'], 'text': res.iloc[0]['name'], 'reference_size': refsize})
            logger1.info(db_cursor.query.decode("utf-8"))
    return res
#match_country(record)
#Check for country
#for refsize in ['0.05', '0.1', '0.2', '0.3', '0.4', '0.5']:
# For each sampled reference size: fetch the high-confidence OCR blocks of
# the test partition and fuzzy-match countries in parallel.
for refsize in ['0.05', '0.1', '0.2']:
    print(refsize)
    db_cursor.execute(query_country, {'refsize': refsize})
    logger1.debug(db_cursor.query.decode("utf-8"))
    test_data = pd.DataFrame(db_cursor.fetchall())
    # NOTE(review): test_data.size is rows * columns, so this splits into
    # more chunks than rows (many of them empty) -- which is why
    # match_country tolerates empty chunks.  len(test_data) would be the
    # natural chunk count; confirm before changing.
    df_split = np.array_split(test_data, test_data.size)
    pool = Pool(settings.pool_workers)
    df = pd.concat(pool.map(match_country, df_split))
    pool.close()
    pool.join()
# # for index, record in test_data.iterrows():
# # #split string into all possible sequences
# # logger1.info(record['block_text'])
# # block_text = record['block_text'].split(' ')
# # len_block_text = len(block_text)
# # text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# # for i in range(len_block_text-1):
# # for j in range(i+1, len_block_text):
# # #print(i, j)
# # this_text = ' '.join(block_text[i:j])
# # alpha_block = re.sub(r'\W+ ,-/', '', this_text).strip()
# # if len(alpha_block) > 3:
# # #print(this_text)
# # text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# # logger1.debug(this_text)
# # results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# # results_df['score2'] = pd.to_numeric(results_df['score2'])
# # results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# # for idx, rcrd in text_to_test.iterrows():
# # tr_data = countries[['name', 'uid']].copy()
# # tqdm.pandas()
# # tr_data['score2'] = tr_data.progress_apply(lambda row : fuzz.ratio(rcrd['text'], row['name']), axis = 1)
# # tr_data['text'] = rcrd['text']
# # tr_data['string_len'] = rcrd['string_len']
# # results_df = results_df.append(tr_data)
# # res = results_df.nlargest(1, ['score2', 'string_len'])
# # if res.shape[0] > 0:
# # print(res)
# # if res.iloc[0]['score'] > settings.geo_min:
# # db_cursor.execute("INSERT INTO ocr_transcription_ento_auto_geo (filename, {field}, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = 'country'), {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize})
# # logger1.info(db_cursor.query.decode("utf-8"))
# def match_state(record):
# logger1.debug(record['block_text'])
# block_text = record['block_text'].str.split(' ').tolist()[0]
# len_block_text = len(block_text)
# text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# for i in range(len_block_text-1):
# for j in range(i+1, len_block_text):
# #print(i, j)
# #this_text = ' '.join(block_text[i:j])
# this_text = ' '.join(map(str, block_text[i:j]))
# alpha_block = re.sub(r'\W+ ,-/', '', this_text)
# #Add space after periods
# alpha_block = ' '.join(alpha_block.split()).replace(' .', '.').replace('.', '. ').strip()
# if len(alpha_block) > 3:
# #print(this_text)
# text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# logger1.debug(this_text)
# results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# results_df['score2'] = pd.to_numeric(results_df['score2'])
# results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# for idx, rcrd in text_to_test.iterrows():
# tr_data = states[['name', 'uid', 'country']].copy()
# tr_data['score2'] = tr_data.apply(lambda row : fuzz.ratio(rcrd['text'], row['name']), axis = 1)
# tr_data['text'] = rcrd['text']
# tr_data['string_len'] = rcrd['string_len']
# results_df = results_df.append(tr_data)
# res = results_df.nlargest(1, ['score2', 'string_len'])
# if res.shape[0] > 0:
# logger1.info(res)
# if res.iloc[0]['score'] > settings.geo_min:
# db_cursor.execute("INSERT INTO ocr_transcription_ento_auto_geo (filename, state_territory, country, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s", {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize, 'country': res.iloc[0]['country']})
# logger1.info(db_cursor.query.decode("utf-8"))
# return res
# #Check for state/province
# #for refsize in ['0.05', '0.1', '0.2', '0.3', '0.4', '0.5']:
# for refsize in ['0.05', '0.1', '0.2']:
# print(refsize)
# db_cursor.execute(query_state, {'refsize': refsize})
# logger1.debug(db_cursor.query.decode("utf-8"))
# test_data = pd.DataFrame(db_cursor.fetchall())
# df_split = np.array_split(test_data, settings.pool_workers * 4)
# pool = Pool(settings.pool_workers)
# df = pd.concat(pool.map(match_state, df_split))
# pool.close()
# pool.join()
# # for index, record in test_data.iterrows():
# # #split string into all possible sequences
# # logger1.info(record['block_text'])
# # block_text = record['block_text'].split(' ')
# # len_block_text = len(block_text)
# # text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# # for i in range(len_block_text-1):
# # for j in range(i+1, len_block_text):
# # #print(i, j)
# # this_text = ' '.join(block_text[i:j])
# # alpha_block = re.sub(r'\W+ ,-/', '', this_text).strip()
# # if len(alpha_block) > 3:
# # #print(this_text)
# # text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# # logger1.debug(this_text)
# # results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# # results_df['score2'] = pd.to_numeric(results_df['score2'])
# # results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# # for idx, rcrd in text_to_test.iterrows():
# # tr_data = states[['name', 'uid']].copy()
# # tqdm.pandas()
# # tr_data['score2'] = tr_data.progress_apply(lambda row : fuzz.ratio(rcrd['text'], row['name']), axis = 1)
# # tr_data['text'] = rcrd['text']
# # tr_data['string_len'] = rcrd['string_len']
# # results_df = results_df.append(tr_data)
# # res = results_df.nlargest(1, ['score2', 'string_len'])
# # if res.shape[0] > 0:
# # print(res)
# # if res.iloc[0]['score'] > settings.geo_min:
# # db_cursor.execute("INSERT INTO ocr_transcription_ento_auto_geo (filename, {field}, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = 'state_territory'), {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize})
# # logger1.info(db_cursor.query.decode("utf-8"))
# def match_county(record):
# logger1.debug(record['block_text'])
# block_text = record['block_text'].str.split(' ').tolist()[0]
# len_block_text = len(block_text)
# text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# for i in range(len_block_text-1):
# for j in range(i+1, len_block_text):
# #print(i, j)
# #this_text = ' '.join(block_text[i:j])
# this_text = ' '.join(map(str, block_text[i:j]))
# alpha_block = re.sub(r'\W+ ,-/', '', this_text)
# #Add space after periods
# alpha_block = ' '.join(alpha_block.split()).replace(' .', '.').replace('.', '. ').strip()
# if len(alpha_block) > 3:
# #print(this_text)
# text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# logger1.debug(this_text)
# results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# results_df['score2'] = pd.to_numeric(results_df['score2'])
# results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# for idx, rcrd in text_to_test.iterrows():
# tr_data = counties_list[['name', 'uid', 'state', 'country']].copy()
# tr_data['score2'] = tr_data.apply(lambda row : fuzz.ratio(rcrd['text'], row['name']), axis = 1)
# tr_data['text'] = rcrd['text']
# tr_data['string_len'] = rcrd['string_len']
# results_df = results_df.append(tr_data)
# res = results_df.nlargest(1, ['score2', 'string_len'])
# if res.shape[0] > 0:
# logger1.info(res)
# if res.iloc[0]['score'] > settings.geo_min:
# db_cursor.execute("INSERT INTO ocr_transcription_ento_auto_geo (filename, district_county, state_territory, country, reference_size) VALUES (%(document_id)s, %(text)s, %(state)s, %(country)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s", {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize, 'state': res.iloc[0]['state'], 'country': res.iloc[0]['country']})
# logger1.info(db_cursor.query.decode("utf-8"))
# return res
# #Check for Counties_list
# #for refsize in ['0.05', '0.1', '0.2', '0.3', '0.4', '0.5']:
# for refsize in ['0.05', '0.1', '0.2']:
# print(refsize)
# db_cursor.execute(query_county, {'refsize': refsize})
# logger1.debug(db_cursor.query.decode("utf-8"))
# test_data = pd.DataFrame(db_cursor.fetchall())
# df_split = np.array_split(test_data, settings.pool_workers * 4)
# pool = Pool(settings.pool_workers)
# df = pd.concat(pool.map(match_state, df_split))
# pool.close()
# pool.join()
# # for index, record in test_data.iterrows():
# # #split string into all possible sequences
# # logger1.info(record['block_text'])
# # block_text = record['block_text'].split(' ')
# # len_block_text = len(block_text)
# # text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# # for i in range(len_block_text-1):
# # for j in range(i+1, len_block_text):
# # #print(i, j)
# # this_text = ' '.join(block_text[i:j])
# # alpha_block = re.sub(r'\W+ ,-/', '', this_text).strip()
# # if len(alpha_block) > 3:
# # #print(this_text)
# # text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# # logger1.debug(this_text)
# # results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# # results_df['score2'] = pd.to_numeric(results_df['score2'])
# # results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# # for idx, rcrd in text_to_test.iterrows():
# # tr_data = counties_list[['name', 'uid']].copy()
# # tqdm.pandas()
# # tr_data['score2'] = tr_data.progress_apply(lambda row : fuzz.ratio(rcrd['text'], row['name']), axis = 1)
# # tr_data['text'] = rcrd['text']
# # tr_data['string_len'] = rcrd['string_len']
# # results_df = results_df.append(tr_data)
# # res = results_df.nlargest(1, ['score2', 'string_len'])
# # if res.shape[0] > 0:
# # print(res)
# # if res.iloc[0]['score'] > settings.geo_min:
# # db_cursor.execute("INSERT INTO ocr_transcription_ento_auto_geo (filename, {field}, reference_size) VALUES (%(document_id)s, %(text)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s".format(field = 'district_county'), {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize})
# # logger1.info(db_cursor.query.decode("utf-8"))
# #Date
# from_year = 1800
# #Iterate blocks
# for ocr_block in ocr_blocks:
# logger1.info("Block text: {}".format(ocr_block['block_text']))
# #Identify year
# #This year
# today = date.today()
# cur_year = today.strftime("%Y")
# interpreted_value = ""
# alpha_block = re.sub(r'\W+ ,-/', '', ocr_block['block_text']).strip()
# if len(alpha_block) < 5 or len(re.sub(r'\W+', '', ocr_block['block_text']).strip()) < 5:
# #Too short to parse
# alpha_block_yr = re.sub(r'\W+', '', alpha_block).strip()
# if len(alpha_block_yr) == 4:
# #Year
# try:
# for y in range(from_year, int(cur_year)):
# if int(alpha_block_yr) == y:
# interpreted_value = "{}".format(alpha_block_yr)
# db_cursor.execute(insert_q, {'document_id': ocr_block['document_id'], 'block_id': ocr_block['block'], 'data_type': 'verbatim_date', 'data_format': 'Date (year)', 'interpreted_value': interpreted_value, 'verbatim_value': alpha_block, 'data_source': '', 'match_score': 0})
# logger1.info('Date (year): {}'.format(interpreted_value))
# for i in range(from_year, int(cur_year)):
# if interpreted_value == "":
# if str(i) in ocr_block['block_text']:
# #Check if can directly parse the date
# for d_format in ['DMY', 'YMD', 'MDY']:
# if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
# this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
# interpreted_value = this_date.strftime("%Y-%m-%d")
# verbatim_value = alpha_block
# continue
# #Check if there is a month in roman numerals
# roman_month = {"I": "Jan", "II": "Feb", "III": "Mar", "IV": "Apr", "V": "May", "VI": "Jun", "VII": "Jul", "VIII": "Aug", "IX": "Sep", "X": "Oct", "XI": "Nov", "X11": "Dec"}
# for m in roman_month:
# if m in ocr_block['block_text']:
# #Possible year and month found
# this_text = ocr_block['block_text'].replace(m, roman_month[m])
# alpha_block = re.sub(r'\W+ ,-/', '', this_text).strip()
# #Try to parse date
# for d_format in ['DMY', 'YMD', 'MDY']:
# if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
# this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
# interpreted_value = this_date.strftime("%Y-%m-%d")
# verbatim_value = alpha_block
# continue
# if interpreted_value == "":
# for i in range(99):
# if interpreted_value == "":
# if i < 10:
# i = "0{}".format(i)
# else:
# i = str(i)
# if "-{}".format(i) in ocr_block['block_text'] or "\'{}".format(i) in ocr_block['block_text'] or " {}".format(i) in ocr_block['block_text'] or "/{}".format(i) in ocr_block['block_text']:
# #Check if can directly parse the date
# alpha_block = re.sub(r'\W+ ,-/', '', ocr_block['block_text']).strip()
# for d_format in ['DMY', 'YMD', 'MDY']:
# if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
# this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
# if int(this_date.strftime("%Y")) > int(cur_year):
# #If it interprets year 64 as 2064
# this_date_year = int(this_date.strftime("%Y")) - 1000
# else:
# this_date_year = this_date.strftime("%Y")
# interpreted_value = "{}-{}".format(this_date_year, this_date.strftime("%m-%d"))
# verbatim_value = alpha_block
# break
# #Check if there is a month in roman numerals
# roman_month = {"I": "Jan", "II": "Feb", "III": "Mar", "IV": "Apr", "V": "May", "VI": "Jun", "VII": "Jul", "VIII": "Aug", "IX": "Sep", "X": "Oct", "XI": "Nov", "X11": "Dec"}
# for m in roman_month:
# if m in ocr_block['block_text']:
# #Possible year and month found
# this_text = ocr_block['block_text'].replace(m, roman_month[m])
# alpha_block = re.sub(r'\W+ ,-/', '', this_text).strip()
# #Try to parse date
# for d_format in ['DMY', 'YMD', 'MDY']:
# if dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']}) != None:
# this_date = dateparser.parse(alpha_block, settings={'DATE_ORDER': d_format, 'PREFER_DATES_FROM': 'past', 'PREFER_DAY_OF_MONTH': 'first', 'REQUIRE_PARTS': ['month', 'year']})
# if int(this_date.strftime("%Y")) > int(cur_year):
# #If it interprets year 64 as 2064
# this_date_year = int(this_date.strftime("%Y")) - 1000
# else:
# this_date_year = this_date.strftime("%Y")
# interpreted_value = "{}-{}".format(this_date_year, this_date.strftime("%m-%d"))
# verbatim_value = alpha_block
# def parse_dates(record):
# logger1.debug(record['block_text'])
# block_text = record['block_text'].str.split(' ').tolist()[0]
# len_block_text = len(block_text)
# text_to_test = pd.DataFrame(columns=('document_id', 'block', 'text', 'string_len'))
# for i in range(len_block_text-1):
# for j in range(i+1, len_block_text):
# #print(i, j)
# #this_text = ' '.join(block_text[i:j])
# this_text = ' '.join(map(str, block_text[i:j]))
# alpha_block = re.sub(r'\W+ ,-/', '', this_text)
# #Add space after periods
# alpha_block = ' '.join(alpha_block.split()).replace(' .', '.').replace('.', '. ').strip()
# if len(alpha_block) > 3:
# #print(this_text)
# text_to_test = text_to_test.append([{'document_id': record['document_id'], 'block': record['block'], 'text': this_text, 'string_len': len(alpha_block)}], ignore_index=True)
# logger1.debug(this_text)
# results_df = pd.DataFrame(columns=('text', 'score1', 'score2', 'score3', 'score', 'string_len'))
# results_df['score2'] = pd.to_numeric(results_df['score2'])
# results_df['string_len'] = pd.to_numeric(results_df['string_len'])
# for idx, rcrd in text_to_test.iterrows():
# tr_data = counties_list[['name', 'uid', 'state', 'country']].copy()
# tr_data['score2'] = tr_data.apply(lambda row : fuzz.ratio(rcrd['text'], row['name']), axis = 1)
# tr_data['text'] = rcrd['text']
# tr_data['string_len'] = rcrd['string_len']
# results_df = results_df.append(tr_data)
# res = results_df.nlargest(1, ['score2', 'string_len'])
# if res.shape[0] > 0:
# logger1.info(res)
# if res.iloc[0]['score'] > settings.geo_min:
# db_cursor.execute("INSERT INTO ocr_transcription_ento_auto_geo (filename, district_county, state_territory, country, reference_size) VALUES (%(document_id)s, %(text)s, %(state)s, %(country)s, %(reference_size)s) ON CONFLICT (filename, reference_size) DO UPDATE SET {field} = %(text)s", {'document_id': record['filename'], 'text': res.iloc[0]['text'], 'reference_size': refsize, 'state': res.iloc[0]['state'], 'country': res.iloc[0]['country']})
# logger1.info(db_cursor.query.decode("utf-8"))
# return res
# #Check for Counties_list
# for refsize in ['0.05', '0.1', '0.2']:
# print(refsize)
# db_cursor.execute(query_county, {'refsize': refsize})
# logger1.debug(db_cursor.query.decode("utf-8"))
# test_data = pd.DataFrame(db_cursor.fetchall())
# df_split = np.array_split(test_data, settings.pool_workers * 4)
# pool = Pool(settings.pool_workers)
# df = pd.concat(pool.map(parse_dates, df_split))
# pool.close()
# pool.join()
#Close database connection
db_cursor.close()
conn.close()
#Compress log files
# Zip every .log file in logs/ individually, then delete the original.
script_dir = os.getcwd()
os.chdir('logs')
for file in glob('*.log'):
    # Relies on the external `zip` utility being available on PATH.
    subprocess.run(["zip", "{}.zip".format(file), file])
    os.remove(file)
# Restore the working directory before exiting.
os.chdir(script_dir)
sys.exit(0) | [
"logging.basicConfig",
"logging.getLogger",
"psycopg2.connect",
"logging.StreamHandler",
"os.path.exists",
"os.makedirs",
"logging.Formatter",
"os.getcwd",
"os.chdir",
"numpy.array_split",
"pandas.to_numeric",
"multiprocessing.Pool",
"sys.exit",
"pandas.DataFrame",
"re.sub",
"time.loca... | [((910, 1080), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(name)-12s %(levelname)-8s %(message)s"""', 'datefmt': '"""%m-%d %H:%M:%S"""', 'filename': 'logfile', 'filemode': '"""a"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt=\n '%m-%d %H:%M:%S', filename=logfile, filemode='a')\n", (929, 1080), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((1162, 1185), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1183, 1185), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((1229, 1290), 'logging.Formatter', 'logging.Formatter', (['"""%(name)-12s: %(levelname)-8s %(message)s"""'], {}), "('%(name)-12s: %(levelname)-8s %(message)s')\n", (1246, 1290), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((1333, 1361), 'logging.getLogger', 'logging.getLogger', (['"""compare"""'], {}), "('compare')\n", (1350, 1361), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((1526, 1673), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': 'settings.ocr_host', 'database': 'settings.ocr_db', 'user': 'settings.ocr_user', 'password': 'settings.ocr_password', 'connect_timeout': '(60)'}), '(host=settings.ocr_host, database=settings.ocr_db, user=\n settings.ocr_user, password=settings.ocr_password, connect_timeout=60)\n', (1542, 1673), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((12781, 12928), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': 'settings.gis_host', 'database': 'settings.gis_db', 'user': 'settings.gis_user', 'password': 'settings.gis_password', 'connect_timeout': '(60)'}), '(host=settings.gis_host, database=settings.gis_db, user=\n settings.gis_user, password=settings.gis_password, 
connect_timeout=60)\n', (12797, 12928), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((45046, 45057), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (45055, 45057), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((45058, 45074), 'os.chdir', 'os.chdir', (['"""logs"""'], {}), "('logs')\n", (45066, 45074), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((45087, 45100), 'glob.glob', 'glob', (['"""*.log"""'], {}), "('*.log')\n", (45091, 45100), False, 'from glob import glob\n'), ((45179, 45199), 'os.chdir', 'os.chdir', (['script_dir'], {}), '(script_dir)\n', (45187, 45199), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((45203, 45214), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (45211, 45214), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((642, 664), 'os.path.exists', 'os.path.exists', (['"""logs"""'], {}), "('logs')\n", (656, 664), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((670, 689), 'os.makedirs', 'os.makedirs', (['"""logs"""'], {}), "('logs')\n", (681, 689), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((731, 742), 'time.localtime', 'localtime', ([], {}), '()\n', (740, 742), False, 'from time import localtime, strftime\n'), ((20633, 20701), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "('document_id', 'block', 'text', 'string_len')"}), "(columns=('document_id', 'block', 'text', 'string_len'))\n", (20645, 20701), True, 'import pandas as pd\n'), ((21439, 21526), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "('text', 'score1', 'score2', 'score3', 'score', 'string_len')"}), "(columns=('text', 'score1', 'score2', 'score3', 'score',\n 'string_len'))\n", (21451, 21526), True, 'import pandas as pd\n'), ((21550, 21585), 
'pandas.to_numeric', 'pd.to_numeric', (["results_df['score2']"], {}), "(results_df['score2'])\n", (21563, 21585), True, 'import pandas as pd\n'), ((21617, 21656), 'pandas.to_numeric', 'pd.to_numeric', (["results_df['string_len']"], {}), "(results_df['string_len'])\n", (21630, 21656), True, 'import pandas as pd\n'), ((22938, 22979), 'numpy.array_split', 'np.array_split', (['test_data', 'test_data.size'], {}), '(test_data, test_data.size)\n', (22952, 22979), True, 'import numpy as np\n'), ((22991, 23018), 'multiprocessing.Pool', 'Pool', (['settings.pool_workers'], {}), '(settings.pool_workers)\n', (22995, 23018), False, 'from multiprocessing import Pool\n'), ((45163, 45178), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (45172, 45178), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((1362, 1390), 'logging.getLogger', 'logging.getLogger', (['"""compare"""'], {}), "('compare')\n", (1379, 1390), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n'), ((20947, 20980), 're.sub', 're.sub', (['"""\\\\W+ ,-/"""', '""""""', 'this_text'], {}), "('\\\\W+ ,-/', '', this_text)\n", (20953, 20980), False, 'import io, json, sys, os, psycopg2, logging, subprocess, swifter, re, dateparser\n')] |
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Scaling
# Enlarge the image to twice its size in each dimension.
img = cv2.imread('a.jpg')
print(img.shape)
cv2.imshow('img', img)
# NOTE(review): INTER_AREA is recommended for shrinking; for a 2x upscale
# INTER_CUBIC or INTER_LINEAR is the usual choice -- confirm intent.
res = cv2.resize(img, None, fx=2, fy=2, interpolation = cv2.INTER_AREA)
print(res.shape)
print(img.shape)
cv2.imshow('res', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Translation
# Shift the grayscale image 100 px right and 50 px down using the
# 2x3 affine matrix [[1, 0, tx], [0, 1, ty]].
img = cv2.imread('b.jpg', 0)
rows, cols = img.shape
M = np.float32([[1, 0, 100], [0, 1, 50]])
dst = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('img', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Rotation
# Rotate 270 degrees about the image centre with unit scale.
img = cv2.imread('b.jpg', 0)
rows,cols = img.shape
M = cv2.getRotationMatrix2D((cols/2, rows/2), 270, 1)
dst = cv2.warpAffine(img, M, (cols, rows))
cv2.imshow('Rotation', dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.warpAffine takes a 2x3 transformation matrix while cv2.warpPerspective takes a 3x3 transformation matrix as input.
# Affine Transformation
# Map three source points to three destination points; parallel lines
# remain parallel under an affine warp.
img = cv2.imread('a.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rows,cols,ch = img.shape
pts1 = np.float32([[50, 50], [200, 50], [50, 200]])
pts2 = np.float32([[10, 100], [200, 50], [100, 250]])
M = cv2.getAffineTransform(pts1, pts2)
dst = cv2.warpAffine(img, M, (cols, rows))
plt.subplot(121)
plt.imshow(img)
plt.title('Input')
plt.subplot(122)
plt.imshow(dst)
plt.title('Output')
plt.show()
# Perspective Transformation
# Four point pairs define a homography; straight lines stay straight
# but parallelism is not preserved.
img = cv2.imread('b.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rows, cols, ch = img.shape
pts1 = np.float32([[56, 65], [368, 52], [28, 387], [389, 390]])
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(img, M, (300, 300))
plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
| [
"matplotlib.pyplot.imshow",
"cv2.warpAffine",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"cv2.getPerspectiveTransform",
"cv2.imshow",
"matplotlib.pyplot.subplot",
"cv2.warpPerspective",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.getAffineTransform",
"cv2.getRotationMatrix2D",
"c... | [((84, 103), 'cv2.imread', 'cv2.imread', (['"""a.jpg"""'], {}), "('a.jpg')\n", (94, 103), False, 'import cv2\n'), ((122, 144), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (132, 144), False, 'import cv2\n'), ((151, 214), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': '(2)', 'fy': '(2)', 'interpolation': 'cv2.INTER_AREA'}), '(img, None, fx=2, fy=2, interpolation=cv2.INTER_AREA)\n', (161, 214), False, 'import cv2\n'), ((251, 273), 'cv2.imshow', 'cv2.imshow', (['"""res"""', 'res'], {}), "('res', res)\n", (261, 273), False, 'import cv2\n'), ((274, 288), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (285, 288), False, 'import cv2\n'), ((289, 312), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (310, 312), False, 'import cv2\n'), ((334, 356), 'cv2.imread', 'cv2.imread', (['"""b.jpg"""', '(0)'], {}), "('b.jpg', 0)\n", (344, 356), False, 'import cv2\n'), ((385, 422), 'numpy.float32', 'np.float32', (['[[1, 0, 100], [0, 1, 50]]'], {}), '([[1, 0, 100], [0, 1, 50]])\n', (395, 422), True, 'import numpy as np\n'), ((429, 465), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(cols, rows)'], {}), '(img, M, (cols, rows))\n', (443, 465), False, 'import cv2\n'), ((467, 489), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'dst'], {}), "('img', dst)\n", (477, 489), False, 'import cv2\n'), ((490, 504), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (501, 504), False, 'import cv2\n'), ((505, 528), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (526, 528), False, 'import cv2\n'), ((548, 570), 'cv2.imread', 'cv2.imread', (['"""b.jpg"""', '(0)'], {}), "('b.jpg', 0)\n", (558, 570), False, 'import cv2\n'), ((598, 651), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(cols / 2, rows / 2)', '(270)', '(1)'], {}), '((cols / 2, rows / 2), 270, 1)\n', (621, 651), False, 'import cv2\n'), ((654, 690), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(cols, rows)'], {}), '(img, M, (cols, 
rows))\n', (668, 690), False, 'import cv2\n'), ((691, 718), 'cv2.imshow', 'cv2.imshow', (['"""Rotation"""', 'dst'], {}), "('Rotation', dst)\n", (701, 718), False, 'import cv2\n'), ((719, 733), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (730, 733), False, 'import cv2\n'), ((734, 757), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (755, 757), False, 'import cv2\n'), ((912, 931), 'cv2.imread', 'cv2.imread', (['"""a.jpg"""'], {}), "('a.jpg')\n", (922, 931), False, 'import cv2\n'), ((938, 974), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (950, 974), False, 'import cv2\n'), ((1008, 1052), 'numpy.float32', 'np.float32', (['[[50, 50], [200, 50], [50, 200]]'], {}), '([[50, 50], [200, 50], [50, 200]])\n', (1018, 1052), True, 'import numpy as np\n'), ((1060, 1106), 'numpy.float32', 'np.float32', (['[[10, 100], [200, 50], [100, 250]]'], {}), '([[10, 100], [200, 50], [100, 250]])\n', (1070, 1106), True, 'import numpy as np\n'), ((1112, 1146), 'cv2.getAffineTransform', 'cv2.getAffineTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (1134, 1146), False, 'import cv2\n'), ((1154, 1190), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(cols, rows)'], {}), '(img, M, (cols, rows))\n', (1168, 1190), False, 'import cv2\n'), ((1192, 1208), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1203, 1208), True, 'from matplotlib import pyplot as plt\n'), ((1209, 1224), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1219, 1224), True, 'from matplotlib import pyplot as plt\n'), ((1225, 1243), 'matplotlib.pyplot.title', 'plt.title', (['"""Input"""'], {}), "('Input')\n", (1234, 1243), True, 'from matplotlib import pyplot as plt\n'), ((1244, 1260), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (1255, 1260), True, 'from matplotlib import pyplot as plt\n'), ((1261, 1276), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst'], {}), 
'(dst)\n', (1271, 1276), True, 'from matplotlib import pyplot as plt\n'), ((1277, 1296), 'matplotlib.pyplot.title', 'plt.title', (['"""Output"""'], {}), "('Output')\n", (1286, 1296), True, 'from matplotlib import pyplot as plt\n'), ((1297, 1307), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1305, 1307), True, 'from matplotlib import pyplot as plt\n'), ((1344, 1363), 'cv2.imread', 'cv2.imread', (['"""b.jpg"""'], {}), "('b.jpg')\n", (1354, 1363), False, 'import cv2\n'), ((1370, 1406), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1382, 1406), False, 'import cv2\n'), ((1442, 1498), 'numpy.float32', 'np.float32', (['[[56, 65], [368, 52], [28, 387], [389, 390]]'], {}), '([[56, 65], [368, 52], [28, 387], [389, 390]])\n', (1452, 1498), True, 'import numpy as np\n'), ((1506, 1558), 'numpy.float32', 'np.float32', (['[[0, 0], [300, 0], [0, 300], [300, 300]]'], {}), '([[0, 0], [300, 0], [0, 300], [300, 300]])\n', (1516, 1558), True, 'import numpy as np\n'), ((1564, 1603), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (1591, 1603), False, 'import cv2\n'), ((1611, 1650), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(300, 300)'], {}), '(img, M, (300, 300))\n', (1630, 1650), False, 'import cv2\n'), ((1757, 1767), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1765, 1767), True, 'from matplotlib import pyplot as plt\n'), ((1652, 1668), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (1663, 1668), True, 'from matplotlib import pyplot as plt\n'), ((1669, 1684), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1679, 1684), True, 'from matplotlib import pyplot as plt\n'), ((1685, 1703), 'matplotlib.pyplot.title', 'plt.title', (['"""Input"""'], {}), "('Input')\n", (1694, 1703), True, 'from matplotlib import pyplot as plt\n'), ((1704, 1720), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], 
{}), '(122)\n', (1715, 1720), True, 'from matplotlib import pyplot as plt\n'), ((1721, 1736), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst'], {}), '(dst)\n', (1731, 1736), True, 'from matplotlib import pyplot as plt\n'), ((1737, 1756), 'matplotlib.pyplot.title', 'plt.title', (['"""Output"""'], {}), "('Output')\n", (1746, 1756), True, 'from matplotlib import pyplot as plt\n')] |
import numpy as np
from modules.DataStructures import AccelData
def GenADot(_AccelData:AccelData):
""" Outputs an array of doubles that represents the first-order derivative of the acceleration data """
adot_list = np.array([])
dt = _AccelData.t[1]-_AccelData.t[0]
for i in range(len(_AccelData.getSingleAxis(0))-1):
# basically appends a new dy/dt to the list
adot_list = np.array(list(adot_list)+[(_AccelData.getSingleAxis(1)[i+1]-_AccelData.getSingleAxis(1)[i])/dt])
return adot_list
def Genyx2(_AccelData:AccelData):
"""does y*x^2 for the given data set"""
olist = (np.square(_AccelData.getSingleAxis(0))*_AccelData.getSingleAxis(1))[:-1]
return olist # double[]
| [
"numpy.array"
] | [((231, 243), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (239, 243), True, 'import numpy as np\n')] |
# Copyright (c) 2019 MindAffect B.V.
# Author: <NAME> <<EMAIL>>
# This file is part of pymindaffectBCI <https://github.com/mindaffect/pymindaffectBCI>.
#
# pymindaffectBCI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pymindaffectBCI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pymindaffectBCI. If not, see <http://www.gnu.org/licenses/>
import numpy as np
from mindaffectBCI.utopiaclient import StimulusEvent
def devent2stimSequence(devents):
'''
convert a set of STIMULUSEVENT messages into a stimulus-sequence array with stimulus-times as expected by the utopia RECOGNISER
Inputs:
devents - [nEvt]:UtopiaMessage list of UtopiaMessage messages
i.e. a decoded utopia STIMULUSEVENT message, should be of type
{ msgID:'E'byte, timeStamp:int, objIDs:(nObj:byte), objState:(nObj:byte) }
Outputs:
Me - (nEvt,nY :int) The extract stimulus sequence
objIDs - (nY :byte) The set of used object IDs
stimTimes_ms - (nEvt :int) The timestamp of each event in milliseconds
isistimEvent - (nEp :bool) Indicator which input events are stimulus events
Copyright (c) MindAffect B.V. 2018
'''
if devents is None:
return (None,None,None,None)
Me = np.zeros((len(devents), 256), dtype=int)
stimTimes_ms = np.zeros(len(devents))
allobjIDs = np.arange(0, 256)
usedobjIDs = np.zeros((256), dtype=bool)
isstimEvent = np.zeros((len(devents)), dtype=bool)
# loop over the messages, extracting info from stimEvents and putting into the stim-sequence
for ei, evt in enumerate(devents):
if not evt.msgID == StimulusEvent.msgID:
continue
isstimEvent[ei] = True
# extract the stimulusState info
timeStamp = evt.timestamp
objIDs = evt.objIDs
objState = evt.objState
stimTimes_ms[ei] = timeStamp
# hold value of used objIDs from previous time stamp
if ei > 0 and np.any(usedobjIDs):
Me[ei, usedobjIDs] = Me[ei-1, usedobjIDs]
# 2) overwrite with updated state
# N.B. indexing hack with [int,tuple,] to index correctly
Me[ei, objIDs, ] = objState
usedobjIDs[objIDs, ] = True
# restrict to only stim-event info
# Note: horrible integer indexing tricks to get only the elements we want..
Me = Me[np.flatnonzero(isstimEvent)[:, np.newaxis], usedobjIDs]
stimTimes_ms = stimTimes_ms[isstimEvent]
objIDs = allobjIDs[usedobjIDs]
return Me, stimTimes_ms, objIDs, isstimEvent
def upsample_stimseq(sample_ts, ss, stimulus_ts, objIDs=None, usedobjIDs=None, trlen=None):
''' upsample a set of timestamped stimulus states to a sample rate
WARNING: assumes sample_ts and stimulus_ts are in *ascending* sorted order! '''
if trlen is None:
trlen = len(sample_ts)
if objIDs is None:
objIDs = list(range(ss.shape[-1]))
if usedobjIDs is None: # use the whole Y
usedobjIDs = objIDs
obj_idx = slice(len(usedobjIDs))
else: # match objIDs and to get idxs to use
obj_idx = np.zeros((len(objIDs),), dtype=int)
for oi, oid in enumerate(objIDs):
obj_idx[oi] = np.flatnonzero(usedobjIDs == oid)[0]
# convert to sample-rate version
Y = np.zeros((trlen, len(usedobjIDs)), dtype=ss.dtype)
samp_ts_iterator = enumerate(iter(sample_ts))
data_i=None
stimulus_idx=np.zeros(stimulus_ts.shape,dtype=int)
for stim_i, stim_ts in enumerate(stimulus_ts):
#print("{}) ts={} ".format(stim_i,stim_ts),end='')
odata_i = data_i # store when old stim sample index
for data_i, data_ts in samp_ts_iterator:
#print("{}={} ".format(data_i,data_ts),end='')
# N.B. assumes samp_ts are already interpolated!
if data_ts > stim_ts:
#print("*")
break
if data_i is None:
raise ValueError("Ran out of data!")
if data_i > Y.shape[0]:
# events after end of the allowed trial length
break
# data_i is one sample too far?
data_i = max(data_i-1, 0)
# nearest index for the stim_ts
if odata_i is not None:
# hold the previous stimulus state until now
Y[odata_i:data_i+1, obj_idx] = Y[odata_i, obj_idx]
# insert the new state
stimulus_idx[stim_i] = data_i
Y[data_i, obj_idx] = ss[stim_i, :]
return (Y, stimulus_idx)
if __name__=="__main__":
from devent2stimsequence import devent2stimSequence
from mindaffectBCI.utopiaclient import StimulusEvent
# make a periodic type stimulus sequence, period, 3,4,5
se = [StimulusEvent(i*3, (1, 2, 3),
(i%3 == 0, i%4 == 0, i%5 == 0)) for i in range(100)]
Me, st, oid, isse = devent2stimSequence(se)
# print the decoded sequence
print("oID {}".format(oid))
print("---")
for i in range(Me.shape[0]):
print("{:3.0f}) {}".format(st[i], Me[i, :]))
# now check the upsampling function, where we sampled at 1hz
used_objIDs = np.arange(5)
samp_ts = np.arange(len(se)*5)
trlen = int(st[-1]*.6) # make trial shorter than data
Y,_ = upsample_stimseq(samp_ts, Me, st, oid, used_objIDs, trlen)
print("oID {}".format(used_objIDs))
print("\n---\n")
for i in range(Y.shape[0]):
print("{:3d}) {}".format(i,Y[i,:]))
| [
"numpy.flatnonzero",
"mindaffectBCI.utopiaclient.StimulusEvent",
"numpy.any",
"numpy.zeros",
"devent2stimsequence.devent2stimSequence",
"numpy.arange"
] | [((1797, 1814), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (1806, 1814), True, 'import numpy as np\n'), ((1832, 1857), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'bool'}), '(256, dtype=bool)\n', (1840, 1857), True, 'import numpy as np\n'), ((3860, 3898), 'numpy.zeros', 'np.zeros', (['stimulus_ts.shape'], {'dtype': 'int'}), '(stimulus_ts.shape, dtype=int)\n', (3868, 3898), True, 'import numpy as np\n'), ((5271, 5294), 'devent2stimsequence.devent2stimSequence', 'devent2stimSequence', (['se'], {}), '(se)\n', (5290, 5294), False, 'from devent2stimsequence import devent2stimSequence\n'), ((5549, 5561), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (5558, 5561), True, 'import numpy as np\n'), ((5140, 5209), 'mindaffectBCI.utopiaclient.StimulusEvent', 'StimulusEvent', (['(i * 3)', '(1, 2, 3)', '(i % 3 == 0, i % 4 == 0, i % 5 == 0)'], {}), '(i * 3, (1, 2, 3), (i % 3 == 0, i % 4 == 0, i % 5 == 0))\n', (5153, 5209), False, 'from mindaffectBCI.utopiaclient import StimulusEvent\n'), ((2408, 2426), 'numpy.any', 'np.any', (['usedobjIDs'], {}), '(usedobjIDs)\n', (2414, 2426), True, 'import numpy as np\n'), ((2798, 2825), 'numpy.flatnonzero', 'np.flatnonzero', (['isstimEvent'], {}), '(isstimEvent)\n', (2812, 2825), True, 'import numpy as np\n'), ((3640, 3673), 'numpy.flatnonzero', 'np.flatnonzero', (['(usedobjIDs == oid)'], {}), '(usedobjIDs == oid)\n', (3654, 3673), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_SemicircleDistribution [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_SemicircleDistribution&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExerRandomMatrix).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace
from numpy.linalg import eig, eigvals
from numpy.random import rand, randn
from scipy.stats import expon, lognorm
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure, plot, bar, legend, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from HistogramFP import HistogramFP
# initialize variables
i_ = 1000 # matrix size
# -
# ## Generate matrix Y
X_1 = randn(i_,i_) # standard normal invariants
Y_1 = (X_1 + X_1.T) / (2*sqrt(2*i_))
X_2 = expon.rvs(scale=1, size=(i_,i_)) - 1 # exponential invariants
Y_2 = (X_2 + X_2.T) / (2*sqrt(2*i_))
X_3 = (rand(i_,i_) - 0.5)*sqrt(12) # uniform invariants
Y_3 = (X_3 + X_3.T) / (2*sqrt(2*i_))
X_4 = (lognorm.rvs(1, scale=1, size=(i_,i_))- exp(0.5)) / sqrt(exp(2) - exp(1)) # log-normal distribution
Y_4 = (X_4 + X_4.T) / (2*sqrt(2*i_))
# ## Compute the sample eigenvalues and the corresponding normalized histograms
# +
nbins = int(ceil(10*log(i_)))
option = namedtuple('option', 'n_bins')
option.n_bins = nbins
# standard normal
Lambda2_1 = eigvals(Y_1)
p_flat = ones((1, len(Lambda2_1))) / len(Lambda2_1)
hgram_1, x_1 = HistogramFP(Lambda2_1.reshape(1,-1), p_flat, option)
# exponential
Lambda2_2 = eigvals(Y_2)
hgram_2, x_2 = HistogramFP(Lambda2_2.reshape(1,-1), p_flat, option)
# uniform
Lambda2_3 = eigvals(Y_3)
hgram_3, x_3 = HistogramFP(Lambda2_3.reshape(1,-1), p_flat, option)
# log-normal
Lambda2_4 = eigvals(Y_4)
hgram_4, x_4 = HistogramFP(Lambda2_4.reshape(1,-1), p_flat, option)
# -
# ## Compute the semicircle function
# +
x = linspace(-1,1,200)
g = 2 / pi*sqrt(1 - x ** 2)
# -
# ## Create figures
# +
figure()
bar(x_1[:-1], hgram_1[0], width=x_1[1]-x_1[0], facecolor= [.7, .7, .7], edgecolor= [.5, .5, .5])
plot(x, g, 'r',lw= 2)
title('Standard Normal variables')
legend(['Sample eigenvalues','Semicircle function']);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
# exponential
figure()
bar(x_2[:-1], hgram_2[0], width=x_2[1]-x_2[0],facecolor= [.7, .7, .7], edgecolor= [.5, .5, .5])
plot(x, g, 'r',lw= 2)
title('Exponential variables')
legend(['Sample eigenvalues','Semicircle function']);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
# uniform
figure()
bar(x_3[:-1], hgram_3[0], width=x_3[1]-x_3[0],facecolor= [.7, .7, .7], edgecolor= [.5, .5, .5])
plot(x, g, 'r',lw= 2)
title('Uniform variables')
legend(['Sample eigenvalues','Semicircle function']);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
# log-normal
figure()
bar(x_4[:-1], hgram_4[0], width=x_4[1]-x_4[0],facecolor= [.7, .7, .7], edgecolor= [.5, .5, .5])
plot(x, g, 'r',lw= 2)
title('Log-normal variables')
legend(['Sample eigenvalues','Semicircle function']);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"collections.namedtuple",
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.plot",
"numpy.log",
"matplotlib.pyplot.style.use",
"scipy.stats.lognorm.rvs",
"numpy.exp",
"numpy.linalg.eigvals",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"os.path.abspath",
"matp... | [((1075, 1099), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (1088, 1099), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1267), 'numpy.random.randn', 'randn', (['i_', 'i_'], {}), '(i_, i_)\n', (1259, 1267), False, 'from numpy.random import rand, randn\n'), ((1803, 1833), 'collections.namedtuple', 'namedtuple', (['"""option"""', '"""n_bins"""'], {}), "('option', 'n_bins')\n", (1813, 1833), False, 'from collections import namedtuple\n'), ((1888, 1900), 'numpy.linalg.eigvals', 'eigvals', (['Y_1'], {}), '(Y_1)\n', (1895, 1900), False, 'from numpy.linalg import eig, eigvals\n'), ((2047, 2059), 'numpy.linalg.eigvals', 'eigvals', (['Y_2'], {}), '(Y_2)\n', (2054, 2059), False, 'from numpy.linalg import eig, eigvals\n'), ((2150, 2162), 'numpy.linalg.eigvals', 'eigvals', (['Y_3'], {}), '(Y_3)\n', (2157, 2162), False, 'from numpy.linalg import eig, eigvals\n'), ((2256, 2268), 'numpy.linalg.eigvals', 'eigvals', (['Y_4'], {}), '(Y_4)\n', (2263, 2268), False, 'from numpy.linalg import eig, eigvals\n'), ((2388, 2408), 'numpy.linspace', 'linspace', (['(-1)', '(1)', '(200)'], {}), '(-1, 1, 200)\n', (2396, 2408), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((2466, 2474), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (2472, 2474), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2475, 2581), 'matplotlib.pyplot.bar', 'bar', (['x_1[:-1]', 'hgram_1[0]'], {'width': '(x_1[1] - x_1[0])', 'facecolor': '[0.7, 0.7, 0.7]', 'edgecolor': '[0.5, 0.5, 0.5]'}), '(x_1[:-1], hgram_1[0], width=x_1[1] - x_1[0], facecolor=[0.7, 0.7, 0.7],\n edgecolor=[0.5, 0.5, 0.5])\n', (2478, 2581), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2572, 2593), 'matplotlib.pyplot.plot', 'plot', (['x', 'g', '"""r"""'], {'lw': '(2)'}), "(x, g, 'r', lw=2)\n", (2576, 2593), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2594, 
2628), 'matplotlib.pyplot.title', 'title', (['"""Standard Normal variables"""'], {}), "('Standard Normal variables')\n", (2599, 2628), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2629, 2682), 'matplotlib.pyplot.legend', 'legend', (["['Sample eigenvalues', 'Semicircle function']"], {}), "(['Sample eigenvalues', 'Semicircle function'])\n", (2635, 2682), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2809, 2817), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (2815, 2817), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2818, 2924), 'matplotlib.pyplot.bar', 'bar', (['x_2[:-1]', 'hgram_2[0]'], {'width': '(x_2[1] - x_2[0])', 'facecolor': '[0.7, 0.7, 0.7]', 'edgecolor': '[0.5, 0.5, 0.5]'}), '(x_2[:-1], hgram_2[0], width=x_2[1] - x_2[0], facecolor=[0.7, 0.7, 0.7],\n edgecolor=[0.5, 0.5, 0.5])\n', (2821, 2924), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2914, 2935), 'matplotlib.pyplot.plot', 'plot', (['x', 'g', '"""r"""'], {'lw': '(2)'}), "(x, g, 'r', lw=2)\n", (2918, 2935), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2936, 2966), 'matplotlib.pyplot.title', 'title', (['"""Exponential variables"""'], {}), "('Exponential variables')\n", (2941, 2966), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((2967, 3020), 'matplotlib.pyplot.legend', 'legend', (["['Sample eigenvalues', 'Semicircle function']"], {}), "(['Sample eigenvalues', 'Semicircle function'])\n", (2973, 3020), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3143, 3151), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (3149, 3151), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3152, 3258), 'matplotlib.pyplot.bar', 'bar', (['x_3[:-1]', 'hgram_3[0]'], {'width': '(x_3[1] - x_3[0])', 'facecolor': '[0.7, 0.7, 0.7]', 'edgecolor': '[0.5, 0.5, 0.5]'}), 
'(x_3[:-1], hgram_3[0], width=x_3[1] - x_3[0], facecolor=[0.7, 0.7, 0.7],\n edgecolor=[0.5, 0.5, 0.5])\n', (3155, 3258), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3248, 3269), 'matplotlib.pyplot.plot', 'plot', (['x', 'g', '"""r"""'], {'lw': '(2)'}), "(x, g, 'r', lw=2)\n", (3252, 3269), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3270, 3296), 'matplotlib.pyplot.title', 'title', (['"""Uniform variables"""'], {}), "('Uniform variables')\n", (3275, 3296), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3297, 3350), 'matplotlib.pyplot.legend', 'legend', (["['Sample eigenvalues', 'Semicircle function']"], {}), "(['Sample eigenvalues', 'Semicircle function'])\n", (3303, 3350), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3476, 3484), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (3482, 3484), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3485, 3591), 'matplotlib.pyplot.bar', 'bar', (['x_4[:-1]', 'hgram_4[0]'], {'width': '(x_4[1] - x_4[0])', 'facecolor': '[0.7, 0.7, 0.7]', 'edgecolor': '[0.5, 0.5, 0.5]'}), '(x_4[:-1], hgram_4[0], width=x_4[1] - x_4[0], facecolor=[0.7, 0.7, 0.7],\n edgecolor=[0.5, 0.5, 0.5])\n', (3488, 3591), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3581, 3602), 'matplotlib.pyplot.plot', 'plot', (['x', 'g', '"""r"""'], {'lw': '(2)'}), "(x, g, 'r', lw=2)\n", (3585, 3602), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3603, 3632), 'matplotlib.pyplot.title', 'title', (['"""Log-normal variables"""'], {}), "('Log-normal variables')\n", (3608, 3632), False, 'from matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((3633, 3686), 'matplotlib.pyplot.legend', 'legend', (["['Sample eigenvalues', 'Semicircle function']"], {}), "(['Sample eigenvalues', 'Semicircle function'])\n", (3639, 3686), False, 'from 
matplotlib.pyplot import figure, plot, bar, legend, title\n'), ((720, 758), 'os.path.abspath', 'path.abspath', (['"""../../functions-legacy"""'], {}), "('../../functions-legacy')\n", (732, 758), True, 'import os.path as path\n'), ((1340, 1373), 'scipy.stats.expon.rvs', 'expon.rvs', ([], {'scale': '(1)', 'size': '(i_, i_)'}), '(scale=1, size=(i_, i_))\n', (1349, 1373), False, 'from scipy.stats import expon, lognorm\n'), ((1466, 1474), 'numpy.sqrt', 'sqrt', (['(12)'], {}), '(12)\n', (1470, 1474), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((2419, 2435), 'numpy.sqrt', 'sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (2423, 2435), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1322, 1334), 'numpy.sqrt', 'sqrt', (['(2 * i_)'], {}), '(2 * i_)\n', (1326, 1334), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1428, 1440), 'numpy.sqrt', 'sqrt', (['(2 * i_)'], {}), '(2 * i_)\n', (1432, 1440), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1447, 1459), 'numpy.random.rand', 'rand', (['i_', 'i_'], {}), '(i_, i_)\n', (1451, 1459), False, 'from numpy.random import rand, randn\n'), ((1522, 1534), 'numpy.sqrt', 'sqrt', (['(2 * i_)'], {}), '(2 * i_)\n', (1526, 1534), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1541, 1579), 'scipy.stats.lognorm.rvs', 'lognorm.rvs', (['(1)'], {'scale': '(1)', 'size': '(i_, i_)'}), '(1, scale=1, size=(i_, i_))\n', (1552, 1579), False, 'from scipy.stats import expon, lognorm\n'), ((1580, 1588), 'numpy.exp', 'exp', (['(0.5)'], {}), '(0.5)\n', (1583, 1588), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1666, 1678), 'numpy.sqrt', 'sqrt', (['(2 * i_)'], {}), '(2 * i_)\n', (1670, 1678), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1597, 1603), 'numpy.exp', 'exp', (['(2)'], {}), '(2)\n', (1600, 1603), False, 
'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1606, 1612), 'numpy.exp', 'exp', (['(1)'], {}), '(1)\n', (1609, 1612), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n'), ((1784, 1791), 'numpy.log', 'log', (['i_'], {}), '(i_)\n', (1787, 1791), False, 'from numpy import arange, ones, pi, ceil, log, exp, sqrt, linspace\n')] |
import tensorflow as tf
import numpy as np
import RecordLoader as rloader
from RecordUtil import converIntToLabels
from RecordConver import readVideo
def readVideos(filenames):
videos = []
videoLens = []
for filename in filenames:
video = readVideo(filename)
videoLens.append(len(video))
videos.append(video)
videos = np.array(videos, dtype=np.uint8)
return videos, videoLens
def predictVideos(videoPaths, utilDict, modelGraph, model):
with tf.Session() as sess:
saver = tf.train.import_meta_graph(modelGraph)
saver.restore(sess, tf.train.latest_checkpoint(model))
graph = tf.get_default_graph()
videoInput = graph.get_tensor_by_name('video_input:0')
videoLengths = graph.get_tensor_by_name('video_length:0')
channel_keep_prob = graph.get_tensor_by_name('channel_keep_prob:0')
_y = graph.get_tensor_by_name('out_decoded:0')
videos, videoLens = readVideos(videoPaths)
y = sess.run([_y], feed_dict={videoInput: videos,
videoLengths: videoLens,
channel_keep_prob: 1.})
result = converIntToLabels(utilDict, y[0])
return result
utilDict = rloader.loadUtilDict('utilDict.pkl')
modelGraph = 'put your model graph'
model = 'put your model here'
print(predictVideos(['put your video path here'], utilDict, modelGraph, model))
| [
"RecordConver.readVideo",
"tensorflow.Session",
"numpy.array",
"RecordLoader.loadUtilDict",
"tensorflow.train.import_meta_graph",
"RecordUtil.converIntToLabels",
"tensorflow.train.latest_checkpoint",
"tensorflow.get_default_graph"
] | [((1283, 1319), 'RecordLoader.loadUtilDict', 'rloader.loadUtilDict', (['"""utilDict.pkl"""'], {}), "('utilDict.pkl')\n", (1303, 1319), True, 'import RecordLoader as rloader\n'), ((374, 406), 'numpy.array', 'np.array', (['videos'], {'dtype': 'np.uint8'}), '(videos, dtype=np.uint8)\n', (382, 406), True, 'import numpy as np\n'), ((270, 289), 'RecordConver.readVideo', 'readVideo', (['filename'], {}), '(filename)\n', (279, 289), False, 'from RecordConver import readVideo\n'), ((512, 524), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (522, 524), True, 'import tensorflow as tf\n'), ((551, 589), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['modelGraph'], {}), '(modelGraph)\n', (577, 589), True, 'import tensorflow as tf\n'), ((671, 693), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (691, 693), True, 'import tensorflow as tf\n'), ((1214, 1247), 'RecordUtil.converIntToLabels', 'converIntToLabels', (['utilDict', 'y[0]'], {}), '(utilDict, y[0])\n', (1231, 1247), False, 'from RecordUtil import converIntToLabels\n'), ((619, 652), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model'], {}), '(model)\n', (645, 652), True, 'import tensorflow as tf\n')] |
import logging
import os
import textwrap
import numpy as np
import pytest
from yt.config import ytcfg
from yt.frontends.enzo.api import EnzoDataset
from yt.loaders import load_sample
from yt.sample_data.api import get_data_registry_table
from yt.testing import requires_module_pytest
from yt.utilities.logger import ytLogger
@pytest.fixture()
def tmp_data_dir(tmp_path):
pre_test_data_dir = ytcfg["yt", "test_data_dir"]
ytcfg.set("yt", "test_data_dir", str(tmp_path))
yield tmp_path
ytcfg.set("yt", "test_data_dir", pre_test_data_dir)
@pytest.fixture()
def capturable_logger(caplog):
"""
This set the minimal conditions to make pytest's caplog fixture usable.
"""
propagate = ytLogger.propagate
ytLogger.propagate = True
with caplog.at_level(logging.INFO):
yield
ytLogger.propagate = propagate
@requires_module_pytest("pandas", "pooch")
@pytest.mark.usefixtures("capturable_logger")
def test_load_sample_small_dataset(tmp_data_dir, caplog):
ds = load_sample("ToroShockTube", progressbar=False, timeout=30)
assert isinstance(ds, EnzoDataset)
text = textwrap.dedent(
f"""
'ToroShockTube' is not available locally. Looking up online.
Downloading from https://yt-project.org/data/ToroShockTube.tar.gz
Untaring downloaded file to '{str(tmp_data_dir)}'
Parameters: current_time = 0.2
Parameters: domain_dimensions = [100 1 1]
Parameters: domain_left_edge = [0. 0. 0.]
Parameters: domain_right_edge = [1. 1. 1.]
Parameters: cosmological_simulation = 0
"""
).strip("\n")
expected = [("yt", 20, message) for message in text.split("\n")]
assert caplog.record_tuples == expected
caplog.clear()
# loading a second time should not result in a download request
ds2 = load_sample("ToroShockTube")
assert isinstance(ds2, EnzoDataset)
text = textwrap.dedent(
f"""
Sample dataset found in '{os.path.join(str(tmp_data_dir), 'ToroShockTube')}'
Parameters: current_time = 0.2
Parameters: domain_dimensions = [100 1 1]
Parameters: domain_left_edge = [0. 0. 0.]
Parameters: domain_right_edge = [1. 1. 1.]
Parameters: cosmological_simulation = 0
"""
).strip("\n")
expected = [("yt", 20, message) for message in text.split("\n")]
assert caplog.record_tuples == expected
@requires_module_pytest("pandas", "pooch")
@pytest.mark.usefixtures("capturable_logger")
def test_load_sample_timeout(tmp_data_dir, caplog):
from requests.exceptions import ConnectTimeout, ReadTimeout
# note that requests is a direct dependency to pooch,
# so we don't need to mark it in a decorator.
with pytest.raises((ConnectTimeout, ReadTimeout)):
load_sample("IsolatedGalaxy", progressbar=False, timeout=0.00001)
@requires_module_pytest("pandas", "requests")
@pytest.mark.xfail(reason="Registry is currently incomplete.")
def test_registry_integrity():
reg = get_data_registry_table()
assert not any(reg.isna())
@pytest.fixture()
def sound_subreg():
# this selection is needed because the full dataset is incomplete
# so we test only the values that can be parsed
reg = get_data_registry_table()
return reg[["size", "byte_size"]][reg["size"].notna()]
@requires_module_pytest("pandas", "requests")
def test_registry_byte_size_dtype(sound_subreg):
from pandas import Int64Dtype
assert sound_subreg["byte_size"].dtype == Int64Dtype()
@requires_module_pytest("pandas", "requests")
def test_registry_byte_size_sign(sound_subreg):
np.testing.assert_array_less(0, sound_subreg["byte_size"])
@requires_module_pytest("pandas", "requests")
def test_unknown_filename():
fake_name = "these_are_not_the_files_your_looking_for"
with pytest.raises(KeyError) as err:
load_sample(fake_name)
assert err.exc == f"Could not find '{fake_name}' in the registry."
| [
"numpy.testing.assert_array_less",
"yt.testing.requires_module_pytest",
"pytest.mark.xfail",
"pandas.Int64Dtype",
"yt.loaders.load_sample",
"yt.sample_data.api.get_data_registry_table",
"pytest.raises",
"pytest.mark.usefixtures",
"pytest.fixture",
"yt.config.ytcfg.set"
] | [((330, 346), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (344, 346), False, 'import pytest\n'), ((560, 576), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (574, 576), False, 'import pytest\n'), ((860, 901), 'yt.testing.requires_module_pytest', 'requires_module_pytest', (['"""pandas"""', '"""pooch"""'], {}), "('pandas', 'pooch')\n", (882, 901), False, 'from yt.testing import requires_module_pytest\n'), ((903, 947), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""capturable_logger"""'], {}), "('capturable_logger')\n", (926, 947), False, 'import pytest\n'), ((2560, 2601), 'yt.testing.requires_module_pytest', 'requires_module_pytest', (['"""pandas"""', '"""pooch"""'], {}), "('pandas', 'pooch')\n", (2582, 2601), False, 'from yt.testing import requires_module_pytest\n'), ((2603, 2647), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""capturable_logger"""'], {}), "('capturable_logger')\n", (2626, 2647), False, 'import pytest\n'), ((3005, 3049), 'yt.testing.requires_module_pytest', 'requires_module_pytest', (['"""pandas"""', '"""requests"""'], {}), "('pandas', 'requests')\n", (3027, 3049), False, 'from yt.testing import requires_module_pytest\n'), ((3051, 3112), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Registry is currently incomplete."""'}), "(reason='Registry is currently incomplete.')\n", (3068, 3112), False, 'import pytest\n'), ((3214, 3230), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (3228, 3230), False, 'import pytest\n'), ((3471, 3515), 'yt.testing.requires_module_pytest', 'requires_module_pytest', (['"""pandas"""', '"""requests"""'], {}), "('pandas', 'requests')\n", (3493, 3515), False, 'from yt.testing import requires_module_pytest\n'), ((3662, 3706), 'yt.testing.requires_module_pytest', 'requires_module_pytest', (['"""pandas"""', '"""requests"""'], {}), "('pandas', 'requests')\n", (3684, 3706), False, 'from yt.testing import requires_module_pytest\n'), ((3821, 3865), 
'yt.testing.requires_module_pytest', 'requires_module_pytest', (['"""pandas"""', '"""requests"""'], {}), "('pandas', 'requests')\n", (3843, 3865), False, 'from yt.testing import requires_module_pytest\n'), ((505, 556), 'yt.config.ytcfg.set', 'ytcfg.set', (['"""yt"""', '"""test_data_dir"""', 'pre_test_data_dir'], {}), "('yt', 'test_data_dir', pre_test_data_dir)\n", (514, 556), False, 'from yt.config import ytcfg\n'), ((1015, 1074), 'yt.loaders.load_sample', 'load_sample', (['"""ToroShockTube"""'], {'progressbar': '(False)', 'timeout': '(30)'}), "('ToroShockTube', progressbar=False, timeout=30)\n", (1026, 1074), False, 'from yt.loaders import load_sample\n'), ((1912, 1940), 'yt.loaders.load_sample', 'load_sample', (['"""ToroShockTube"""'], {}), "('ToroShockTube')\n", (1923, 1940), False, 'from yt.loaders import load_sample\n'), ((3154, 3179), 'yt.sample_data.api.get_data_registry_table', 'get_data_registry_table', ([], {}), '()\n', (3177, 3179), False, 'from yt.sample_data.api import get_data_registry_table\n'), ((3383, 3408), 'yt.sample_data.api.get_data_registry_table', 'get_data_registry_table', ([], {}), '()\n', (3406, 3408), False, 'from yt.sample_data.api import get_data_registry_table\n'), ((3759, 3817), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', (['(0)', "sound_subreg['byte_size']"], {}), "(0, sound_subreg['byte_size'])\n", (3787, 3817), True, 'import numpy as np\n'), ((2882, 2926), 'pytest.raises', 'pytest.raises', (['(ConnectTimeout, ReadTimeout)'], {}), '((ConnectTimeout, ReadTimeout))\n', (2895, 2926), False, 'import pytest\n'), ((2936, 2999), 'yt.loaders.load_sample', 'load_sample', (['"""IsolatedGalaxy"""'], {'progressbar': '(False)', 'timeout': '(1e-05)'}), "('IsolatedGalaxy', progressbar=False, timeout=1e-05)\n", (2947, 2999), False, 'from yt.loaders import load_sample\n'), ((3646, 3658), 'pandas.Int64Dtype', 'Int64Dtype', ([], {}), '()\n', (3656, 3658), False, 'from pandas import Int64Dtype\n'), ((3963, 3986), 'pytest.raises', 
'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (3976, 3986), False, 'import pytest\n'), ((4003, 4025), 'yt.loaders.load_sample', 'load_sample', (['fake_name'], {}), '(fake_name)\n', (4014, 4025), False, 'from yt.loaders import load_sample\n')] |
#Calculation of the Simulated Impedance
# ------------------------------------------------------
# Copyright (C) 2020 <NAME>
# Licensed under the MIT license, see LICENSE.
# ------------------------------------------------------
import numpy as np
import math
from arv import *
from replot2 import *
#===#Initial parameters to test the algorithm#===#
#circ = 's(R1,p(R1,C1),p(R1,C1),p(R1,C1))' #Circuit Configuration
#param = [30,30,60,30,60,30,60]
#freq = np.array([1.0000e+06, 7.9436e+05, 6.3100e+05, 5.0127e+05, 3.9814e+05, 3.1623e+05, 2.5119e+05, 1.9963e+05, 1.5850e+05, 1.2592e+05, 1.0002e+05, 7.9512e+04, 6.3105e+04, 5.0215e+04, 3.9902e+04, 3.1699e+04, 2.5137e+04, 1.9980e+04, 1.5879e+04, 1.2598e+04, 1.0020e+04, 7.9282e+03, 6.3079e+03, 5.0347e+03, 3.9931e+03, 3.1441e+03, 2.5195e+03, 2.0008e+03, 1.5807e+03, 1.2536e+03, 1.0007e+03, 7.9003e+02, 6.2881e+02, 5.0223e+02, 4.0015e+02, 3.1550e+02, 2.5202e+02, 2.0032e+02, 1.5801e+02, 1.2556e+02, 9.9734e+01, 7.9449e+01, 6.3345e+01, 4.9867e+01, 3.8422e+01, 3.1250e+01, 2.4934e+01, 2.0032e+01, 1.5625e+01, 1.2467e+01, 9.9734e+00, 7.9719e+00, 6.3516e+00, 5.0134e+00, 3.9860e+00, 3.1758e+00, 2.5161e+00, 1.9955e+00, 1.5879e+00, 1.2581e+00, 9.9777e-01, 7.9274e-01, 6.2903e-01, 4.9888e-01, 3.9860e-01, 3.1672e-01, 2.5148e-01, 1.9998e-01, 1.5879e-01, 1.2601e-01, 1.0016e-01])
#Function for impedance calculation
def zcalc(elem, p, f):
    """Return the complex impedance of a single circuit element.

    Parameters
    ----------
    elem : str
        Element code: 'R' (resistor), 'C' (capacitor), 'L' (inductor)
        or 'E' (constant phase element).
    p : sequence of float
        Element parameters. 'R', 'C' and 'L' read p[0]; 'E' reads p[0]
        and the exponent p[1].
    f : float
        Frequency in Hz.

    Returns
    -------
    complex or float
        Impedance of the element at frequency ``f``.

    Raises
    ------
    ValueError
        If ``elem`` is not one of the supported element codes.
    """
    # Global parameters
    w = 2 * f * math.pi      # angular frequency (rad/s)
    ii = complex(0, 1)       # imaginary unit
    if elem == 'R':
        z = p[0]
    elif elem == 'C':
        z = 1 / (ii * w * p[0])
    elif elem == 'L':
        z = ii * w * p[0]
    elif elem == 'E':
        z = 1 / (p[0] * (ii * w) ** p[1])
    else:
        # The original printed 'Error' and then crashed with
        # UnboundLocalError on `return z`; fail explicitly instead.
        raise ValueError("Unknown circuit element: {!r}".format(elem))
    return z
def simulated (circ,param,freq):
    """Simulate the impedance spectrum of a circuit description.

    Parameters
    ----------
    circ : str
        Circuit configuration string, e.g. 's(R1,p(R1,C1))', where
        's'/'p' denote series/parallel groups and each element letter is
        followed by its parameter count.
    param : list of float
        Flat list of element parameters, consumed in circuit order.
    freq : array-like of float
        Frequencies (Hz) at which to evaluate the circuit.

    Returns
    -------
    numpy.ndarray
        One simulated complex impedance per frequency.
    """
    # Simulated data accumulator (one entry per frequency)
    DataSim = []
    # Impedance matrix: rows = frequencies, columns = circuit elements.
    # NOTE(review): it is sized by len(param), but only the first k columns
    # (one per element) are ever filled — confirm this is intentional.
    z = np.zeros((len(freq),len(param)),dtype=complex)
    #print(z)
    # Structural characters to strip so only element codes + counts remain
    delete = ['p','s','(',')',',']
    element = circ
    comp=[]
    # Strip the structural characters from the circuit string
    for i in range(len(delete)):
        element = element.replace(delete[i],'')
    # `element` is now pairs of (element letter, parameter count digit)
    k=0 #index of the element currently being processed
    idd=0
    for j in range(0,len(element),2):
        nlp = int(element[j+1]) #number of parameters this element consumes
        actparam = param[0:nlp] #parameters for the current element
        param = param[nlp:] #drop the consumed parameters
        # Impedance of the current element at every frequency
        for i in range(len(freq)):
            z[i][k] = zcalc(element[j],actparam,freq[i])
        # Rewrite the count digit in `circ` with the element's running index
        # so each element gets a unique label (e.g. 'R0', 'C1', ...).
        if idd!=circ.index(element[j+1]):
            circ = circ.replace(element[j:j+2],element[j]+str(k),1)
            idd=circ.index(element[j+1])
            comp.append((element[j]+str(k)))
        else:
            # Same digit appears again: search after the previous match so
            # the correct occurrence is replaced.
            novo = circ[idd+1:]
            id2 = novo.index(element[j+1])
            idd = idd + id2+1
            circ = circ[:idd]+str(k)+circ[idd+1:]
            comp.append((element[j]+str(k)))
        k = k + 1
    #print('circuito: ',circ)
    # Combine the per-element impedances into the total circuit impedance
    # at every frequency. `impedance` presumably comes from the star import
    # of `arv` at the top of the file — verify.
    for h in range(len(z)):
        impe = impedance(freq[h],circ,comp,z[h])
        DataSim.append(impe)
    # print('\n'.join([''.join(['{:4}'.format(item) for item in row])
    #   for row in z]))
    #print(z)
    return np.array(DataSim)
#data = simulated (circ,param,freq)
#datat = [data]
#print(datat)
#replot2(datat,'Bode')
| [
"numpy.array"
] | [((3651, 3668), 'numpy.array', 'np.array', (['DataSim'], {}), '(DataSim)\n', (3659, 3668), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pickle
import numpy as np
import datetime
import os
# Pickle file holding one user's sleep history; %s is filled with the user name.
data_filename = "/Yep/data/sleep_data_%s.pckl"
# Output path for the generated bar chart (relative path; presumably served
# from a web root — see get_fig_name, which prefixes "/").
fig_name = "sandbox/.figures/tmp_sleep.png"
def get_hour_min(time_str):
    """Split an "HH:MM[:...]" string into [hour, minute] as ints."""
    pieces = time_str.split(':')
    return [int(pieces[0]), int(pieces[1])]
def get_year_month_day(date_str):
    """Split a "YYYY-MM-DD" string into [year, month, day] as ints."""
    pieces = date_str.split('-')
    return [int(pieces[0]), int(pieces[1]), int(pieces[2])]
def format_minutes_graph(x, pos):
    """Matplotlib tick formatter: render a minute count as "<h>h<m>"."""
    hours, minutes = divmod(x, 60)
    return "%dh%d" % (hours, minutes)
class sleep_handler:
    """Track a user's sleep sessions, persist them with pickle, and render
    a bar chart of the last seven days with matplotlib.

    State lives in ``self.dic`` with keys:
      'Date'   - list of datetime.date (one per wake-up day)
      'Asleep' - list of datetime.datetime (bed times)
      'Woke'   - list of datetime.datetime (wake times)
      'Sleep'  - numpy array of minutes slept per tracked day
    """
    def __init__(self,user):
        """Load the user's history from disk, or start an empty record."""
        self.user = user
        self.today = datetime.date.today()
        self.asleep_hours = 0
        self.asleep_minutes = 0
        # Average minutes of sleep over the last 7 days; only computed by
        # gen_graph_last_7days, so get_average returns 0h0 before a plot.
        self.mean_sleep = 0
        try:
            f = open(data_filename % self.user, 'rb')
            self.dic = pickle.load(f)
            f.close()
        except IOError:
            # No saved history yet: start with an empty record.
            self.dic = {'Date' : [], 'Asleep' : [], 'Woke' : [], 'Sleep' : np.array([])}
    def get_average(self):
        """Return the 7-day average sleep time as an (hours, minutes) tuple."""
        return divmod(int(self.mean_sleep), 60)
    def update_dic(self, colname, data):
        """Append ``data`` to the list stored under ``colname``."""
        self.dic[colname].append(data)
    def get_dic(self):
        """Return the raw history dictionary."""
        return self.dic
    def get_time_slept(self):
        """Return the most recently added sleep span as [hours, minutes]."""
        return [self.asleep_hours, self.asleep_minutes]
    def save_data(self, dic=None):
        """Pickle the history (``self.dic`` by default) to the user's file."""
        if dic is None:
            dic = self.dic
        f = open(data_filename % self.user, 'wb')
        pickle.dump(dic, f)
        f.close()
        # Flush filesystem buffers so the record survives abrupt power-off —
        # presumably this runs on an embedded device; confirm.
        os.sync()
    def add_new_time(self, time_bed_str, date_bed_str, time_up_str, date_up_str):
        """Record one sleep session given "HH:MM" times and "YYYY-MM-DD" dates.

        The session is attributed to the wake-up date. If that date is
        already tracked, its minutes are accumulated instead of adding a
        new entry.
        """
        toBed_time = datetime.datetime(get_year_month_day(date_bed_str)[0], get_year_month_day(date_bed_str)[1], get_year_month_day(date_bed_str)[2], get_hour_min(time_bed_str)[0], get_hour_min(time_bed_str)[1])
        upBed_time = datetime.datetime(get_year_month_day(date_up_str)[0], get_year_month_day(date_up_str)[1], get_year_month_day(date_up_str)[2], get_hour_min(time_up_str)[0], get_hour_min(time_up_str)[1])
        date = datetime.date(get_year_month_day(date_up_str)[0], get_year_month_day(date_up_str)[1], get_year_month_day(date_up_str)[2])
        asleep = upBed_time - toBed_time
        # timedelta.seconds ignores whole days; sessions are presumably
        # always under 24h — confirm.
        self.asleep_hours, self.asleep_minutes = divmod(divmod(asleep.seconds, 60)[0], 60)
        asleep_stored = self.asleep_hours * 60 + self.asleep_minutes
        # If today is already tracked, we don't add a new entry, just add the sleep time
        if date in self.dic['Date']:
            idx = self.dic['Date'].index(date)
            self.dic['Sleep'][idx] = self.dic['Sleep'][idx] + asleep_stored
        else:
            self.update_dic('Date', date)
            self.update_dic('Asleep', toBed_time)
            self.update_dic('Woke', upBed_time)
            self.dic['Sleep'] = np.append(self.dic['Sleep'], asleep_stored)
    def gen_graph_last_7days(self):
        """Render a 7-day sleep bar chart to ``fig_name``.

        Also updates ``self.mean_sleep`` with the average over the days of
        the last week that have a non-zero total.
        """
        len_dic = 7
        last_week_delta = datetime.timedelta(days=6)
        # Weekday abbreviations for the last 7 days, oldest first; the two
        # most recent are relabeled "Yes"(terday) and "Tod"(ay).
        last_dates = [(self.today - datetime.timedelta(days=i)).strftime("%a") for i in range(6,-1,-1)]
        last_dates[-1] = "Tod"
        last_dates[-2] = "Yes"
        last_sleep = np.zeros(len_dic)
        for i in range(0, len(self.dic['Date'])):
            delta = self.today - self.dic['Date'][i]
            if delta <= last_week_delta:
                # Index 6 is today, 0 is six days ago.
                last_sleep[6-delta.days] += self.dic['Sleep'][i]
        # Average only over days that actually have data.
        last_non_zero = np.array([i for i in last_sleep if i != 0])
        if len(last_non_zero) >= 1:
            self.mean_sleep = last_non_zero.mean()
        days = last_dates
        x = np.arange(len_dic)
        fig, axs = plt.subplots()
        axs.bar(x, last_sleep, color='b')
        axs.set_ylabel("Sleep time")
        # Show the y axis in "XhY" form instead of raw minutes.
        formater = FuncFormatter(format_minutes_graph)
        axs.yaxis.set_major_formatter(formater)
        axs.get_xaxis().set_tick_params(direction='out')
        axs.xaxis.set_ticks_position('bottom')
        axs.set_xticks(np.arange(0, len(days)))
        axs.set_xticklabels(days)
        axs.set_xlabel('Day of the week')
        if self.mean_sleep != 0:
            # Dashed reference line at the weekly average.
            axs.axhline(y=self.mean_sleep, color='0.5', linestyle='--', label="Avg: %dh%d" % (divmod(self.mean_sleep, 60)))
        axs.legend()
        fig.suptitle("Last 7 days sleep time")
        fig.savefig(fig_name, dpi=200)
    def get_fig_name(self):
        """Return the chart path with a leading slash (web-root relative)."""
        return ("/" + fig_name)
| [
"os.sync",
"pickle.dump",
"matplotlib.ticker.FuncFormatter",
"pickle.load",
"datetime.timedelta",
"numpy.append",
"numpy.array",
"numpy.zeros",
"datetime.date.today",
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((651, 672), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (670, 672), False, 'import datetime\n'), ((1440, 1459), 'pickle.dump', 'pickle.dump', (['dic', 'f'], {}), '(dic, f)\n', (1451, 1459), False, 'import pickle\n'), ((1486, 1495), 'os.sync', 'os.sync', ([], {}), '()\n', (1493, 1495), False, 'import os\n'), ((2924, 2950), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(6)'}), '(days=6)\n', (2942, 2950), False, 'import datetime\n'), ((3138, 3155), 'numpy.zeros', 'np.zeros', (['len_dic'], {}), '(len_dic)\n', (3146, 3155), True, 'import numpy as np\n'), ((3394, 3437), 'numpy.array', 'np.array', (['[i for i in last_sleep if i != 0]'], {}), '([i for i in last_sleep if i != 0])\n', (3402, 3437), True, 'import numpy as np\n'), ((3581, 3599), 'numpy.arange', 'np.arange', (['len_dic'], {}), '(len_dic)\n', (3590, 3599), True, 'import numpy as np\n'), ((3619, 3633), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3631, 3633), True, 'import matplotlib.pyplot as plt\n'), ((3741, 3776), 'matplotlib.ticker.FuncFormatter', 'FuncFormatter', (['format_minutes_graph'], {}), '(format_minutes_graph)\n', (3754, 3776), False, 'from matplotlib.ticker import FuncFormatter\n'), ((853, 867), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (864, 867), False, 'import pickle\n'), ((2797, 2840), 'numpy.append', 'np.append', (["self.dic['Sleep']", 'asleep_stored'], {}), "(self.dic['Sleep'], asleep_stored)\n", (2806, 2840), True, 'import numpy as np\n'), ((989, 1001), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (997, 1001), True, 'import numpy as np\n'), ((2987, 3013), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (3005, 3013), False, 'import datetime\n')] |
import numpy as np
import os
import pygame, sys
from pygame.locals import *
import sys
sys.path.insert(0, os.getcwd())
import rodentia
BLACK = (0, 0, 0)
MAX_STEP_NUM = 60 * 30
class Display(object):
    """Interactive pygame viewer around a rodentia 3D environment.

    Renders the agent's first-person view into a pygame window, maps
    keyboard input to agent actions and resets the episode when it ends.

    Fix in this revision: two Japanese comments in ``prepare_maze_stage``
    had been split by an encoding-mangling so that a bare non-ASCII token
    sat on its own line as a statement, which would raise NameError when
    the method runs. All garbled comments are restored here as English
    translations.
    """

    def __init__(self, display_size):
        """Build the environment, stage geometry and the pygame window.

        Args:
            display_size: (width, height) tuple for the pygame window.
        """
        self.width = 640
        self.height = 480
        # Asset directory shipped with the repository examples.
        self.data_path = os.path.dirname(
            os.path.abspath(__file__)) + "/../../examples/data/"
        self.env = rodentia.Environment(
            width=self.width, height=self.height, bg_color=[0.1, 0.1, 0.1])
        self.prepare_stage()
        #self.prepare_maze_stage()
        self.obj_ids_set = set()
        self.reset()

        pygame.init()
        self.surface = pygame.display.set_mode(display_size, 0, 24)
        pygame.display.set_caption('rodentia')

    def prepare_stage(self):
        """Create the simple open stage: floor, four outer walls, one box."""
        floor_texture_path = self.data_path + "floor3.png"

        # Floor
        self.env.add_box(
            texture_path=floor_texture_path,
            half_extent=[20.0, 1.0, 20.0],
            pos=[0.0, -1.0, 0.0],
            rot=0.0,
            detect_collision=False)

        wall_texture_path = self.data_path + "wall2.png"

        # -Z
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[20.0, 1.0, 1.0],
            pos=[0.0, 1.0, -20.0],
            rot=0.0,
            detect_collision=False)
        # +Z
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[20.0, 1.0, 1.0],
            pos=[0.0, 1.0, 20.0],
            rot=0.0,
            detect_collision=False)
        # -X
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, 20.0],
            pos=[-20.0, 1.0, 0.0],
            rot=0.0,
            detect_collision=False)
        # +X
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, 20.0],
            pos=[20.0, 1.0, 0.0],
            rot=0.0,
            detect_collision=False)

        # Debug box
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, 1.0],
            pos=[0.0, 1.0, -5.0],
            rot=0,
            detect_collision=False)

    def prepare_maze_stage(self):
        """Create the alternative maze stage (unused by default).

        The wall-group comments below are English translations of the
        original (encoding-garbled) Japanese comments.
        """
        floor_texture_path = self.data_path + "floor3.png"

        # Floor
        self.env.add_box(
            texture_path=floor_texture_path,
            half_extent=[20.0, 1.0, 20.0],
            pos=[0.0, -1.0, 0.0],
            rot=0.0,
            detect_collision=False)

        wall_texture_path = self.data_path + "wall0.png"
        wall_thickness = 0.1

        # [Center room]
        # -Z
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, wall_thickness],
            pos=[0.0, 1.0, -4.0],
            rot=0.0,
            detect_collision=False)
        # +X
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 3.0],
            pos=[1.0, 1.0, -1.0],
            rot=0.0,
            detect_collision=False)
        # +Z
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[2.0, 1.0, wall_thickness],
            pos=[-1.0, 1.0, 2.0],
            rot=0.0,
            detect_collision=False)
        # -X
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 1.0],
            pos=[-3.0, 1.0, 1.0],
            rot=0.0,
            detect_collision=False)
        # -X
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 1.0],
            pos=[-3.0, 1.0, -3.0],
            rot=0.0,
            detect_collision=False)

        # [Outer wall]
        # Left (-X) wall
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 10.0],
            pos=[-5.0, 1.0, 0.0],
            rot=0.0,
            detect_collision=False)
        # Right (+X) wall
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 10.0],
            pos=[5.0, 1.0, 0.0],
            rot=0.0,
            detect_collision=False)
        # -Z wall
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[5.0, 1.0, wall_thickness],
            pos=[0.0, 1.0, -10.0],
            rot=0.0,
            detect_collision=False)
        # +Z wall
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[5.0, 1.0, wall_thickness],
            pos=[0.0, 1.0, 10.0],
            rot=0.0,
            detect_collision=False)

        # [-Z L]
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[2.0, 1.0, wall_thickness],
            pos=[-1.0, 1.0, -6.0],
            rot=0.0,
            detect_collision=False)
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 2.0],
            pos=[-3.0, 1.0, -8.0],
            rot=0.0,
            detect_collision=False)

        # [-Z 7]
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[2.0, 1.0, wall_thickness],
            pos=[1.0, 1.0, -8.0],
            rot=0.0,
            detect_collision=False)
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 3.0],
            pos=[3.0, 1.0, -5.0],
            rot=0.0,
            detect_collision=False)
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, wall_thickness],
            pos=[2.0, 1.0, -2.0],
            rot=0.0,
            detect_collision=False)

        # Large wall beside the goal
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[2.0, 1.0, wall_thickness],
            pos=[3.0, 1.0, 0.0],
            rot=0.0,
            detect_collision=False)
        # Small wall beside the goal
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, wall_thickness],
            pos=[2.0, 1.0, 2.0],
            rot=0.0,
            detect_collision=False)

        # Chair-shaped wall section
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, wall_thickness],
            pos=[2.0, 1.0, 4.0],
            rot=0.0,
            detect_collision=False)
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 1.0],
            pos=[3.0, 1.0, 5.0],
            rot=0.0,
            detect_collision=False)
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, wall_thickness],
            pos=[4.0, 1.0, 6.0],
            rot=0.0,
            detect_collision=False)

        # Long-legged chair-shaped wall section
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[2.0, 1.0, wall_thickness],
            pos=[-1.0, 1.0, 6.0],
            rot=0.0,
            detect_collision=False)
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 1.0],
            pos=[1.0, 1.0, 7.0],
            rot=0.0,
            detect_collision=False)
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, wall_thickness],
            pos=[2.0, 1.0, 8.0],
            rot=0.0,
            detect_collision=False)

        # Straight horizontal wall
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[wall_thickness, 1.0, 2.0],
            pos=[-1.0, 1.0, 4.0],
            rot=0.0,
            detect_collision=False)

        # Lower wall piece 1
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[1.0, 1.0, wall_thickness],
            pos=[-4.0, 1.0, 4.0],
            rot=0.0,
            detect_collision=False)
        # Lower wall piece 2
        self.env.add_box(
            texture_path=wall_texture_path,
            half_extent=[2.0, 1.0, wall_thickness],
            pos=[-3.0, 1.0, 8.0],
            rot=0.0,
            detect_collision=False)

    def update(self):
        """Advance one frame: clear, step the environment, flip the display."""
        self.surface.fill(BLACK)
        self.process()
        pygame.display.update()

    def get_action(self):
        """Map the current keyboard state to a [look, strafe, move] action."""
        lookAction = 0
        strafeAction = 0
        moveAction = 0

        pressed = pygame.key.get_pressed()

        if pressed[K_q]:
            lookAction += 6
        if pressed[K_e]:
            lookAction -= 6
        if pressed[K_a]:
            strafeAction += 1
        if pressed[K_d]:
            strafeAction -= 1
        if pressed[K_w]:
            moveAction += 1
        if pressed[K_s]:
            moveAction -= 1
        return np.array([lookAction, strafeAction, moveAction], dtype=np.int32)

    def process_sub(self, action):
        """Step the environment once; return (screen, reward, terminal)."""
        obs = self.env.step(action=action)
        self.step_num += 1

        screen = obs["screen"]
        collided = obs["collided"]  # unused while the reward logic is disabled

        reward = 0
        # Reward-on-collision logic is intentionally disabled for now:
        #if len(collided) != 0:
        #    for id in collided:
        #        reward += 1
        #        self.env.remove_obj(id)
        self.total_reward += reward

        # Episode ends after enough reward or when the step budget runs out.
        terminal = self.total_reward >= 2 or self.step_num >= MAX_STEP_NUM
        return screen, reward, terminal

    def process(self):
        """Run one input/step/render cycle, resetting if the episode ended."""
        action = self.get_action()
        screen, reward, terminal = self.process_sub(action)
        image = pygame.image.frombuffer(screen, (self.width, self.height),
                                        'RGB')
        self.surface.blit(image, (0, 0))

        if terminal:
            self.reset()

    def clear_objects(self):
        """Remove every tracked dynamic object from the environment."""
        for id in self.obj_ids_set:
            self.env.remove_obj(id)
        self.obj_ids_set = set()

    def reset(self):
        """Start a new episode: respawn objects, reposition agent, zero counters."""
        # Clear remaining reward objects
        self.clear_objects()

        texture_path = self.data_path + "red.png"

        # Reward spheres
        obj_id0 = self.env.add_sphere(
            texture_path=texture_path,
            radius=1.0,
            pos=[-5.0, 1.0, 5.0],
            rot=0.0,
            mass=1.0,
            detect_collision=True)
        obj_id1 = self.env.add_sphere(
            texture_path=texture_path,
            radius=1.0,
            pos=[5.0, 1.0, 5.0],
            rot=0.0,
            mass=1.0,
            detect_collision=True)
        self.obj_ids_set.add(obj_id0)
        self.obj_ids_set.add(obj_id1)

        # Test models
        model_path0 = self.data_path + "apple0.obj"
        self.env.add_model(
            path=model_path0,
            scale=[1.0, 1.0, 1.0],
            pos=[0.0, 0.0, 10.0],  # +z pos
            rot=0.0,
            mass=1.0,
            detect_collision=True)

        model_path1 = self.data_path + "lemon0.obj"
        self.env.add_model(
            path=model_path1,
            scale=[1.0, 1.0, 1.0],
            pos=[10.0, 0.0, 10.0],
            rot=0.0,
            mass=1.0,
            detect_collision=True)

        model_path2 = self.data_path + "ramp0.obj"
        self.env.add_model(
            path=model_path2,
            scale=[2.0, 1.0, 2.0],
            pos=[10.0, 0.0, 5.0],
            rot=np.pi * 0.25,
            mass=0.0,
            detect_collision=False,
            use_mesh_collision=True)

        model_path3 = self.data_path + "cylinder0.obj"
        self.env.add_model(
            path=model_path3,
            scale=[3.0, 3.0, 3.0],
            pos=[-5.0, 0.0, 8.0],
            rot=0.0,
            mass=0.0,
            detect_collision=False,
            use_mesh_collision=True)

        # Locate agent to default position
        self.env.locate_agent(pos=[0, 0, 0], rot_y=0.0)

        # Set light params
        self.env.set_light(dir=[-0.5, -1.0, -0.4])

        self.total_reward = 0
        self.step_num = 0
def main():
    """Open the viewer window and run the interactive loop until quit."""
    viewer = Display((640, 480))
    timer = pygame.time.Clock()
    frame_rate = 60

    active = True
    while active:
        for event in pygame.event.get():
            # Window close button or the Escape key ends the session.
            if event.type == pygame.QUIT:
                active = False
            elif event.type == KEYDOWN and event.key == K_ESCAPE:
                active = False
        viewer.update()
        timer.tick(frame_rate)


if __name__ == '__main__':
    main()
| [
"pygame.display.set_caption",
"pygame.init",
"rodentia.Environment",
"pygame.image.frombuffer",
"pygame.event.get",
"pygame.display.set_mode",
"os.getcwd",
"numpy.array",
"pygame.key.get_pressed",
"pygame.time.Clock",
"os.path.abspath",
"pygame.display.update"
] | [((106, 117), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (115, 117), False, 'import os\n'), ((12070, 12089), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (12087, 12089), False, 'import pygame, sys\n'), ((420, 509), 'rodentia.Environment', 'rodentia.Environment', ([], {'width': 'self.width', 'height': 'self.height', 'bg_color': '[0.1, 0.1, 0.1]'}), '(width=self.width, height=self.height, bg_color=[0.1, \n 0.1, 0.1])\n', (440, 509), False, 'import rodentia\n'), ((648, 661), 'pygame.init', 'pygame.init', ([], {}), '()\n', (659, 661), False, 'import pygame, sys\n'), ((686, 730), 'pygame.display.set_mode', 'pygame.display.set_mode', (['display_size', '(0)', '(24)'], {}), '(display_size, 0, 24)\n', (709, 730), False, 'import pygame, sys\n'), ((739, 777), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""rodentia"""'], {}), "('rodentia')\n", (765, 777), False, 'import pygame, sys\n'), ((8410, 8433), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8431, 8433), False, 'import pygame, sys\n'), ((8551, 8575), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (8573, 8575), False, 'import pygame, sys\n'), ((8914, 8978), 'numpy.array', 'np.array', (['[lookAction, strafeAction, moveAction]'], {'dtype': 'np.int32'}), '([lookAction, strafeAction, moveAction], dtype=np.int32)\n', (8922, 8978), True, 'import numpy as np\n'), ((9586, 9651), 'pygame.image.frombuffer', 'pygame.image.frombuffer', (['screen', '(self.width, self.height)', '"""RGB"""'], {}), "(screen, (self.width, self.height), 'RGB')\n", (9609, 9651), False, 'import pygame, sys\n'), ((12164, 12182), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (12180, 12182), False, 'import pygame, sys\n'), ((347, 372), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (362, 372), False, 'import os\n')] |
"""
Copyright (C) 2019 <NAME>, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import unittest
import numpy as np
from cxplain.backend.masking.masking_util import MaskingUtil
class TestMasking(unittest.TestCase):
    """Unit tests for MaskingUtil's downsampling and mask helpers."""

    def setUp(self):
        # Deterministic RNG state for every test.
        np.random.seed(909)

    def test_extract_downsample_factors_invalid_length(self):
        """Factor tuples whose length mismatches the dimensionality fail."""
        with self.assertRaises(ValueError):
            MaskingUtil.extract_downsample_factors((2, 2), 3)
        with self.assertRaises(ValueError):
            MaskingUtil.extract_downsample_factors((1, 1, 1, 1), 3)

    def test_extract_downsample_factors_valid_length(self):
        """Scalars are broadcast and matching tuples pass through unchanged."""
        cases = [
            (((2,), 2), (2, 2)),
            ((1, 3), (1, 1, 1)),
            (((1, 2, 3), 3), (1, 2, 3)),
        ]
        for args, expected in cases:
            self.assertEqual(MaskingUtil.extract_downsample_factors(*args),
                             expected)

    def test_get_input_constants(self):
        """Steps and counts are derived consistently from shape and factors."""
        input_shapes = [(1,), (2,), (2,),
                        (1, 1), (2, 2), (2, 2), (4, 4), (4, 4), (4, 2),
                        (2, 2, 2), (2, 2, 2), (3, 1, 3)]
        factor_sets = [(1,), (1,), (2,),
                       (1, 1), (1, 1), (2, 2), (1, 4), (4, 1), (4, 2),
                       (1, 1, 1), (2, 2, 2), (1, 1, 1)]
        expected_step_sets = [(1,), (2,), (1,),
                              (1, 1), (2, 2), (1, 1), (4, 1), (1, 4), (1, 1),
                              (2, 2, 2), (1, 1, 1), (3, 1, 3)]

        for shape, factors, expected in zip(input_shapes, factor_sets,
                                            expected_step_sets):
            full_shape = shape + (1,)  # Append the channel dimension.
            num_indices, num_channels, steps, downsampling_factor = \
                MaskingUtil.get_input_constants(input_dim=full_shape,
                                                downsample_factors=factors)
            self.assertEqual(num_indices, int(np.prod(expected)))
            self.assertEqual(num_channels, 1)
            self.assertEqual(downsampling_factor, int(np.prod(factors)))
            self.assertTrue(np.array_equal(np.array(expected), steps))

    def test_get_ith_mask_invalid(self):
        """Image shapes of unsupported rank are rejected."""
        with self.assertRaises(ValueError):
            MaskingUtil.get_ith_mask(i=0, image_shape=(1,), downsample_factors=(1,))
        with self.assertRaises(ValueError):
            MaskingUtil.get_ith_mask(i=0, image_shape=(1,)*5, downsample_factors=(1,))


if __name__ == '__main__':
    unittest.main()
| [
"numpy.prod",
"cxplain.backend.masking.masking_util.MaskingUtil.get_ith_mask",
"numpy.array",
"cxplain.backend.masking.masking_util.MaskingUtil.extract_downsample_factors",
"numpy.random.seed",
"cxplain.backend.masking.masking_util.MaskingUtil.get_input_constants",
"unittest.main"
] | [((3467, 3482), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3480, 3482), False, 'import unittest\n'), ((1276, 1295), 'numpy.random.seed', 'np.random.seed', (['(909)'], {}), '(909)\n', (1290, 1295), True, 'import numpy as np\n'), ((1656, 1703), 'cxplain.backend.masking.masking_util.MaskingUtil.extract_downsample_factors', 'MaskingUtil.extract_downsample_factors', (['(2,)', '(2)'], {}), '((2,), 2)\n', (1694, 1703), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((1765, 1809), 'cxplain.backend.masking.masking_util.MaskingUtil.extract_downsample_factors', 'MaskingUtil.extract_downsample_factors', (['(1)', '(3)'], {}), '(1, 3)\n', (1803, 1809), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((1874, 1926), 'cxplain.backend.masking.masking_util.MaskingUtil.extract_downsample_factors', 'MaskingUtil.extract_downsample_factors', (['(1, 2, 3)', '(3)'], {}), '((1, 2, 3), 3)\n', (1912, 1926), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((1415, 1464), 'cxplain.backend.masking.masking_util.MaskingUtil.extract_downsample_factors', 'MaskingUtil.extract_downsample_factors', (['(2, 2)', '(3)'], {}), '((2, 2), 3)\n', (1453, 1464), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((1521, 1576), 'cxplain.backend.masking.masking_util.MaskingUtil.extract_downsample_factors', 'MaskingUtil.extract_downsample_factors', (['(1, 1, 1, 1)', '(3)'], {}), '((1, 1, 1, 1), 3)\n', (1559, 1576), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((2769, 2845), 'cxplain.backend.masking.masking_util.MaskingUtil.get_input_constants', 'MaskingUtil.get_input_constants', ([], {'input_dim': 'shape', 'downsample_factors': 'factors'}), '(input_dim=shape, downsample_factors=factors)\n', (2800, 2845), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((3229, 3301), 'cxplain.backend.masking.masking_util.MaskingUtil.get_ith_mask', 
'MaskingUtil.get_ith_mask', ([], {'i': '(0)', 'image_shape': '(1,)', 'downsample_factors': '(1,)'}), '(i=0, image_shape=(1,), downsample_factors=(1,))\n', (3253, 3301), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((3359, 3435), 'cxplain.backend.masking.masking_util.MaskingUtil.get_ith_mask', 'MaskingUtil.get_ith_mask', ([], {'i': '(0)', 'image_shape': '((1,) * 5)', 'downsample_factors': '(1,)'}), '(i=0, image_shape=(1,) * 5, downsample_factors=(1,))\n', (3383, 3435), False, 'from cxplain.backend.masking.masking_util import MaskingUtil\n'), ((3099, 3117), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (3107, 3117), True, 'import numpy as np\n'), ((3008, 3025), 'numpy.prod', 'np.prod', (['expected'], {}), '(expected)\n', (3015, 3025), True, 'import numpy as np\n'), ((3035, 3051), 'numpy.prod', 'np.prod', (['factors'], {}), '(factors)\n', (3042, 3051), True, 'import numpy as np\n')] |
import numpy as np
import paddle
from paddlevision.models.alexnet import alexnet
from reprod_log import ReprodLogger
if __name__ == "__main__":
paddle.set_device("cpu")
# load model
# the model is save into ~/.cache/torch/hub/checkpoints/alexnet-owt-4df8aa71.pth
# def logger
reprod_logger = ReprodLogger()
model = alexnet(
pretrained="../../weights/alexnet_paddle.pdparams", num_classes=1000)
model.eval()
# read or gen fake data
fake_data = np.load("../../fake_data/fake_data.npy")
fake_data = paddle.to_tensor(fake_data)
# forward
out = model(fake_data)
#
reprod_logger.add("logits", out.cpu().detach().numpy())
reprod_logger.save("forward_paddle.npy")
| [
"reprod_log.ReprodLogger",
"paddlevision.models.alexnet.alexnet",
"paddle.to_tensor",
"numpy.load",
"paddle.set_device"
] | [((151, 175), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (168, 175), False, 'import paddle\n'), ((316, 330), 'reprod_log.ReprodLogger', 'ReprodLogger', ([], {}), '()\n', (328, 330), False, 'from reprod_log import ReprodLogger\n'), ((344, 421), 'paddlevision.models.alexnet.alexnet', 'alexnet', ([], {'pretrained': '"""../../weights/alexnet_paddle.pdparams"""', 'num_classes': '(1000)'}), "(pretrained='../../weights/alexnet_paddle.pdparams', num_classes=1000)\n", (351, 421), False, 'from paddlevision.models.alexnet import alexnet\n'), ((493, 533), 'numpy.load', 'np.load', (['"""../../fake_data/fake_data.npy"""'], {}), "('../../fake_data/fake_data.npy')\n", (500, 533), True, 'import numpy as np\n'), ((550, 577), 'paddle.to_tensor', 'paddle.to_tensor', (['fake_data'], {}), '(fake_data)\n', (566, 577), False, 'import paddle\n')] |
# Copyright 2019 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, Tuple
import numpy as np
import pytest
import torch
from torch import tensor as t
from mt.mvae.ops import Euclidean
from mt.mvae.ops import StereographicallyProjectedSphere
import mt.mvae.ops.poincare as P
import mt.mvae.ops.spherical_projected as SP
import mt.mvae.ops.euclidean as E
from mt.mvae.ops import PoincareBall
import mt.mvae.ops.universal as U
from mt.mvae.ops.common import eps
import tests.mvae.ops.test_spherical_projected as TSP
import tests.mvae.ops.test_poincare as TP
import tests.mvae.ops.test_euclidean as TE
# Fixed seed so the random values below are reproducible across test runs.
np.random.seed(42)
random_nums = np.random.random_sample(100) * 100 + 1  # 100 values in [1, 101)

# Tolerances for approximate tensor comparisons (tight and low-precision).
test_eps = 5e-6
low_prec_test_eps = 1e-4

curvatures = [-1. / 4, 0., 1. / 4]  # radii -2, 0, 2
def _create_manifold(curvature: float) -> U.Universal:
    """Build a Universal manifold whose curvature is fixed to ``curvature``."""
    # Local name avoids shadowing the module-level `t` (torch.tensor alias).
    curvature_tensor = torch.tensor(curvature)
    return U.Universal(lambda: curvature_tensor)
def lambda_x(x: torch.Tensor, manifold: U.Universal) -> torch.Tensor:
    """Dispatch the conformal factor computation to the manifold-specific op."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return torch.ones(x.shape[:-1] + (1,), dtype=x.dtype)
    if isinstance(space, PoincareBall):
        return P.pm.lambda_x(x, c=-manifold.curvature, keepdim=True)
    if isinstance(space, StereographicallyProjectedSphere):
        return SP.lambda_x_c(x, c=manifold.curvature)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def exp_map(x: torch.Tensor, at_point: torch.Tensor, manifold: U.Universal) -> torch.Tensor:
    """Dispatch the exponential map at ``at_point`` to the manifold-specific op."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return E.exp_map(x, at_point)
    if isinstance(space, PoincareBall):
        return P.exp_map(x, at_point, radius=manifold.radius)
    if isinstance(space, StereographicallyProjectedSphere):
        return SP.exp_map(x, at_point, radius=manifold.radius)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def inverse_exp_map(x: torch.Tensor, at_point: torch.Tensor, manifold: U.Universal) -> \
        torch.Tensor:
    """Dispatch the logarithmic (inverse exponential) map to the manifold op."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return E.inverse_exp_map(x, at_point)
    if isinstance(space, PoincareBall):
        return P.inverse_exp_map(x, at_point, radius=manifold.radius)
    if isinstance(space, StereographicallyProjectedSphere):
        return SP.inverse_exp_map(x, at_point, radius=manifold.radius)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def inverse_sample_projection_mu0(x: torch.Tensor, at_point: torch.Tensor, manifold: U.Universal) -> \
        Tuple[torch.Tensor, torch.Tensor]:
    """Dispatch the inverse of the mu_0 sampling projection to the manifold op."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return E.inverse_sample_projection_mu0(x, at_point)
    if isinstance(space, PoincareBall):
        return P.inverse_sample_projection_mu0(x, at_point, radius=manifold.radius)
    if isinstance(space, StereographicallyProjectedSphere):
        return SP.inverse_sample_projection_mu0(x, at_point, radius=manifold.radius)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def is_in_hyp_space(x: torch.Tensor, manifold: U.Universal, eps: float = eps) -> torch.Tensor:
    """Dispatch the membership check for the manifold to the test helper."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return TE.is_in_hyp_space(x, eps=eps)
    if isinstance(space, PoincareBall):
        return TP.is_in_hyp_space(x, radius=manifold.radius, eps=eps)
    if isinstance(space, StereographicallyProjectedSphere):
        return TSP.is_in_hyp_space(x, radius=manifold.radius, eps=eps)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def is_in_tangent_space(x: torch.Tensor, at_point: torch.Tensor, manifold: U.Universal,
                        eps: float = eps) -> torch.Tensor:
    """Dispatch the tangent-space membership check to the test helper."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return TE.is_in_tangent_space(x, at_point, eps=eps)
    if isinstance(space, PoincareBall):
        return TP.is_in_tangent_space(x, at_point, radius=manifold.radius, eps=eps)
    if isinstance(space, StereographicallyProjectedSphere):
        return TSP.is_in_tangent_space(x, at_point, radius=manifold.radius, eps=eps)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def distance(x: torch.Tensor, y: torch.Tensor, manifold: U.Universal, **kwargs: Any) -> torch.Tensor:
    """Dispatch the geodesic distance computation to the manifold-specific op."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return TE.euclidean_distance(x, y, **kwargs)
    if isinstance(space, PoincareBall):
        return P.poincare_distance(x, y, radius=manifold.radius, **kwargs)
    if isinstance(space, StereographicallyProjectedSphere):
        return SP.spherical_projected_distance(x, y, K=manifold.curvature, **kwargs)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def parallel_transport(x: torch.Tensor, src: torch.Tensor, dst: torch.Tensor, manifold: U.Universal) -> torch.Tensor:
    """Dispatch parallel transport of ``x`` from ``src`` to ``dst``."""
    space = manifold.manifold
    if isinstance(space, Euclidean):
        return TE.parallel_transport(x, src, dst)
    if isinstance(space, PoincareBall):
        return TP.parallel_transport(x, src, dst, radius=manifold.radius)
    if isinstance(space, StereographicallyProjectedSphere):
        return TSP.parallel_transport(x, src, dst, radius=manifold.radius)
    raise ValueError("Unknown manifold " + space.__class__.__name__)
def inverse_parallel_transport(x: torch.Tensor, src: torch.Tensor, dst: torch.Tensor,
                               manifold: U.Universal) -> torch.Tensor:
    """Undo a ``src`` -> ``dst`` parallel transport, dispatched on the wrapped manifold type."""
    inner = manifold.manifold
    if isinstance(inner, Euclidean):
        return TE.inverse_parallel_transport(x, src, dst)
    if isinstance(inner, PoincareBall):
        return TP.inverse_parallel_transport(x, src, dst, radius=manifold.radius)
    if isinstance(inner, StereographicallyProjectedSphere):
        return TSP.inverse_parallel_transport(x, src, dst, radius=manifold.radius)
    raise ValueError("Unknown manifold " + inner.__class__.__name__)
@pytest.mark.parametrize("curvature", curvatures)
def test_mu_0(curvature: float) -> None:
    """``mu_0`` must return an all-zero tensor of the requested shape."""
    zeros = t([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
    assert _create_manifold(curvature).mu_0((3, 3), dtype=torch.int64).allclose(zeros)
@pytest.mark.parametrize("curvature", curvatures)
def test_is_in_hyp_space(curvature: float) -> None:
    """Radius-scaled probe points lie on the manifold iff curvature >= 0."""
    manifold = _create_manifold(curvature)
    radius = manifold.radius
    # the scaled unit point is always on the manifold
    assert is_in_hyp_space(radius * t([1., 0, 0]), manifold=manifold)
    probes = [
        t([0.1, 2, 3]),
        t([1, 1, np.sqrt(2)]),
        t([1, 1, np.sqrt(2)]),
        t([0, 2, -np.sqrt(2)]),
    ]
    for probe in probes:
        if curvature >= 0:
            assert is_in_hyp_space(radius * probe, manifold=manifold)
        else:
            assert not is_in_hyp_space(radius * probe, manifold=manifold)
@pytest.mark.parametrize("curvature", curvatures)
def test_is_in_tangent_space(curvature: float) -> None:
    """Placeholder: tangent-space membership assertions are disabled (see TODO below)."""
    # manifold = _create_manifold(curvature)
    # assert is_in_tangent_space(t([0., 2, 3]), manifold.radius * t([1., 0, 0]), manifold=manifold)
    #
    # if curvature <= 0:
    #     assert not is_in_tangent_space(t([0.1, 2, 3]), manifold.radius * t([1., 0, 0]), manifold=manifold)
    # else:
    #     assert is_in_tangent_space(t([0.1, 2, 3]), manifold.radius * t([1., 0, 0]), manifold=manifold)
    #
    # assert is_in_tangent_space(t([0, 2, -np.sqrt(2)]), t([1, 0.5, np.sqrt(2) / 2]), eps=test_eps, manifold=manifold)
    # TODO-LATER: Projected spaces tangent spaces don't work like this.
    pass
@pytest.mark.parametrize("curvature", curvatures)
def test_distance(curvature: float) -> None:
    """Distance is (approximately) zero on the diagonal and symmetric in its arguments."""
    manifold = _create_manifold(curvature)
    radius = manifold.radius
    point_a = radius * t([1., 0, 0])
    point_b = radius * t([2., 1., np.sqrt(2)])
    for point in (point_a, point_b):
        assert distance(point, point, manifold=manifold).allclose(t(0.), atol=5e-4)
    assert distance(point_a, point_b, manifold=manifold) == distance(point_b, point_a, manifold=manifold)
@pytest.mark.parametrize("curvature", curvatures)
def test_parallel_transport(curvature: float) -> None:
    """Transporting there and back (or inverting the transport) recovers the vector."""
    manifold = _create_manifold(curvature)
    point_a = t([2., 1, np.sqrt(2)]).double() / manifold.radius
    point_b = t([np.sqrt(5), 1, np.sqrt(3)]).double() / manifold.radius
    assert is_in_hyp_space(point_a, manifold=manifold)
    assert is_in_hyp_space(point_b, manifold=manifold)
    vec = t([0, 2, -np.sqrt(2)]).double()
    # transport onto the same point is the identity
    assert parallel_transport(vec, src=point_a, dst=point_a, manifold=manifold).allclose(vec, atol=5e-4)
    moved = parallel_transport(vec, src=point_a, dst=point_b, manifold=manifold)
    back = parallel_transport(moved, src=point_b, dst=point_a, manifold=manifold)
    assert vec.allclose(back, atol=5e-4)
    undone = inverse_parallel_transport(moved, src=point_a, dst=point_b, manifold=manifold)
    assert vec.allclose(undone)
@pytest.mark.parametrize("curvature", curvatures)
def test_parallel_transport_batch(curvature: float) -> None:
    """Batched transport followed by its inverse is the identity."""
    manifold = _create_manifold(curvature)
    src_pt = t([2., 1, np.sqrt(2)])
    dst_pt = t([np.sqrt(5), 1, np.sqrt(3)])
    batch = torch.stack((t([0, 2, -np.sqrt(2)]), t([0, 4, -2 * np.sqrt(2)])), dim=0)
    moved = parallel_transport(batch, src=src_pt, dst=dst_pt, manifold=manifold)
    restored = inverse_parallel_transport(moved, src=src_pt, dst=dst_pt, manifold=manifold)
    assert batch.allclose(restored, atol=test_eps)
@pytest.mark.parametrize("curvature", curvatures)
def test_parallel_transport_mu0(curvature: float) -> None:
    """The mu0-specialised transport matches the generic one and is invertible."""
    manifold = _create_manifold(curvature)
    origin = t([0., 0, 0])
    target = t([np.sqrt(5), 1, np.sqrt(3)])
    vec = t([0, 2, -np.sqrt(2)])
    # transporting onto the origin itself changes nothing
    assert manifold.parallel_transport_mu0(vec, dst=origin).allclose(vec)
    moved = manifold.parallel_transport_mu0(vec, dst=target)
    assert parallel_transport(vec, src=origin, dst=target, manifold=manifold).allclose(moved)
    assert vec.allclose(manifold.inverse_parallel_transport_mu0(moved, src=target))
@pytest.mark.parametrize("curvature", curvatures)
def test_parallel_transport_mu0_batch(curvature: float) -> None:
    """Batched mu0-transport followed by its inverse is the identity."""
    manifold = _create_manifold(curvature)
    target = manifold.radius * t([np.sqrt(5), 1, np.sqrt(3)])
    batch = torch.stack((t([0, 2, -np.sqrt(2)]), t([0, 4, -2 * np.sqrt(2)])), dim=0)
    moved = manifold.parallel_transport_mu0(batch, dst=target)
    restored = manifold.inverse_parallel_transport_mu0(moved, src=target)
    assert batch.allclose(restored)
@pytest.mark.parametrize("curvature", curvatures)
def test_exp_map(curvature: float) -> None:
    """exp/log maps are mutually inverse and respect the conformal factor."""
    manifold = _create_manifold(curvature)
    base = t([1., 0.5, np.sqrt(2) / 2])
    tangent = t([0, 2, -np.sqrt(2)])
    assert is_in_tangent_space(tangent, at_point=base, eps=test_eps, manifold=manifold)
    on_manifold = exp_map(tangent, at_point=base, manifold=manifold)
    recovered = inverse_exp_map(on_manifold, at_point=base, manifold=manifold)
    assert tangent.allclose(recovered, atol=test_eps)
    expected_dist = lambda_x(base, manifold=manifold) * torch.norm(tangent, p=2)
    assert distance(base, on_manifold, manifold=manifold).allclose(expected_dist)
@pytest.mark.parametrize("curvature", curvatures)
def test_exp_map_mu0(curvature: float) -> None:
    """The mu0-specialised exp/log maps agree with the generic ones and round-trip."""
    manifold = _create_manifold(curvature)
    origin = manifold.mu_0((3,)).double()
    tangent = t([0, 2, -np.sqrt(2)]).double()
    assert is_in_tangent_space(tangent, at_point=origin, eps=test_eps, manifold=manifold)
    mapped = exp_map(tangent, at_point=origin, manifold=manifold)
    mapped_mu0 = manifold.exp_map_mu0(tangent)
    assert mapped.allclose(mapped_mu0)
    back = inverse_exp_map(mapped, at_point=origin, manifold=manifold)
    back_mu0 = manifold.inverse_exp_map_mu0(mapped_mu0)
    assert tangent.allclose(back, atol=test_eps)
    assert tangent.allclose(back_mu0, atol=test_eps)
    expected_dist = lambda_x(origin, manifold) * torch.norm(tangent, p=2)
    for point in (mapped, mapped_mu0):
        assert distance(origin, point, manifold=manifold).allclose(expected_dist)
@pytest.mark.parametrize("curvature", curvatures)
def test_exp_map_large(curvature: float) -> None:
    """exp/log round-trip still holds (at lower precision) for a large tangent vector."""
    manifold = _create_manifold(curvature)
    base = t([1., 0.5, np.sqrt(2) / 2]).double()
    tangent = 1.8 * t([0, 2, -np.sqrt(2)]).double()
    assert is_in_tangent_space(tangent, at_point=base, eps=test_eps, manifold=manifold)  # This should hold.
    on_manifold = exp_map(tangent, at_point=base, manifold=manifold)
    recovered = inverse_exp_map(on_manifold, at_point=base, manifold=manifold)
    assert tangent.allclose(recovered, atol=low_prec_test_eps)
    expected_dist = lambda_x(base, manifold) * torch.norm(tangent, p=2)
    assert distance(base, on_manifold, manifold=manifold).allclose(expected_dist)
@pytest.mark.parametrize("curvature", curvatures)
def test_exp_map_batch(curvature: float) -> None:
    """exp/log maps round-trip on a batch of tangent vectors."""
    manifold = _create_manifold(curvature)
    base = t([1., 0.5, np.sqrt(2) / 2]).double()
    vec_a = t([0, 2, -np.sqrt(2)]).double()
    vec_b = 0.9 * t([0, 4, -2 * np.sqrt(2)]).double()
    for vec in (vec_a, vec_b):
        assert is_in_tangent_space(vec, at_point=base, eps=test_eps, manifold=manifold)
    batch = torch.stack((vec_a, vec_b), dim=0)
    mapped = exp_map(batch, at_point=base, manifold=manifold)
    recovered = inverse_exp_map(mapped, at_point=base, manifold=manifold)
    assert batch.allclose(recovered, atol=1e-5)
    norms = torch.norm(batch, p=2, dim=-1, keepdim=True)
    assert distance(base, mapped, manifold=manifold).allclose(lambda_x(base, manifold) * norms)
@pytest.mark.parametrize("curvature", curvatures)
def test_sample_projection(curvature: float) -> None:
    """Projecting a mu0-tangent sample onto the manifold and back recovers it."""
    manifold = _create_manifold(curvature)
    sample_vec = t([0., 1, 2])
    origin = t([0., 0, 0])
    assert is_in_hyp_space(origin, manifold=manifold)
    assert is_in_tangent_space(sample_vec, at_point=origin, manifold=manifold)
    base = t([1., 0.5, np.sqrt(2) / 2])
    assert is_in_hyp_space(base, manifold=manifold)
    projected, _ = manifold.sample_projection_mu0(sample_vec, at_point=base)
    assert is_in_hyp_space(projected, eps=low_prec_test_eps, manifold=manifold)
    _, recovered = inverse_sample_projection_mu0(projected, at_point=base, manifold=manifold)
    assert sample_vec.allclose(recovered, atol=test_eps)
    assert is_in_tangent_space(recovered, at_point=origin, eps=test_eps, manifold=manifold)
| [
"mt.mvae.ops.poincare.poincare_distance",
"numpy.sqrt",
"mt.mvae.ops.spherical_projected.exp_map",
"tests.mvae.ops.test_spherical_projected.is_in_tangent_space",
"tests.mvae.ops.test_euclidean.inverse_parallel_transport",
"mt.mvae.ops.spherical_projected.inverse_sample_projection_mu0",
"tests.mvae.ops.t... | [((1210, 1228), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1224, 1228), True, 'import numpy as np\n'), ((6822, 6870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (6845, 6870), False, 'import pytest\n'), ((7095, 7143), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (7118, 7143), False, 'import pytest\n'), ((8036, 8084), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (8059, 8084), False, 'import pytest\n'), ((8753, 8801), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (8776, 8801), False, 'import pytest\n'), ((9224, 9272), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (9247, 9272), False, 'import pytest\n'), ((10198, 10246), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (10221, 10246), False, 'import pytest\n'), ((10720, 10768), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (10743, 10768), False, 'import pytest\n'), ((11276, 11324), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (11299, 11324), False, 'import pytest\n'), ((11745, 11793), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (11768, 11793), False, 'import pytest\n'), ((12317, 12365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (12340, 12365), False, 'import pytest\n'), ((13197, 
13245), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (13220, 13245), False, 'import pytest\n'), ((13819, 13867), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (13842, 13867), False, 'import pytest\n'), ((14616, 14664), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""curvature"""', 'curvatures'], {}), "('curvature', curvatures)\n", (14639, 14664), False, 'import pytest\n'), ((1441, 1464), 'torch.tensor', 'torch.tensor', (['curvature'], {}), '(curvature)\n', (1453, 1464), False, 'import torch\n'), ((1476, 1499), 'mt.mvae.ops.universal.Universal', 'U.Universal', (['(lambda : t)'], {}), '(lambda : t)\n', (1487, 1499), True, 'import mt.mvae.ops.universal as U\n'), ((7021, 7057), 'torch.tensor', 't', (['[[0, 0, 0], [0, 0, 0], [0, 0, 0]]'], {}), '([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n', (7022, 7057), True, 'from torch import tensor as t\n'), ((10501, 10528), 'torch.stack', 'torch.stack', (['(u, u2)'], {'dim': '(0)'}), '((u, u2), dim=0)\n', (10512, 10528), False, 'import torch\n'), ((10687, 10716), 'mt.mvae.ops.universal.allclose', 'U.allclose', (['U_'], {'atol': 'test_eps'}), '(U_, atol=test_eps)\n', (10697, 10716), True, 'import mt.mvae.ops.universal as U\n'), ((10881, 10895), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (10882, 10895), True, 'from torch import tensor as t\n'), ((11569, 11596), 'torch.stack', 'torch.stack', (['(u, u2)'], {'dim': '(0)'}), '((u, u2), dim=0)\n', (11580, 11596), False, 'import torch\n'), ((14270, 14297), 'torch.stack', 'torch.stack', (['(u, u2)'], {'dim': '(0)'}), '((u, u2), dim=0)\n', (14281, 14297), False, 'import torch\n'), ((14770, 14784), 'torch.tensor', 't', (['[0.0, 1, 2]'], {}), '([0.0, 1, 2])\n', (14771, 14784), True, 'from torch import tensor as t\n'), ((14795, 14809), 'torch.tensor', 't', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (14796, 
14809), True, 'from torch import tensor as t\n'), ((1243, 1271), 'numpy.random.random_sample', 'np.random.random_sample', (['(100)'], {}), '(100)\n', (1266, 1271), True, 'import numpy as np\n'), ((1635, 1681), 'torch.ones', 'torch.ones', (['(x.shape[:-1] + (1,))'], {'dtype': 'x.dtype'}), '(x.shape[:-1] + (1,), dtype=x.dtype)\n', (1645, 1681), False, 'import torch\n'), ((2187, 2209), 'mt.mvae.ops.euclidean.exp_map', 'E.exp_map', (['x', 'at_point'], {}), '(x, at_point)\n', (2196, 2209), True, 'import mt.mvae.ops.euclidean as E\n'), ((2735, 2765), 'mt.mvae.ops.euclidean.inverse_exp_map', 'E.inverse_exp_map', (['x', 'at_point'], {}), '(x, at_point)\n', (2752, 2765), True, 'import mt.mvae.ops.euclidean as E\n'), ((3342, 3386), 'mt.mvae.ops.euclidean.inverse_sample_projection_mu0', 'E.inverse_sample_projection_mu0', (['x', 'at_point'], {}), '(x, at_point)\n', (3373, 3386), True, 'import mt.mvae.ops.euclidean as E\n'), ((3940, 3970), 'tests.mvae.ops.test_euclidean.is_in_hyp_space', 'TE.is_in_hyp_space', (['x'], {'eps': 'eps'}), '(x, eps=eps)\n', (3958, 3970), True, 'import tests.mvae.ops.test_euclidean as TE\n'), ((4548, 4592), 'tests.mvae.ops.test_euclidean.is_in_tangent_space', 'TE.is_in_tangent_space', (['x', 'at_point'], {'eps': 'eps'}), '(x, at_point, eps=eps)\n', (4570, 4592), True, 'import tests.mvae.ops.test_euclidean as TE\n'), ((5153, 5190), 'tests.mvae.ops.test_euclidean.euclidean_distance', 'TE.euclidean_distance', (['x', 'y'], {}), '(x, y, **kwargs)\n', (5174, 5190), True, 'import tests.mvae.ops.test_euclidean as TE\n'), ((5758, 5792), 'tests.mvae.ops.test_euclidean.parallel_transport', 'TE.parallel_transport', (['x', 'src', 'dst'], {}), '(x, src, dst)\n', (5779, 5792), True, 'import tests.mvae.ops.test_euclidean as TE\n'), ((6388, 6430), 'tests.mvae.ops.test_euclidean.inverse_parallel_transport', 'TE.inverse_parallel_transport', (['x', 'src', 'dst'], {}), '(x, src, dst)\n', (6417, 6430), True, 'import tests.mvae.ops.test_euclidean as TE\n'), ((8918, 8932), 
'torch.tensor', 't', (['[1.0, 0, 0]'], {}), '([1.0, 0, 0])\n', (8919, 8932), True, 'from torch import tensor as t\n'), ((9041, 9047), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (9042, 9047), True, 'from torch import tensor as t\n'), ((9115, 9121), 'torch.tensor', 't', (['(0.0)'], {}), '(0.0)\n', (9116, 9121), True, 'from torch import tensor as t\n'), ((1751, 1804), 'mt.mvae.ops.poincare.pm.lambda_x', 'P.pm.lambda_x', (['x'], {'c': '(-manifold.curvature)', 'keepdim': '(True)'}), '(x, c=-manifold.curvature, keepdim=True)\n', (1764, 1804), True, 'import mt.mvae.ops.poincare as P\n'), ((2279, 2325), 'mt.mvae.ops.poincare.exp_map', 'P.exp_map', (['x', 'at_point'], {'radius': 'manifold.radius'}), '(x, at_point, radius=manifold.radius)\n', (2288, 2325), True, 'import mt.mvae.ops.poincare as P\n'), ((2835, 2889), 'mt.mvae.ops.poincare.inverse_exp_map', 'P.inverse_exp_map', (['x', 'at_point'], {'radius': 'manifold.radius'}), '(x, at_point, radius=manifold.radius)\n', (2852, 2889), True, 'import mt.mvae.ops.poincare as P\n'), ((3456, 3524), 'mt.mvae.ops.poincare.inverse_sample_projection_mu0', 'P.inverse_sample_projection_mu0', (['x', 'at_point'], {'radius': 'manifold.radius'}), '(x, at_point, radius=manifold.radius)\n', (3487, 3524), True, 'import mt.mvae.ops.poincare as P\n'), ((4040, 4094), 'tests.mvae.ops.test_poincare.is_in_hyp_space', 'TP.is_in_hyp_space', (['x'], {'radius': 'manifold.radius', 'eps': 'eps'}), '(x, radius=manifold.radius, eps=eps)\n', (4058, 4094), True, 'import tests.mvae.ops.test_poincare as TP\n'), ((4662, 4730), 'tests.mvae.ops.test_poincare.is_in_tangent_space', 'TP.is_in_tangent_space', (['x', 'at_point'], {'radius': 'manifold.radius', 'eps': 'eps'}), '(x, at_point, radius=manifold.radius, eps=eps)\n', (4684, 4730), True, 'import tests.mvae.ops.test_poincare as TP\n'), ((5260, 5319), 'mt.mvae.ops.poincare.poincare_distance', 'P.poincare_distance', (['x', 'y'], {'radius': 'manifold.radius'}), '(x, y, radius=manifold.radius, **kwargs)\n', 
(5279, 5319), True, 'import mt.mvae.ops.poincare as P\n'), ((5862, 5920), 'tests.mvae.ops.test_poincare.parallel_transport', 'TP.parallel_transport', (['x', 'src', 'dst'], {'radius': 'manifold.radius'}), '(x, src, dst, radius=manifold.radius)\n', (5883, 5920), True, 'import tests.mvae.ops.test_poincare as TP\n'), ((6500, 6566), 'tests.mvae.ops.test_poincare.inverse_parallel_transport', 'TP.inverse_parallel_transport', (['x', 'src', 'dst'], {'radius': 'manifold.radius'}), '(x, src, dst, radius=manifold.radius)\n', (6529, 6566), True, 'import tests.mvae.ops.test_poincare as TP\n'), ((7305, 7319), 'torch.tensor', 't', (['[1.0, 0, 0]'], {}), '([1.0, 0, 0])\n', (7306, 7319), True, 'from torch import tensor as t\n'), ((10371, 10381), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10378, 10381), True, 'import numpy as np\n'), ((10397, 10407), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (10404, 10407), True, 'import numpy as np\n'), ((10412, 10422), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (10419, 10422), True, 'import numpy as np\n'), ((10908, 10918), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (10915, 10918), True, 'import numpy as np\n'), ((10923, 10933), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (10930, 10933), True, 'import numpy as np\n'), ((12294, 12312), 'torch.norm', 'torch.norm', (['u'], {'p': '(2)'}), '(u, p=2)\n', (12304, 12312), False, 'import torch\n'), ((13061, 13079), 'torch.norm', 'torch.norm', (['u'], {'p': '(2)'}), '(u, p=2)\n', (13071, 13079), False, 'import torch\n'), ((13174, 13192), 'torch.norm', 'torch.norm', (['u'], {'p': '(2)'}), '(u, p=2)\n', (13184, 13192), False, 'import torch\n'), ((13796, 13814), 'torch.norm', 'torch.norm', (['u'], {'p': '(2)'}), '(u, p=2)\n', (13806, 13814), False, 'import torch\n'), ((14570, 14611), 'torch.norm', 'torch.norm', (['UU'], {'p': '(2)', 'dim': '(-1)', 'keepdim': '(True)'}), '(UU, p=2, dim=-1, keepdim=True)\n', (14580, 14611), False, 'import torch\n'), ((1894, 1932), 
'mt.mvae.ops.spherical_projected.lambda_x_c', 'SP.lambda_x_c', (['x'], {'c': 'manifold.curvature'}), '(x, c=manifold.curvature)\n', (1907, 1932), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((2415, 2462), 'mt.mvae.ops.spherical_projected.exp_map', 'SP.exp_map', (['x', 'at_point'], {'radius': 'manifold.radius'}), '(x, at_point, radius=manifold.radius)\n', (2425, 2462), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((2979, 3034), 'mt.mvae.ops.spherical_projected.inverse_exp_map', 'SP.inverse_exp_map', (['x', 'at_point'], {'radius': 'manifold.radius'}), '(x, at_point, radius=manifold.radius)\n', (2997, 3034), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((3614, 3683), 'mt.mvae.ops.spherical_projected.inverse_sample_projection_mu0', 'SP.inverse_sample_projection_mu0', (['x', 'at_point'], {'radius': 'manifold.radius'}), '(x, at_point, radius=manifold.radius)\n', (3646, 3683), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((4184, 4239), 'tests.mvae.ops.test_spherical_projected.is_in_hyp_space', 'TSP.is_in_hyp_space', (['x'], {'radius': 'manifold.radius', 'eps': 'eps'}), '(x, radius=manifold.radius, eps=eps)\n', (4203, 4239), True, 'import tests.mvae.ops.test_spherical_projected as TSP\n'), ((4820, 4889), 'tests.mvae.ops.test_spherical_projected.is_in_tangent_space', 'TSP.is_in_tangent_space', (['x', 'at_point'], {'radius': 'manifold.radius', 'eps': 'eps'}), '(x, at_point, radius=manifold.radius, eps=eps)\n', (4843, 4889), True, 'import tests.mvae.ops.test_spherical_projected as TSP\n'), ((5409, 5478), 'mt.mvae.ops.spherical_projected.spherical_projected_distance', 'SP.spherical_projected_distance', (['x', 'y'], {'K': 'manifold.curvature'}), '(x, y, K=manifold.curvature, **kwargs)\n', (5440, 5478), True, 'import mt.mvae.ops.spherical_projected as SP\n'), ((6010, 6069), 'tests.mvae.ops.test_spherical_projected.parallel_transport', 'TSP.parallel_transport', (['x', 'src', 'dst'], {'radius': 'manifold.radius'}), '(x, src, dst, 
radius=manifold.radius)\n', (6032, 6069), True, 'import tests.mvae.ops.test_spherical_projected as TSP\n'), ((6656, 6723), 'tests.mvae.ops.test_spherical_projected.inverse_parallel_transport', 'TSP.inverse_parallel_transport', (['x', 'src', 'dst'], {'radius': 'manifold.radius'}), '(x, src, dst, radius=manifold.radius)\n', (6686, 6723), True, 'import tests.mvae.ops.test_spherical_projected as TSP\n'), ((7403, 7417), 'torch.tensor', 't', (['[0.1, 2, 3]'], {}), '([0.1, 2, 3])\n', (7404, 7417), True, 'from torch import tensor as t\n'), ((8970, 8980), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8977, 8980), True, 'import numpy as np\n'), ((10443, 10453), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10450, 10453), True, 'import numpy as np\n'), ((10479, 10489), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10486, 10489), True, 'import numpy as np\n'), ((10954, 10964), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10961, 10964), True, 'import numpy as np\n'), ((11464, 11474), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11471, 11474), True, 'import numpy as np\n'), ((11479, 11489), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (11486, 11489), True, 'import numpy as np\n'), ((11510, 11520), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11517, 11520), True, 'import numpy as np\n'), ((11546, 11556), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11553, 11556), True, 'import numpy as np\n'), ((11902, 11912), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11909, 11912), True, 'import numpy as np\n'), ((11937, 11947), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11944, 11947), True, 'import numpy as np\n'), ((14949, 14959), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14956, 14959), True, 'import numpy as np\n'), ((7739, 7753), 'torch.tensor', 't', (['[0.1, 2, 3]'], {}), '([0.1, 2, 3])\n', (7740, 7753), True, 'from torch import tensor as t\n'), ((7487, 7497), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7494, 7497), True, 
'import numpy as np\n'), ((7569, 7579), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7576, 7579), True, 'import numpy as np\n'), ((9391, 9401), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9398, 9401), True, 'import numpy as np\n'), ((9444, 9454), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (9451, 9454), True, 'import numpy as np\n'), ((9459, 9469), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (9466, 9469), True, 'import numpy as np\n'), ((9620, 9630), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9627, 9630), True, 'import numpy as np\n'), ((12514, 12524), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (12521, 12524), True, 'import numpy as np\n'), ((13360, 13370), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13367, 13370), True, 'import numpy as np\n'), ((13982, 13992), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13989, 13992), True, 'import numpy as np\n'), ((14026, 14036), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14033, 14036), True, 'import numpy as np\n'), ((7652, 7662), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7659, 7662), True, 'import numpy as np\n'), ((7827, 7837), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7834, 7837), True, 'import numpy as np\n'), ((7913, 7923), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7920, 7923), True, 'import numpy as np\n'), ((13410, 13420), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13417, 13420), True, 'import numpy as np\n'), ((14077, 14087), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14084, 14087), True, 'import numpy as np\n'), ((8000, 8010), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8007, 8010), True, 'import numpy as np\n')] |
#
# this code is for plate localization
#
# import tensorflow as tf
import cv2
import imutils
import numpy as np
from imutils import paths
import RDetectPlates as detplt
from imutils import perspective
import matplotlib.pyplot as plt
import RLpPreprocess as prpr
#
# trying different images by addressing different paths
#
# path = ''
path = '/path to image data'
# path = '/home/bayes/Academic/Research/Radarsan-01/ANPR/I_data/14_00_30_2'
imgs = sorted(list(paths.list_images(path)), reverse=True)
# random index into the image list; unused below since the file name is hard-coded
rnd = np.random.randint(0, len(imgs) - 1, 1)[0]
# rnd = 5 #87
# testing the detector:
det_1 = 'fatih'
det_2 = 'comp'
run = True
# the goal of this part is to find plate!!
# NOTE(review): `run` is set to False on the first pass, so this loop body runs
# exactly once; the while form presumably only eases re-running while tuning.
while (run):
    run = False
    # for first path
    # imgOrg = cv2.imread(path) #imgOrg = np.asarray(img)
    # for second path
    imgOrg = cv2.imread(path + '/ANPRCAMERA_701_143.52.jpg')  # imgs[rnd]
    # prpr.preprocess(img)
    # s_x, s_y, ch = img.shape
    # intface = paths.list_images(path) # list()
    # imgOrg = sorted(intface, reverse=True)
    # plt.imshow(imgOrg)
    # plt.close()
    try:
        gimg = cv2.cvtColor(imgOrg, cv2.COLOR_BGR2GRAY)
    except:
        # NOTE(review): bare except hides the real error and leaves `gimg`
        # undefined (e.g. when imread returned None), so the code below would
        # raise NameError -- consider checking imgOrg is not None instead.
        print('there is an error in making img gray')
    #
    # working on Fatihs
    # this part should be checked once again
    #
    detector = 'fatih'
    if detector == det_1:
        retRegions = []  # this will be the return value
        retCoords = []  # this will be the return value
        # defining kernels: rectangular structuring elements of various widths
        rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 4))  # the rectangle kernel
        superKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 3))  # 27,3 check 29 also
        smallKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 3))
        pKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 2))
        # convert the image to grayscale, and apply the blackhat operation
        # gray = cv2.cvtColor(imgOrg, cv2.COLOR_BGR2GRAY)
        # step one: horizontal gradient magnitude, rescaled to 0..255 uint8
        gradX = np.absolute(cv2.Sobel(gimg, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1))
        (minVal, maxVal) = (np.min(gradX), np.max(gradX))
        gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
        # I used one name for readability and memory usage
        # step two: smooth the gradient (gaussian -> box -> median blur)
        gray = cv2.medianBlur(cv2.blur(cv2.GaussianBlur(gradX, (9, 5), 0), (9, 5)), 5)
        # dilation should go here
        # step three
        gray = cv2.dilate(gray, rectKernel, iterations=2)
        # testing to put erosion here -> goes bad
        # step four
        # NOTE(review): cv2.MORPH_RECT is a structuring-element *shape* flag
        # (value 0), not a morphology operation; passing it as the `op` argument
        # of morphologyEx makes all these calls perform MORPH_ERODE (also value
        # 0) -- confirm whether MORPH_CLOSE/MORPH_OPEN was actually intended.
        gray = cv2.morphologyEx(gray, cv2.MORPH_RECT, superKernel)  # using morph consequently is helpfull
        gray = cv2.morphologyEx(gray, cv2.MORPH_RECT, rectKernel)
        gray = cv2.morphologyEx(gray, cv2.MORPH_RECT, smallKernel)
        # step five
        gray = cv2.dilate(gray, rectKernel, iterations=2)
        # testing erosion here
        # steps six and seven
        gray = cv2.erode(gray, rectKernel, iterations=2)
        gray = cv2.morphologyEx(gray, cv2.MORPH_RECT, rectKernel)
        gray = cv2.morphologyEx(gray, cv2.MORPH_RECT, superKernel)
        # step eight
        gray = cv2.dilate(gray, rectKernel, iterations=2)
        gray = cv2.dilate(gray, superKernel, iterations=1)
        # gray = (gradX + 2 * gradY) // 3
        # gray = cv2.dilate(gray, smallKernel, iterations=15)
        # deleting variables
        # del gradY, gradX
        # step nine
        # this part is going to develop by me
        # Make a list for all possible licence plates
        # todo: My previous code:
        # mx_v = np.amax(gray)
        # _, gray = cv2.threshold(gray, 0.60 * mx_v, mx_v, cv2.THRESH_BINARY)
        # _, cnts, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # before: cv2.RETR_EXTERNAL
        # del gray, mx_v
        # iteratively threshold; keep each thresholded mask in det_plate and
        # stop once the residual between the mask and its source is small
        poss_plate = []
        det_plate = []
        poss_plate.append(gray)
        to_end = False  # NOTE(review): never read afterwards
        max_lim = 2
        tolerance = 100
        for i in range(max_lim):
            mx_v = np.amax(poss_plate[i])
            _, gray = cv2.threshold(poss_plate[i], 0.7 * mx_v, 0.7 * mx_v, cv2.THRESH_BINARY)
            # gray = (mx_v / 255) * gray
            d00 = poss_plate[i] - gray
            det_plate.append(gray)
            if np.linalg.norm(d00) < tolerance:
                break
            # I should do some morphology related things
            d00 = cv2.erode(d00, superKernel, iterations=1)
            d01 = cv2.morphologyEx(d00, cv2.MORPH_RECT, rectKernel)
            d02 = cv2.dilate(d01, pKernel, iterations=3)
            d03 = cv2.dilate(d02, rectKernel, iterations=2)
            poss_plate.append(d03)
    # NOTE(review): det_plate/retRegions/retCoords are only bound inside the
    # `detector == det_1` branch above; any other detector value would make
    # the loops below raise NameError.
    for i in range(len(det_plate)):
        # loop over the contours
        # NOTE(review): three return values from findContours implies an
        # OpenCV 3.x API -- confirm before upgrading OpenCV.
        _, cnts, _ = cv2.findContours(det_plate[i], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts:
            # grab the bounding box associated with the contour and compute the area and
            # aspect ratio
            (x, y, w, h) = cv2.boundingRect(c)
            aspectRatio = w / float(h)
            # compute the rotated bounding box of the region
            rect = cv2.minAreaRect(c)
            box = np.int0(cv2.boxPoints(rect))
            # shift the first two box corners and all x/y coordinates slightly
            box[0, :] += 3
            box[1, :] += 4
            box[:, 0] -= 3
            box[:, 1] -= 4
            # ensure the aspect ratio, width, and height of the bounding box fall within
            # tolerable limits, then update the list of license plate regions
            # todo: lets examin the scaled factores with some tolerance
            if (aspectRatio > 2.1 and aspectRatio < 7) and h > 10 and w > 50 and h < 125 and w < 400:
                retRegions.append(box)
                retCoords.append((x, y, w, h))
    counter = 0
    # for every possible plate regions
    for region in retRegions:
        pltCoord = retCoords[counter]  # NOTE(review): assigned but never used
        counter += 1
        # plate, _, _, _ = detplt.getFilteredImageFromRegion(region)
        plate = perspective.four_point_transform(gimg, region)
        plate = imutils.resize(plate, width=400)
        # show to decide best capturing
        plt.imshow(plate)  # there is a problem for closed images
        plt.close()
    print('its finished')
    print(retRegions)
# for more improvement i will work on the interaction between size and threshold! in order to find most proper one!
| [
"imutils.perspective.four_point_transform",
"imutils.paths.list_images",
"numpy.linalg.norm",
"cv2.getStructuringElement",
"matplotlib.pyplot.imshow",
"cv2.threshold",
"cv2.erode",
"numpy.max",
"matplotlib.pyplot.close",
"cv2.minAreaRect",
"numpy.min",
"cv2.boxPoints",
"cv2.morphologyEx",
... | [((820, 867), 'cv2.imread', 'cv2.imread', (["(path + '/ANPRCAMERA_701_143.52.jpg')"], {}), "(path + '/ANPRCAMERA_701_143.52.jpg')\n", (830, 867), False, 'import cv2\n'), ((462, 485), 'imutils.paths.list_images', 'paths.list_images', (['path'], {}), '(path)\n', (479, 485), False, 'from imutils import paths\n'), ((1101, 1141), 'cv2.cvtColor', 'cv2.cvtColor', (['imgOrg', 'cv2.COLOR_BGR2GRAY'], {}), '(imgOrg, cv2.COLOR_BGR2GRAY)\n', (1113, 1141), False, 'import cv2\n'), ((1501, 1551), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(13, 4)'], {}), '(cv2.MORPH_RECT, (13, 4))\n', (1526, 1551), False, 'import cv2\n'), ((1598, 1648), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(25, 3)'], {}), '(cv2.MORPH_RECT, (25, 3))\n', (1623, 1648), False, 'import cv2\n'), ((1693, 1742), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(4, 3)'], {}), '(cv2.MORPH_RECT, (4, 3))\n', (1718, 1742), False, 'import cv2\n'), ((1761, 1810), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(9, 2)'], {}), '(cv2.MORPH_RECT, (9, 2))\n', (1786, 1810), False, 'import cv2\n'), ((2424, 2466), 'cv2.dilate', 'cv2.dilate', (['gray', 'rectKernel'], {'iterations': '(2)'}), '(gray, rectKernel, iterations=2)\n', (2434, 2466), False, 'import cv2\n'), ((2553, 2604), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_RECT', 'superKernel'], {}), '(gray, cv2.MORPH_RECT, superKernel)\n', (2569, 2604), False, 'import cv2\n'), ((2660, 2710), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_RECT', 'rectKernel'], {}), '(gray, cv2.MORPH_RECT, rectKernel)\n', (2676, 2710), False, 'import cv2\n'), ((2726, 2777), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_RECT', 'smallKernel'], {}), '(gray, cv2.MORPH_RECT, smallKernel)\n', (2742, 2777), False, 'import cv2\n'), ((2814, 2856), 'cv2.dilate', 'cv2.dilate', (['gray', 'rectKernel'], {'iterations': '(2)'}), 
'(gray, rectKernel, iterations=2)\n', (2824, 2856), False, 'import cv2\n'), ((2934, 2975), 'cv2.erode', 'cv2.erode', (['gray', 'rectKernel'], {'iterations': '(2)'}), '(gray, rectKernel, iterations=2)\n', (2943, 2975), False, 'import cv2\n'), ((2991, 3041), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_RECT', 'rectKernel'], {}), '(gray, cv2.MORPH_RECT, rectKernel)\n', (3007, 3041), False, 'import cv2\n'), ((3057, 3108), 'cv2.morphologyEx', 'cv2.morphologyEx', (['gray', 'cv2.MORPH_RECT', 'superKernel'], {}), '(gray, cv2.MORPH_RECT, superKernel)\n', (3073, 3108), False, 'import cv2\n'), ((3145, 3187), 'cv2.dilate', 'cv2.dilate', (['gray', 'rectKernel'], {'iterations': '(2)'}), '(gray, rectKernel, iterations=2)\n', (3155, 3187), False, 'import cv2\n'), ((3203, 3246), 'cv2.dilate', 'cv2.dilate', (['gray', 'superKernel'], {'iterations': '(1)'}), '(gray, superKernel, iterations=1)\n', (3213, 3246), False, 'import cv2\n'), ((6040, 6086), 'imutils.perspective.four_point_transform', 'perspective.four_point_transform', (['gimg', 'region'], {}), '(gimg, region)\n', (6072, 6086), False, 'from imutils import perspective\n'), ((6103, 6135), 'imutils.resize', 'imutils.resize', (['plate'], {'width': '(400)'}), '(plate, width=400)\n', (6117, 6135), False, 'import imutils\n'), ((6184, 6201), 'matplotlib.pyplot.imshow', 'plt.imshow', (['plate'], {}), '(plate)\n', (6194, 6201), True, 'import matplotlib.pyplot as plt\n'), ((6250, 6261), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6259, 6261), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2049), 'cv2.Sobel', 'cv2.Sobel', (['gimg'], {'ddepth': 'cv2.CV_32F', 'dx': '(1)', 'dy': '(0)', 'ksize': '(-1)'}), '(gimg, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)\n', (2002, 2049), False, 'import cv2\n'), ((2079, 2092), 'numpy.min', 'np.min', (['gradX'], {}), '(gradX)\n', (2085, 2092), True, 'import numpy as np\n'), ((2094, 2107), 'numpy.max', 'np.max', (['gradX'], {}), '(gradX)\n', (2100, 2107), True, 'import numpy 
as np\n'), ((4015, 4037), 'numpy.amax', 'np.amax', (['poss_plate[i]'], {}), '(poss_plate[i])\n', (4022, 4037), True, 'import numpy as np\n'), ((4060, 4131), 'cv2.threshold', 'cv2.threshold', (['poss_plate[i]', '(0.7 * mx_v)', '(0.7 * mx_v)', 'cv2.THRESH_BINARY'], {}), '(poss_plate[i], 0.7 * mx_v, 0.7 * mx_v, cv2.THRESH_BINARY)\n', (4073, 4131), False, 'import cv2\n'), ((4392, 4433), 'cv2.erode', 'cv2.erode', (['d00', 'superKernel'], {'iterations': '(1)'}), '(d00, superKernel, iterations=1)\n', (4401, 4433), False, 'import cv2\n'), ((4452, 4501), 'cv2.morphologyEx', 'cv2.morphologyEx', (['d00', 'cv2.MORPH_RECT', 'rectKernel'], {}), '(d00, cv2.MORPH_RECT, rectKernel)\n', (4468, 4501), False, 'import cv2\n'), ((4520, 4558), 'cv2.dilate', 'cv2.dilate', (['d01', 'pKernel'], {'iterations': '(3)'}), '(d01, pKernel, iterations=3)\n', (4530, 4558), False, 'import cv2\n'), ((4577, 4618), 'cv2.dilate', 'cv2.dilate', (['d02', 'rectKernel'], {'iterations': '(2)'}), '(d02, rectKernel, iterations=2)\n', (4587, 4618), False, 'import cv2\n'), ((4757, 4831), 'cv2.findContours', 'cv2.findContours', (['det_plate[i]', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(det_plate[i], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (4773, 4831), False, 'import cv2\n'), ((2305, 2339), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gradX', '(9, 5)', '(0)'], {}), '(gradX, (9, 5), 0)\n', (2321, 2339), False, 'import cv2\n'), ((4262, 4281), 'numpy.linalg.norm', 'np.linalg.norm', (['d00'], {}), '(d00)\n', (4276, 4281), True, 'import numpy as np\n'), ((5014, 5033), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (5030, 5033), False, 'import cv2\n'), ((5165, 5183), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (5180, 5183), False, 'import cv2\n'), ((5214, 5233), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (5227, 5233), False, 'import cv2\n')] |
import matplotlib.pyplot as plt
import random
import os
import numpy as np
import tensorflow as tf
import logging
from load_data import load_train_data, load_data_by_id, load_entity_by_id, \
load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, \
get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary
import matplotlib
# Select the non-interactive Agg backend so figures can be written to disk
# on headless machines.  NOTE(review): pyplot is imported above before this
# call; to be fully effective the backend switch should precede that import.
matplotlib.use('agg')
logger = logging.getLogger(__name__)  # module-level logger
local_file = os.path.split(__file__)[-1]  # basename of this source file
# Root logging config: timestamped messages with file/function context.
logging.basicConfig(
    format='%(asctime)s : %(filename)s : %(funcName)s : %(levelname)s : %(message)s',
    level=logging.INFO)
def set_random_seed(seed_value):
    """Seed the Python, NumPy and TensorFlow RNGs for reproducible runs."""
    for seeder in (random.seed, np.random.seed, tf.random.set_seed):
        seeder(seed_value)
def sample(sample_list):
    """Return a single element of *sample_list* chosen uniformly at random."""
    (picked,) = random.sample(sample_list, 1)
    return picked
def negative_sample(pos_id_list, id_list, sample_num):
    """
    Draw *sample_num* negative ids from *id_list* by rejection sampling:
    any draw contained in *pos_id_list* is re-drawn.

    NOTE(review): when *pos_id_list* is a plain string (callers pass a single
    label), ``in`` performs substring matching — confirm this is intended.
    """
    negatives = []
    while len(negatives) < sample_num:
        candidate = ''
        # Keep drawing until we get an id that is not a positive.
        while candidate == '' or candidate in pos_id_list:
            candidate = random.sample(id_list, 1)[0]
        negatives.append(candidate)
    return negatives
def gen_valid_data(data_path, word_dict, char_dict, sentence_length, can_path, prior_path, entity_embedding_path, character_length,
                   topk=25, alpha=0.0, voting_k=10, context_path=None, context_max_len=100):
    """
    Build evaluation examples for candidate ranking.

    For every mention in *data_path*, pair it with each of its top-*topk*
    candidate entities together with character ids, mention-entity prior,
    candidate/voting embeddings and mention context, grouped so that each
    entry of the returned list covers one mention.

    Returns a list of (features_dict, labels_array, (gold_label, doc_id,
    mention_text)) tuples — presumably one per mention; verify against the
    loaders in load_data.
    """
    all_data, raw_data = load_data_by_id(data_path, word_dict, char_dict, sentence_length, character_length)
    candidate_dict, raw_can_dict, can_char_dict = load_candidates_by_id(word_dict, char_dict, sentence_length, can_path, character_length, topk, alpha=alpha)
    mention_entity_prior = load_mention_entity_prior2(prior_path)
    # coherence: entity embeddings and per-document voting embeddings
    entity_embedding, default_embedding = load_entity_emb(entity_embedding_path)
    voting_eids = load_voting_eid_by_doc(raw_data, can_path, voting_k)
    voting_emb_dict = get_embedding_of_voting(voting_eids, entity_embedding, default_embedding)
    # context: mention-context word ids, padded/truncated to context_max_len
    mention_context_dict = load_mention_context(context_path, context_max_len, word_dict)
    mention_list, candidate_list, y_list, raw_can_name_list, x_char_list, y_char_list, can_prior = [], [], [], [], [], [], []
    can_emb_list, voting_emb_list = [], []
    context_list = []
    miss_cnt = 0  # never incremented below; kept for the summary log line
    test_data = list()
    for index, (mention_id, labels, char_ids) in enumerate(all_data):
        label = labels
        raw = raw_data[index]
        doc_id, raw_mention, raw_label = raw[0], raw[1], raw[2]
        mention_name = raw_mention
        can_list_of_the_mention = candidate_dict[mention_name]
        raw_can_dict_of_the_mention = raw_can_dict[mention_name]
        # One feature row per (mention, candidate) pair.
        for can_label, can_id in can_list_of_the_mention:
            mention_list.append(mention_id)
            candidate_list.append(can_id)
            y_list.append(can_label)
            entity_name = raw_can_dict_of_the_mention[can_label]
            raw_can_name_list.append(entity_name)
            # character-level ids for mention and candidate
            x_char_list.append(char_ids)
            can_char_ids = can_char_dict[entity_name]
            y_char_list.append(can_char_ids)
            # mention-entity prior probability (0 when unseen)
            prior_value = 0
            if mention_name in mention_entity_prior and can_label in mention_entity_prior[mention_name]:
                prior_value = mention_entity_prior[mention_name][can_label]
            can_prior.append([prior_value])
            # coherence: candidate embedding (default when unseen) + voting embedding
            can_emb = default_embedding
            if can_label in entity_embedding:
                can_emb = entity_embedding[can_label]
            voting_emb = voting_emb_dict[doc_id][raw_mention]
            can_emb_list.append(can_emb)
            voting_emb_list.append(voting_emb)
            # context: zero vector when the mention has no recorded context
            doc_id = raw_data[index][0]
            mention_context = [0] * context_max_len
            if doc_id in mention_context_dict and mention_name in mention_context_dict[doc_id]:
                mention_context = mention_context_dict[doc_id][mention_name]
            context_list.append(mention_context)
        # Pack all candidate rows of this mention into one example tuple.
        data = ({'mention': np.array(mention_list), 'candidate': np.array(candidate_list),
                 'entity_name': raw_can_name_list, 'men_char': np.array(x_char_list),
                 'can_char': np.array(y_char_list), 'can_prior':np.array(can_prior),
                 'candidate_emb':np.array(can_emb_list), 'voting_candidates_emb':np.array(voting_emb_list), 'can_context':np.array(context_list)
                 }, np.array(y_list), (label, doc_id, raw_mention))
        test_data.append(data)
        # Reset accumulators so the next mention starts fresh.
        mention_list, candidate_list, y_list, raw_can_name_list, x_char_list, y_char_list, can_prior = [], [], [], [], [], [], []
        can_emb_list, voting_emb_list = [], []
        context_list = []
    logging.info('test data size = {a}, miss data size = {b}'.format(a=len(all_data), b=miss_cnt))
    return test_data
def gen_train_data(data_path, word_dict, char_dict, entity_path, batch_size, sentence_length, character_length, can_path, prior_path, entity_embedding_path,
                   topk=10, alpha=0.0, voting_k=10, context_path=None, context_max_len=100):
    """
    Infinite generator of training batches for pairwise candidate ranking.

    Each yielded item is (features_dict, mention_array): for every mention it
    samples one positive synonym of the gold entity and one negative entity
    (from the mention's candidate list, or from all entities when only one
    candidate exists), plus char ids, priors, embeddings and context.
    Loops over the data forever; accumulators are flushed every *batch_size*
    mentions.
    """
    all_data, raw_data = load_data_by_id(data_path, word_dict, char_dict, sentence_length, character_length, mode='train')
    all_entity, entity_dict, _ = load_entity_by_id(entity_path, word_dict, char_dict, sentence_length, character_length)
    candidate_dict, raw_can_dict, can_char_dict = load_candidates_by_id(word_dict, char_dict, sentence_length, can_path, character_length, topk=topk, alpha=alpha)
    # coherence: entity embeddings and per-document voting embeddings
    entity_embedding, default_embedding = load_entity_emb(entity_embedding_path)
    voting_eids = load_voting_eid_by_doc(raw_data, can_path, voting_k)
    voting_emb_dict = get_embedding_of_voting(voting_eids, entity_embedding, default_embedding)
    # context: mention-context word ids, padded/truncated to context_max_len
    mention_context_dict = load_mention_context(context_path, context_max_len, word_dict)
    mention_entity_prior = load_mention_entity_prior2(prior_path)
    mention_list, candidate_list, neg_candidate_list, x_char_list, y_char_list, z_char_list, pos_candidate_prior, neg_candidate_prior \
        = [], [], [], [], [], [], [], []
    pos_can_emb_list, neg_can_emb_list, voting_emb_list = [], [], []
    context_list = []
    while True:  # epoch loop: cycle over the training data indefinitely
        for index, (mention_id, label, char_ids) in enumerate(all_data):
            # Positive sample: a random synonym of the gold entity, or the
            # mention itself when the gold label has no entry.
            if label not in entity_dict:
                pos_sample, pos_y_chars = mention_id, char_ids
            else:
                synonyms = entity_dict[label]
                pos_sample, pos_y_chars = sample(synonyms)
            # use candidates to train
            mention = raw_data[index][1]
            candidates_of_this_mention = candidate_dict[mention]
            can_lables = [e1 for (e1, e2) in candidates_of_this_mention]
            # Negative sample: from this mention's candidates, falling back
            # to the whole entity set when there is only one candidate.
            if len(can_lables) == 1:
                neg_lables = negative_sample(label, list(all_entity), 1)[0]
            else:
                neg_lables = negative_sample(label, can_lables, 1)[0]
            neg_synonyms = entity_dict[neg_lables]
            neg_sample, neg_y_chars = sample(neg_synonyms)
            mention_list.append(mention_id)
            candidate_list.append(pos_sample)
            x_char_list.append(char_ids)
            y_char_list.append(pos_y_chars)
            neg_candidate_list.append(neg_sample)
            z_char_list.append(neg_y_chars)
            # Mention-entity priors for positive and negative (0 when unseen).
            pos_prior_value, neg_prior_value = 0, 0
            if mention in mention_entity_prior and label in mention_entity_prior[mention]:
                pos_prior_value = mention_entity_prior[mention][label]
            if mention in mention_entity_prior and neg_lables in mention_entity_prior[mention]:
                neg_prior_value = mention_entity_prior[mention][neg_lables]
            pos_candidate_prior.append([pos_prior_value])
            neg_candidate_prior.append([neg_prior_value])
            # coherence: embeddings for both candidates (default when unseen)
            doc_id = raw_data[index][0]
            pos_can_emb, neg_can_emb = default_embedding, default_embedding
            if label in entity_embedding:
                pos_can_emb = entity_embedding[label]
            if neg_lables in entity_embedding:
                neg_can_emb = entity_embedding[neg_lables]
            voting_emb = voting_emb_dict[doc_id][mention]
            pos_can_emb_list.append(pos_can_emb)
            neg_can_emb_list.append(neg_can_emb)
            voting_emb_list.append(voting_emb)
            # context: zero vector when the mention has no recorded context
            doc_id = raw_data[index][0]
            if mention in mention_context_dict[doc_id]:
                mention_context = mention_context_dict[doc_id][mention]
            else: mention_context = [0]*context_max_len
            context_list.append(mention_context)
            # Emit a batch every batch_size mentions, then reset accumulators.
            if len(mention_list) % batch_size == 0:
                yield {'mention': np.array(mention_list), 'pos_candidate':np.array(candidate_list),
                       'men_char':np.array(x_char_list), 'pos_can_char':np.array(y_char_list)
                       ,'neg_candidate':np.array(neg_candidate_list), 'neg_can_char':np.array(z_char_list),
                       'pos_can_prior':np.array(pos_candidate_prior), 'neg_can_prior':np.array(neg_candidate_prior)
                       ,'pos_candidate_emb':np.array(pos_can_emb_list), 'neg_candidate_emb':np.array(neg_can_emb_list),
                       'voting_candidates_emb':np.array(voting_emb_list), 'can_context':np.array(context_list)}, \
                      np.array(mention_list),
                mention_list, candidate_list, neg_candidate_list, x_char_list, y_char_list, z_char_list, pos_candidate_prior, neg_candidate_prior \
                    = [], [], [], [], [], [], [], []
                pos_can_emb_list, neg_can_emb_list, voting_emb_list = [], [], []
                context_list = []
def make_loss_picture(history):
    """Plot training/validation loss from a Keras-style History object and
    save the figure to ../checkpoints/loss.png."""
    print('Plot validation accuracy and loss...')
    train_loss = history.history['loss']
    valid_loss = history.history['val_loss']
    plt.plot(train_loss, label='loss')
    plt.plot(valid_loss, label='val_loss')
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'valid'], loc='upper left')
    plt.savefig('../checkpoints/loss.png')
# Smoke-test driver: build a training-batch generator from the ADR data
# files and iterate it, checking that each 'mention' batch is 2-D.
if __name__ == '__main__':
    can_path = '../output/adr/candidates/training_aligned_cos_with_mention_candidate.txt'
    context_path = '../output/adr/context/train_mention_context.txt'
    word_vocab_path = '../output/adr/word_vocabulary.dict'
    char_vocab_path = '../output/adr/char_vocabulary.dict'
    prior_path = '../output/adr/mention_entity_prior.txt'
    word_dict, word_list = load_word_vocabulary(word_vocab_path, True)
    char_dict, char_list = load_char_vocabulary(char_vocab_path)
    data = gen_train_data(data_path='../output/adr/train_data.txt', word_dict=word_dict, char_dict=char_dict,
                          entity_path='../output/adr/entity_kb.txt', batch_size=6,topk=20, alpha=0.0,
                          sentence_length=20, character_length=25, can_path=can_path,
                          prior_path=prior_path,
                          entity_embedding_path='../output/adr/embed/entity_emb_50.txt',
                          context_path=context_path)
    cnt = 0
    # Infinite generator: this loop only terminates via the sanity-check
    # exception below (or an external interrupt).
    for train, y in data:
        cnt += 1
        mention, pos_candidate, neg_candidate = train['mention'], train['pos_candidate'], train['neg_candidate']
        men_char, pos_can_char, neg_can_char = train['men_char'], train['pos_can_char'], train['neg_can_char']
        voting = train['can_context']
        print(mention)
        if len(np.shape(mention)) != 2:
            print(mention)
raise Exception('error') | [
"logging.getLogger",
"load_data.load_mention_entity_prior2",
"matplotlib.pyplot.ylabel",
"numpy.array",
"load_data.load_mention_context",
"load_data.load_char_vocabulary",
"load_data.get_embedding_of_voting",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.split",
"numpy.random.se... | [((417, 438), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (431, 438), False, 'import matplotlib\n'), ((450, 477), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (467, 477), False, 'import logging\n'), ((519, 649), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(filename)s : %(funcName)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s : %(filename)s : %(funcName)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (538, 649), False, 'import logging\n'), ((491, 514), 'os.path.split', 'os.path.split', (['__file__'], {}), '(__file__)\n', (504, 514), False, 'import os\n'), ((689, 712), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (700, 712), False, 'import random\n'), ((717, 743), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (731, 743), True, 'import numpy as np\n'), ((748, 778), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed_value'], {}), '(seed_value)\n', (766, 778), True, 'import tensorflow as tf\n'), ((1427, 1514), 'load_data.load_data_by_id', 'load_data_by_id', (['data_path', 'word_dict', 'char_dict', 'sentence_length', 'character_length'], {}), '(data_path, word_dict, char_dict, sentence_length,\n character_length)\n', (1442, 1514), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((1561, 1672), 'load_data.load_candidates_by_id', 'load_candidates_by_id', (['word_dict', 'char_dict', 'sentence_length', 'can_path', 'character_length', 'topk'], {'alpha': 'alpha'}), '(word_dict, char_dict, sentence_length, can_path,\n character_length, topk, alpha=alpha)\n', (1582, 1672), False, 'from load_data import load_train_data, 
load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((1696, 1734), 'load_data.load_mention_entity_prior2', 'load_mention_entity_prior2', (['prior_path'], {}), '(prior_path)\n', (1722, 1734), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((1793, 1831), 'load_data.load_entity_emb', 'load_entity_emb', (['entity_embedding_path'], {}), '(entity_embedding_path)\n', (1808, 1831), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((1850, 1902), 'load_data.load_voting_eid_by_doc', 'load_voting_eid_by_doc', (['raw_data', 'can_path', 'voting_k'], {}), '(raw_data, can_path, voting_k)\n', (1872, 1902), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((1925, 1998), 'load_data.get_embedding_of_voting', 'get_embedding_of_voting', (['voting_eids', 'entity_embedding', 'default_embedding'], {}), '(voting_eids, entity_embedding, default_embedding)\n', (1948, 1998), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((2041, 2103), 
'load_data.load_mention_context', 'load_mention_context', (['context_path', 'context_max_len', 'word_dict'], {}), '(context_path, context_max_len, word_dict)\n', (2061, 2103), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5180, 5281), 'load_data.load_data_by_id', 'load_data_by_id', (['data_path', 'word_dict', 'char_dict', 'sentence_length', 'character_length'], {'mode': '"""train"""'}), "(data_path, word_dict, char_dict, sentence_length,\n character_length, mode='train')\n", (5195, 5281), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5311, 5402), 'load_data.load_entity_by_id', 'load_entity_by_id', (['entity_path', 'word_dict', 'char_dict', 'sentence_length', 'character_length'], {}), '(entity_path, word_dict, char_dict, sentence_length,\n character_length)\n', (5328, 5402), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5449, 5565), 'load_data.load_candidates_by_id', 'load_candidates_by_id', (['word_dict', 'char_dict', 'sentence_length', 'can_path', 'character_length'], {'topk': 'topk', 'alpha': 'alpha'}), '(word_dict, char_dict, sentence_length, can_path,\n character_length, topk=topk, alpha=alpha)\n', (5470, 5565), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, 
get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5618, 5656), 'load_data.load_entity_emb', 'load_entity_emb', (['entity_embedding_path'], {}), '(entity_embedding_path)\n', (5633, 5656), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5675, 5727), 'load_data.load_voting_eid_by_doc', 'load_voting_eid_by_doc', (['raw_data', 'can_path', 'voting_k'], {}), '(raw_data, can_path, voting_k)\n', (5697, 5727), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5750, 5823), 'load_data.get_embedding_of_voting', 'get_embedding_of_voting', (['voting_eids', 'entity_embedding', 'default_embedding'], {}), '(voting_eids, entity_embedding, default_embedding)\n', (5773, 5823), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5865, 5927), 'load_data.load_mention_context', 'load_mention_context', (['context_path', 'context_max_len', 'word_dict'], {}), '(context_path, context_max_len, word_dict)\n', (5885, 5927), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((5956, 5994), 'load_data.load_mention_entity_prior2', 'load_mention_entity_prior2', (['prior_path'], {}), 
'(prior_path)\n', (5982, 5994), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((10313, 10341), 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'label': '"""loss"""'}), "(loss, label='loss')\n", (10321, 10341), True, 'import matplotlib.pyplot as plt\n'), ((10346, 10382), 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss'], {'label': '"""val_loss"""'}), "(val_loss, label='val_loss')\n", (10354, 10382), True, 'import matplotlib.pyplot as plt\n'), ((10387, 10410), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (10396, 10410), True, 'import matplotlib.pyplot as plt\n'), ((10415, 10433), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (10425, 10433), True, 'import matplotlib.pyplot as plt\n'), ((10438, 10457), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (10448, 10457), True, 'import matplotlib.pyplot as plt\n'), ((10462, 10510), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'valid']"], {'loc': '"""upper left"""'}), "(['train', 'valid'], loc='upper left')\n", (10472, 10510), True, 'import matplotlib.pyplot as plt\n'), ((10515, 10553), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../checkpoints/loss.png"""'], {}), "('../checkpoints/loss.png')\n", (10526, 10553), True, 'import matplotlib.pyplot as plt\n'), ((10945, 10988), 'load_data.load_word_vocabulary', 'load_word_vocabulary', (['word_vocab_path', '(True)'], {}), '(word_vocab_path, True)\n', (10965, 10988), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((11016, 
11053), 'load_data.load_char_vocabulary', 'load_char_vocabulary', (['char_vocab_path'], {}), '(char_vocab_path)\n', (11036, 11053), False, 'from load_data import load_train_data, load_data_by_id, load_entity_by_id, load_candidates_by_id, load_mention_entity_prior2, load_entity_emb, load_voting_eid_by_doc, get_embedding_of_voting, load_mention_context, load_word_vocabulary, load_char_vocabulary\n'), ((817, 846), 'random.sample', 'random.sample', (['sample_list', '(1)'], {}), '(sample_list, 1)\n', (830, 846), False, 'import random\n'), ((4492, 4508), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (4500, 4508), True, 'import numpy as np\n'), ((4099, 4121), 'numpy.array', 'np.array', (['mention_list'], {}), '(mention_list)\n', (4107, 4121), True, 'import numpy as np\n'), ((4136, 4160), 'numpy.array', 'np.array', (['candidate_list'], {}), '(candidate_list)\n', (4144, 4160), True, 'import numpy as np\n'), ((4223, 4244), 'numpy.array', 'np.array', (['x_char_list'], {}), '(x_char_list)\n', (4231, 4244), True, 'import numpy as np\n'), ((4273, 4294), 'numpy.array', 'np.array', (['y_char_list'], {}), '(y_char_list)\n', (4281, 4294), True, 'import numpy as np\n'), ((4308, 4327), 'numpy.array', 'np.array', (['can_prior'], {}), '(can_prior)\n', (4316, 4327), True, 'import numpy as np\n'), ((4362, 4384), 'numpy.array', 'np.array', (['can_emb_list'], {}), '(can_emb_list)\n', (4370, 4384), True, 'import numpy as np\n'), ((4410, 4435), 'numpy.array', 'np.array', (['voting_emb_list'], {}), '(voting_emb_list)\n', (4418, 4435), True, 'import numpy as np\n'), ((4451, 4473), 'numpy.array', 'np.array', (['context_list'], {}), '(context_list)\n', (4459, 4473), True, 'import numpy as np\n'), ((11928, 11945), 'numpy.shape', 'np.shape', (['mention'], {}), '(mention)\n', (11936, 11945), True, 'import numpy as np\n'), ((9453, 9475), 'numpy.array', 'np.array', (['mention_list'], {}), '(mention_list)\n', (9461, 9475), True, 'import numpy as np\n'), ((8812, 8834), 'numpy.array', 
'np.array', (['mention_list'], {}), '(mention_list)\n', (8820, 8834), True, 'import numpy as np\n'), ((8852, 8876), 'numpy.array', 'np.array', (['candidate_list'], {}), '(candidate_list)\n', (8860, 8876), True, 'import numpy as np\n'), ((8912, 8933), 'numpy.array', 'np.array', (['x_char_list'], {}), '(x_char_list)\n', (8920, 8933), True, 'import numpy as np\n'), ((8950, 8971), 'numpy.array', 'np.array', (['y_char_list'], {}), '(y_char_list)\n', (8958, 8971), True, 'import numpy as np\n'), ((9012, 9040), 'numpy.array', 'np.array', (['neg_candidate_list'], {}), '(neg_candidate_list)\n', (9020, 9040), True, 'import numpy as np\n'), ((9057, 9078), 'numpy.array', 'np.array', (['z_char_list'], {}), '(z_char_list)\n', (9065, 9078), True, 'import numpy as np\n'), ((9119, 9148), 'numpy.array', 'np.array', (['pos_candidate_prior'], {}), '(pos_candidate_prior)\n', (9127, 9148), True, 'import numpy as np\n'), ((9166, 9195), 'numpy.array', 'np.array', (['neg_candidate_prior'], {}), '(neg_candidate_prior)\n', (9174, 9195), True, 'import numpy as np\n'), ((9240, 9266), 'numpy.array', 'np.array', (['pos_can_emb_list'], {}), '(pos_can_emb_list)\n', (9248, 9266), True, 'import numpy as np\n'), ((9288, 9314), 'numpy.array', 'np.array', (['neg_can_emb_list'], {}), '(neg_can_emb_list)\n', (9296, 9314), True, 'import numpy as np\n'), ((9363, 9388), 'numpy.array', 'np.array', (['voting_emb_list'], {}), '(voting_emb_list)\n', (9371, 9388), True, 'import numpy as np\n'), ((9404, 9426), 'numpy.array', 'np.array', (['context_list'], {}), '(context_list)\n', (9412, 9426), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import datetime
from matplotlib import rcParams
import matplotlib.pyplot as plt
from pandas_datareader import data
from tqdm import tqdm
from scipy.optimize import minimize
from scipy.optimize import NonlinearConstraint
def randomWeightGen(n):
    """Draw a random portfolio weight vector of length *n* that sums to 1."""
    raw = np.random.random(n)
    total = raw.sum()
    return raw / total
def rateOfReturn(asset: np.array):
    """Convert a price series into period-over-period simple returns."""
    previous, current = asset[:-1], asset[1:]
    return current / previous - 1
def get_assets_data(
    start_date,
    end_date,
    symbols: list,
):
    """
    Download daily close prices for *symbols* from Yahoo Finance and return
    their daily simple returns as a single array (squeezed, so a single
    symbol yields a 1-D array).
    """
    assets = []
    for symbol in tqdm(symbols):
        assets.append(
            data.get_data_yahoo(symbol, start_date,
                                end_date)["Close"].to_numpy())
    # Every requested symbol must have produced a price series.
    assert len(assets) == len(symbols)
    assets_daily_return = np.array([rateOfReturn(asset)
                                   for asset in assets]).squeeze()
    return assets_daily_return
def get_covariance_matrix(assets_daily_return):
    """
    Return the population covariance matrix (ddof=0) of asset returns.

    assets_daily_return: (days, n_assets)
    """
    return np.cov(assets_daily_return, ddof=0)
def get_imputed_rf(start_date, end_date):
    """
    Fetch daily 10-year treasury yield data (^TNX) and impute missing dates.

    Compared to stocks data, risk-free daily data usually have missing
    values.  Dates present in AAPL's trading calendar but absent from ^TNX
    are filled by rolling back to the most recent prior date with data.

    Returns the imputed DataFrame sorted by date.
    """
    AAPL = data.get_data_yahoo("AAPL", start_date, end_date)
    rf = data.get_data_yahoo("^TNX", start_date, end_date)
    missing_dates = list(set(AAPL.index) - set(rf.index))
    for missing_date in missing_dates:
        # Roll back till last date with value:
        shift = 1
        while (True):
            try:
                line = rf.loc[missing_date - datetime.timedelta(days=shift)]
            except KeyError:  # narrowed from bare except: only "date absent" is expected
                shift += 1
                continue
            break
        df_temp = pd.DataFrame(
            data={
                "Date": [missing_date],
                "High": line["High"],
                "Low": line["Low"],
                "Open": line["Open"],
                "Close": line["Close"],
                "Volume": line["Volume"],
                "Adj Close": line["Adj Close"],
            }).set_index("Date")
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported equivalent and behaves identically here.
        rf = pd.concat([rf, df_temp])
    return rf.sort_index()
def MonteCarlo(n, asset_daily_return, times):
    """
    Simulate *times* random portfolios over *n* assets.

    Parameters:
    ---------
    n: n_assets
    asset_daily_return: (n_assets, days), daily return data of assets
    times: times of simulation

    return:
    ---------
    arr_mu: daily expected return of each simulated portfolio (y coordinates)
    arr_volatility: daily standard deviation of each portfolio (x coordinates)
    arr_w: weight vector "w" at each point, shape (times, n)
    """
    weights_acc, mu_acc, vol_acc = [], [], []
    for _ in tqdm(range(times)):
        weights = randomWeightGen(n)
        weights_acc.append(weights)
        daily_return = np.dot(weights, asset_daily_return)
        vol_acc.append(np.std(daily_return, ddof=0))
        mu_acc.append(np.mean(daily_return))
    return (np.array(mu_acc),
            np.array(vol_acc),
            np.array(weights_acc).reshape(times, -1))
return arr_mu, arr_volatility, arr_w
def analyticalSolver(n, Sigma, R, arr_mu):
    """
    Trace the efficient frontier by solving the KKT system analytically.

    Parameters:
    ---------
    n: n_assets
    Sigma: covariance matrix, shape (n, n)
    R: expected return of each asset, shape (n,)
    arr_mu: target expected portfolio returns (y coordinates)

    return:
    ---------
    arr_volatility: portfolio standard deviation at each target (x coordinates)
    arr_w: weight vector "w" at each point
    """
    # Left-hand side of the Lagrangian first-order conditions:
    # [ 2*Sigma  -R  -1 ] [w      ]   [0 ]
    # [   R       0   0 ] [lambda1] = [mu]
    # [   1       0   0 ] [lambda2]   [1 ]
    top_rows = np.hstack([2 * Sigma, -np.expand_dims(R, axis=1), -np.ones((n, 1))])
    lhs = np.vstack([
        top_rows,
        np.hstack([R, [0], [0]]),
        np.hstack([np.ones(n), [0], [0]])
    ])
    volatilities = []
    weights = []
    for mu in tqdm(arr_mu):
        rhs = np.array([0] * n + [mu] + [1])
        solution = np.linalg.solve(lhs, rhs)
        w = solution[:n]
        weights.append(w)
        volatilities.append(np.sqrt(np.dot(w, np.dot(Sigma, w))))
    return np.array([volatilities]).squeeze(), np.array(weights)
return arr_volatility, arr_w
def optimizerSolver(n, Sigma, R, arr_mu):
    """
    Trace the efficient frontier numerically with scipy.optimize.minimize.

    Minimizes portfolio variance w'Sigma w subject to sum(w) == 1,
    R.w == mu and w >= 0 (long-only), for each target return mu.

    Parameters:
    ---------
    n: n_assets
    Sigma: covariance matrix, shape (n, n)
    R: expected daily return of each asset, shape (n,)
    arr_mu: target expected portfolio returns (y coordinates)

    return:
    ---------
    arr_volatility: portfolio standard deviation at each target (x coordinates)
    arr_w: weight vector "w" at each point, shape (len(arr_mu), n)
    """
    def portfolio_variance(x):
        w = np.array(x)
        return w.T.dot(Sigma.dot(w))

    weights_flat = np.array([])
    volatilities = np.array([])
    for mu in tqdm(arr_mu):
        # Equality constraints expressed as "expression == 0".
        budget = NonlinearConstraint(lambda x: x.sum() - 1, 0, 0)
        target_return = NonlinearConstraint(lambda x: np.dot(R, x) - mu, 0, 0)
        result = minimize(
            fun=portfolio_variance,
            x0=np.full(n, 1 / n),          # start from the equal-weight portfolio
            constraints=(budget, target_return),
            bounds=[(0, None)] * n,     # long-only
        )
        weights_flat = np.append(weights_flat, result.x)
        volatilities = np.append(volatilities, np.sqrt(result.fun))
    return volatilities, weights_flat.reshape(len(arr_mu), -1)
def tangencySolver(n, Sigma, R, rf, arr_mu):
    """
    Solve the tangency portfolio / capital market line analytically,
    allowing a position in the risk-free asset with return *rf*.

    Returns (arr_volatility, arr_w) where arr_w has shape (len(arr_mu), n).
    """
    excess = R - rf * np.ones(n)          # expected excess returns
    Sigma_inv = np.linalg.inv(Sigma)
    # Scalar excess' Sigma^-1 excess is loop-invariant, so compute it once.
    denom = excess.T.dot(Sigma_inv).dot(excess)
    weights_flat = np.array([])
    volatilities = np.array([])
    for mu in arr_mu:
        scale = (mu - rf) / denom
        w = scale * Sigma_inv.dot(excess)
        weights_flat = np.append(weights_flat, w)
        volatilities = np.append(volatilities, np.sqrt(w.T.dot(Sigma).dot(w)))
    return volatilities, weights_flat.reshape(len(arr_mu), -1)
| [
"numpy.mean",
"numpy.linalg.solve",
"numpy.sqrt",
"numpy.ones",
"numpy.hstack",
"numpy.random.random",
"tqdm.tqdm",
"scipy.optimize.NonlinearConstraint",
"datetime.timedelta",
"numpy.append",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.expand_dims",
"numpy.std",
"pandas.Data... | [((293, 312), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (309, 312), True, 'import numpy as np\n'), ((520, 533), 'tqdm.tqdm', 'tqdm', (['symbols'], {}), '(symbols)\n', (524, 533), False, 'from tqdm import tqdm\n'), ((993, 1028), 'numpy.cov', 'np.cov', (['assets_daily_return'], {'ddof': '(0)'}), '(assets_daily_return, ddof=0)\n', (999, 1028), True, 'import numpy as np\n'), ((1236, 1285), 'pandas_datareader.data.get_data_yahoo', 'data.get_data_yahoo', (['"""AAPL"""', 'start_date', 'end_date'], {}), "('AAPL', start_date, end_date)\n", (1255, 1285), False, 'from pandas_datareader import data\n'), ((1295, 1344), 'pandas_datareader.data.get_data_yahoo', 'data.get_data_yahoo', (['"""^TNX"""', 'start_date', 'end_date'], {}), "('^TNX', start_date, end_date)\n", (1314, 1344), False, 'from pandas_datareader import data\n'), ((3056, 3080), 'numpy.array', 'np.array', (['arr_volatility'], {}), '(arr_volatility)\n', (3064, 3080), True, 'import numpy as np\n'), ((3094, 3110), 'numpy.array', 'np.array', (['arr_mu'], {}), '(arr_mu)\n', (3102, 3110), True, 'import numpy as np\n'), ((4052, 4064), 'tqdm.tqdm', 'tqdm', (['arr_mu'], {}), '(arr_mu)\n', (4056, 4064), False, 'from tqdm import tqdm\n'), ((4379, 4394), 'numpy.array', 'np.array', (['arr_w'], {}), '(arr_w)\n', (4387, 4394), True, 'import numpy as np\n'), ((5187, 5199), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5195, 5199), True, 'import numpy as np\n'), ((5221, 5233), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5229, 5233), True, 'import numpy as np\n'), ((5249, 5261), 'tqdm.tqdm', 'tqdm', (['arr_mu'], {}), '(arr_mu)\n', (5253, 5261), False, 'from tqdm import tqdm\n'), ((6045, 6055), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (6052, 6055), True, 'import numpy as np\n'), ((6097, 6117), 'numpy.linalg.inv', 'np.linalg.inv', (['Sigma'], {}), '(Sigma)\n', (6110, 6117), True, 'import numpy as np\n'), ((6131, 6143), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6139, 
6143), True, 'import numpy as np\n'), ((6165, 6177), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6173, 6177), True, 'import numpy as np\n'), ((2879, 2908), 'numpy.dot', 'np.dot', (['w', 'asset_daily_return'], {}), '(w, asset_daily_return)\n', (2885, 2908), True, 'import numpy as np\n'), ((4081, 4111), 'numpy.array', 'np.array', (['([0] * n + [mu] + [1])'], {}), '([0] * n + [mu] + [1])\n', (4089, 4111), True, 'import numpy as np\n'), ((4131, 4158), 'numpy.linalg.solve', 'np.linalg.solve', (['mat1', 'vec2'], {}), '(mat1, vec2)\n', (4146, 4158), True, 'import numpy as np\n'), ((5125, 5136), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5133, 5136), True, 'import numpy as np\n'), ((5316, 5347), 'scipy.optimize.NonlinearConstraint', 'NonlinearConstraint', (['con1', '(0)', '(0)'], {}), '(con1, 0, 0)\n', (5335, 5347), False, 'from scipy.optimize import NonlinearConstraint\n'), ((5407, 5438), 'scipy.optimize.NonlinearConstraint', 'NonlinearConstraint', (['con2', '(0)', '(0)'], {}), '(con2, 0, 0)\n', (5426, 5438), False, 'from scipy.optimize import NonlinearConstraint\n'), ((5672, 5691), 'numpy.append', 'np.append', (['arr_w', 'w'], {}), '(arr_w, w)\n', (5681, 5691), True, 'import numpy as np\n'), ((5713, 5732), 'numpy.sqrt', 'np.sqrt', (['result.fun'], {}), '(result.fun)\n', (5720, 5732), True, 'import numpy as np\n'), ((5758, 5795), 'numpy.append', 'np.append', (['arr_volatility', 'volatility'], {}), '(arr_volatility, volatility)\n', (5767, 5795), True, 'import numpy as np\n'), ((6323, 6342), 'numpy.append', 'np.append', (['arr_w', 'w'], {}), '(arr_w, w)\n', (6332, 6342), True, 'import numpy as np\n'), ((6420, 6457), 'numpy.append', 'np.append', (['arr_volatility', 'volatility'], {}), '(arr_volatility, volatility)\n', (6429, 6457), True, 'import numpy as np\n'), ((2939, 2977), 'numpy.std', 'np.std', (['portfolio_daily_return'], {'ddof': '(0)'}), '(portfolio_daily_return, ddof=0)\n', (2945, 2977), True, 'import numpy as np\n'), ((3001, 3032), 'numpy.mean', 
'np.mean', (['portfolio_daily_return'], {}), '(portfolio_daily_return)\n', (3008, 3032), True, 'import numpy as np\n'), ((3123, 3138), 'numpy.array', 'np.array', (['arr_w'], {}), '(arr_w)\n', (3131, 3138), True, 'import numpy as np\n'), ((3922, 3946), 'numpy.hstack', 'np.hstack', (['[R, [0], [0]]'], {}), '([R, [0], [0]])\n', (3931, 3946), True, 'import numpy as np\n'), ((4330, 4356), 'numpy.array', 'np.array', (['[arr_volatility]'], {}), '([arr_volatility])\n', (4338, 4356), True, 'import numpy as np\n'), ((1733, 1930), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'Date': [missing_date], 'High': line['High'], 'Low': line['Low'], 'Open':\n line['Open'], 'Close': line['Close'], 'Volume': line['Volume'],\n 'Adj Close': line['Adj Close']}"}), "(data={'Date': [missing_date], 'High': line['High'], 'Low':\n line['Low'], 'Open': line['Open'], 'Close': line['Close'], 'Volume':\n line['Volume'], 'Adj Close': line['Adj Close']})\n", (1745, 1930), True, 'import pandas as pd\n'), ((4247, 4263), 'numpy.dot', 'np.dot', (['Sigma', 'w'], {}), '(Sigma, w)\n', (4253, 4263), True, 'import numpy as np\n'), ((5374, 5386), 'numpy.dot', 'np.dot', (['R', 'x'], {}), '(R, x)\n', (5380, 5386), True, 'import numpy as np\n'), ((5537, 5558), 'numpy.array', 'np.array', (['([1 / n] * n)'], {}), '([1 / n] * n)\n', (5545, 5558), True, 'import numpy as np\n'), ((3967, 3977), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (3974, 3977), True, 'import numpy as np\n'), ((570, 619), 'pandas_datareader.data.get_data_yahoo', 'data.get_data_yahoo', (['symbol', 'start_date', 'end_date'], {}), '(symbol, start_date, end_date)\n', (589, 619), False, 'from pandas_datareader import data\n'), ((1592, 1622), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'shift'}), '(days=shift)\n', (1610, 1622), False, 'import datetime\n'), ((3867, 3892), 'numpy.expand_dims', 'np.expand_dims', (['R'], {'axis': '(1)'}), '(R, axis=1)\n', (3881, 3892), True, 'import numpy as np\n'), ((3895, 3910), 'numpy.ones', 
'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (3902, 3910), True, 'import numpy as np\n')] |
from torch.utils.data import RandomSampler, BatchSampler, Sampler
import numpy as np
from random import shuffle
class VideoSampler(Sampler):
"""a batch sampler for sampling multiple video frames"""
def __init__(self, dataset, batchSize, shuffle=True):
self.dataset = dataset
self.num_videos = len(self.dataset.file_list)
self.batch_size = batchSize
# self.available_videos = [i for i in range(self.num_videos)]
def __iter__(self):
while True:
video_idx = np.random.randint(0, high=self.num_videos)
n_frames = self.dataset.video_lengths[video_idx]
batch = []
for _ in range(self.batch_size):
max_idx = n_frames - self.dataset.video_fps * 2 * 60
min_idx = self.dataset.video_fps * 2 * 60
frame_idx = np.random.randint(min_idx, max_idx)
batch.append((video_idx, frame_idx))
yield batch
def __len__(self):
return self.dataset.total_num_frames
class TwoVideoSampler(Sampler):
    """Infinite batch sampler that pairs frames from two video datasets.

    Each batch picks a shared video index (wrapped modulo each dataset's
    video count) and draws ``batchSize`` paired frame indices, one per
    dataset, skipping the first and last two minutes of every video.
    """
    def __init__(self, dataset_a, dataset_b, batchSize, shuffle=True):
        # `shuffle` is accepted for API compatibility; sampling is always random.
        self.dataset_a = dataset_a
        self.dataset_b = dataset_b
        # Iterate over the larger dataset; the smaller one wraps around.
        self.num_videos = max(len(self.dataset_a.file_list), len(self.dataset_b.file_list))
        self.batch_size = batchSize

    def __iter__(self):
        while True:
            video_idx = np.random.randint(0, high=self.num_videos)
            A_video_idx = video_idx % self.dataset_a.num_videos
            B_video_idx = video_idx % self.dataset_b.num_videos
            # Largest usable shared frame range across the two selected videos,
            # discounting 4 minutes total (2 at each end).
            max_frame_idx = max(self.dataset_a.video_lengths[A_video_idx],
                                self.dataset_b.video_lengths[B_video_idx])
            max_frame_idx -= max(self.dataset_a.video_fps[A_video_idx],
                                 self.dataset_b.video_fps[B_video_idx]) * 4 * 60
            batch = []
            for _ in range(self.batch_size):
                frame_idx = np.random.randint(0, high=max_frame_idx)
                # Wrap the shared draw into each video's own usable span,
                # then shift past the 2-minute lead-in.
                A_frame_idx = frame_idx % (self.dataset_a.video_lengths[A_video_idx]
                                           - self.dataset_a.video_fps[A_video_idx] * 4 * 60)
                B_frame_idx = frame_idx % (self.dataset_b.video_lengths[B_video_idx]
                                           - self.dataset_b.video_fps[B_video_idx] * 4 * 60)
                A_frame_idx = int(A_frame_idx + self.dataset_a.video_fps[A_video_idx] * 2 * 60)
                B_frame_idx = int(B_frame_idx + self.dataset_b.video_fps[B_video_idx] * 2 * 60)
                batch.append((A_video_idx, A_frame_idx, B_video_idx, B_frame_idx))
            yield batch

    def __len__(self):
        # BUG FIX: the original returned self.dataset.total_num_frames, but
        # this class only has dataset_a / dataset_b, so __len__ always raised
        # AttributeError.  Report the larger dataset's frame count instead.
        return max(self.dataset_a.total_num_frames, self.dataset_b.total_num_frames)
| [
"numpy.random.randint"
] | [((521, 563), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'self.num_videos'}), '(0, high=self.num_videos)\n', (538, 563), True, 'import numpy as np\n'), ((1470, 1512), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'self.num_videos'}), '(0, high=self.num_videos)\n', (1487, 1512), True, 'import numpy as np\n'), ((848, 883), 'numpy.random.randint', 'np.random.randint', (['min_idx', 'max_idx'], {}), '(min_idx, max_idx)\n', (865, 883), True, 'import numpy as np\n'), ((2012, 2052), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': 'max_frame_idx'}), '(0, high=max_frame_idx)\n', (2029, 2052), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat May 14 17:10:46 2016
@author: castaned
"""
import numpy as np
import main_modules as mmod
def xi(i,r1,theta,dr,phi,tstp):
global dth,dr_arr,alambda
dx1 = np.sin(np.deg2rad(i))
# dy1 = 0.
dz1 = np.cos(np.deg2rad(i))
x1 = r1*np.sin(np.deg2rad(theta))*np.cos(np.deg2rad(phi))
y1 = r1*np.sin(np.deg2rad(theta))*np.sin(np.deg2rad(phi))
z1 = r1*np.cos(np.deg2rad(theta))
pert = 0.1*r1
alambda = dr/(r1*np.deg2rad(tstp))
r3 = pert*np.tan(alambda)
r2 = np.sqrt((r1+pert)**2+r3**2)
psi = np.rad2deg(np.arcsin(r3/r2))
th2 = theta-psi
x2 = r2*np.sin(np.deg2rad(th2))*np.cos(np.deg2rad(phi))
y2 = r2*np.sin(np.deg2rad(th2))*np.sin(np.deg2rad(phi))
z2 = r2*np.cos(np.deg2rad(th2))
dx2 = x2-x1
dy2 = y2-y1
dz2 = z2-z1
v2 = np.sqrt(dx2**2+dy2**2+dz2**2)
cosang = (dx1*dx2+dz1*dz2)/v2
return cosang
def modelread(model_name, dr_arr, par):
    """Load a ROTORC model table from ``model_name``.

    A 10-row (half-grid) table is mirrored/extended to the standard 20-row
    layout.  For odd-parity modes (``par == "ODD"``) the file is re-read
    verbatim, deliberately discarding that extension.  ``dr_arr`` (length 21)
    is filled in place: index 0 and 20 are zero, 1..19 hold the successive
    differences of column 1 (the radius column).
    """
    model = np.genfromtxt(model_name)
    if len(model[:, 0]) == 10:
        # Extend a half grid to 20 rows: the upper half keeps the original
        # rows, the lower half mirrors the physical columns (1..4) and
        # continues the colatitude column (col 0) at 9-degree spacing.
        mirrored = model[::-1]
        extended = np.tile(0., (20, 5))
        extended[:10] = model
        extended[10:, 0] = np.arange(10, 20) * 9. + 4.5
        extended[10:, 1:5] = mirrored[:10, 1:5]
        model = 1. * extended
    if par == "ODD":
        # Odd-parity modes use the file as stored, without the extension.
        model = np.genfromtxt(model_name)
    dr_arr[0] = 0.
    dr_arr[1:20] = np.diff(model[:, 1])
    dr_arr[20] = 0.
    return model
def modelinterpolate(model, dr_arr, nzth, nzphi):
    """Linearly interpolate a model table onto ``nzth`` colatitude zones.

    Parameters
    ----------
    model : ndarray, shape (n_rows, n_cols)
        Column 0 is colatitude in degrees; the remaining columns hold the
        model quantities to interpolate.
    dr_arr : ndarray, shape (m,)
        Radius differences on their own grid: index 0 maps to 0 deg, the
        last index to 180 deg, interior points uniform over [9, 171] deg.
    nzth : int
        Number of colatitude zones of the output grid (0..180 deg).
    nzphi : int
        Unused; kept for signature compatibility with existing callers.

    Returns
    -------
    ndarray, shape (nzth, n_cols + 1)
        Column 0 is the output colatitude grid, columns 1..n_cols are the
        interpolated model columns, the last column is interpolated dr_arr.

    Notes
    -----
    The original version also built an `x1`/midpoint grid that was
    immediately overwritten (dead code); that has been removed.  The local
    previously named `xi` is renamed so it no longer shadows the sibling
    function `xi`.
    """
    theta_model = model[:, 0]
    # Grid on which dr_arr is defined: pinned at 0 and 180 deg, uniform inside.
    xi_dr = np.zeros(len(dr_arr))
    xi_dr[0] = 0.
    xi_dr[1:-1] = np.linspace(9, 171, len(dr_arr) - 2)
    xi_dr[-1] = 180.
    # Output colatitude grid.
    midx = np.linspace(0, 180, nzth)
    columns = [midx]
    # Linearly inter/extrapolate every model quantity onto the output grid.
    for col in range(1, len(model[0, :])):
        columns.append(np.interp(midx, theta_model, model[:, col]))
    # Radius differences live on their own grid.
    columns.append(np.interp(midx, xi_dr, dr_arr))
    return np.transpose(np.array(columns))
def find_cosxi(model_name, incl, par, fine_model=False):
    """Read a fine ROTORC model and return cos(xi) for every surface zone.

    Parameters
    ----------
    model_name : str
        Path of the fine model table (colatitude in column 0, radius in
        column 1, further physical columns after that).
    incl : sequence
        Inclination angles in degrees; only ``incl[0]`` is used.
    par : str
        Mode parity; unused here, kept for interface stability.
    fine_model : bool
        Must be True.  The coarse-model branch of the original code was
        commented out, so the default path crashed with a NameError; it now
        fails with an explicit error instead.

    Returns
    -------
    ndarray, shape (nzphi, nzth)
        cos(xi) for each azimuth (rows) and colatitude zone (columns).
    """
    # Stellar surface grid definition.
    nzphi = 400   # azimuthal zones
    nzth = 200    # colatitude zones
    phi = np.linspace(0, 360. - 360. / nzphi, nzphi)
    if not fine_model:
        # BUG FIX: the original silently fell through here and crashed later
        # with `NameError: interpmodel` because the coarse-model reader was
        # commented out.  Fail loudly with a clear message instead.
        raise NotImplementedError("find_cosxi only supports fine_model=True")
    interpmodel_fine = np.genfromtxt(model_name)
    interpmodel = np.zeros((nzth, len(interpmodel_fine[0, :]) + 1))
    # Colatitude step of the fine model grid.
    stp = interpmodel_fine[1, 0] - interpmodel_fine[0, 0]
    # Midpoint grid used to resample the radius differences back onto the
    # model's own colatitude grid.
    midp = np.empty(201)
    midp[0] = 0.
    midp[1:-1] = np.arange(stp / 2., interpmodel_fine[-1, 0], stp)
    midp[-1] = 190.
    dr_fine = np.empty(201)
    dr_fine[0] = 0.
    dr_fine[1:-1] = np.diff(interpmodel_fine[:, 1])
    dr_fine[-1] = 0.
    dr_fine = np.interp(interpmodel_fine[:, 0], midp, dr_fine)
    # Last column of the working model holds the radius differences.
    interpmodel[:, 0:-1] = interpmodel_fine
    interpmodel[:, -1] = dr_fine
    model_dth = stp
    # (The geometric correction factor and the surface-area array `darea`
    #  of the original were computed but never used or returned, so that
    #  dead work has been removed.)
    cossquiggle = []
    for angle in phi:
        cossquiggle.append(xi(incl[0], interpmodel[:, 1], interpmodel[:, 0],
                              interpmodel[:, -1], angle, model_dth))
    return np.array(cossquiggle)
#homedir = "/home/castaned/Documents/"
#static_m = homedir+"ROTORCmodels/visibilities/"
#
#rzone = 490
#
#vv = 0
#mass = "1p875"
#mde = "0 p5"
#m = mmod.emode(mass,vv,mde)
#where =static_m+mass+'Msun/V'+m.eq_vel+"/MODE_"+m.parity+"_"+str(m.index)+"/"
#modeln = "fine_model_MODE_"+str(m.index)+"_r"+str(rzone)
#incl = [0]
#
#cosxi = find_cosxi(where+modeln,incl,m.parity,fine_model=True)
| [
"numpy.tile",
"numpy.sqrt",
"numpy.tan",
"numpy.arcsin",
"numpy.diff",
"numpy.array",
"numpy.linspace",
"numpy.deg2rad",
"numpy.empty",
"numpy.interp",
"numpy.transpose",
"numpy.genfromtxt",
"numpy.arange"
] | [((537, 572), 'numpy.sqrt', 'np.sqrt', (['((r1 + pert) ** 2 + r3 ** 2)'], {}), '((r1 + pert) ** 2 + r3 ** 2)\n', (544, 572), True, 'import numpy as np\n'), ((841, 880), 'numpy.sqrt', 'np.sqrt', (['(dx2 ** 2 + dy2 ** 2 + dz2 ** 2)'], {}), '(dx2 ** 2 + dy2 ** 2 + dz2 ** 2)\n', (848, 880), True, 'import numpy as np\n'), ((978, 1003), 'numpy.genfromtxt', 'np.genfromtxt', (['model_name'], {}), '(model_name)\n', (991, 1003), True, 'import numpy as np\n'), ((1419, 1439), 'numpy.diff', 'np.diff', (['model[:, 1]'], {}), '(model[:, 1])\n', (1426, 1439), True, 'import numpy as np\n'), ((1804, 1833), 'numpy.linspace', 'np.linspace', (['(0)', '(180)', '(nzth + 1)'], {}), '(0, 180, nzth + 1)\n', (1815, 1833), True, 'import numpy as np\n'), ((1878, 1903), 'numpy.linspace', 'np.linspace', (['(0)', '(180)', 'nzth'], {}), '(0, 180, nzth)\n', (1889, 1903), True, 'import numpy as np\n'), ((2373, 2384), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2381, 2384), True, 'import numpy as np\n'), ((2393, 2408), 'numpy.transpose', 'np.transpose', (['y'], {}), '(y)\n', (2405, 2408), True, 'import numpy as np\n'), ((2665, 2709), 'numpy.linspace', 'np.linspace', (['(0)', '(360.0 - 360.0 / nzphi)', 'nzphi'], {}), '(0, 360.0 - 360.0 / nzphi, nzphi)\n', (2676, 2709), True, 'import numpy as np\n'), ((4206, 4227), 'numpy.array', 'np.array', (['cossquiggle'], {}), '(cossquiggle)\n', (4214, 4227), True, 'import numpy as np\n'), ((214, 227), 'numpy.deg2rad', 'np.deg2rad', (['i'], {}), '(i)\n', (224, 227), True, 'import numpy as np\n'), ((264, 277), 'numpy.deg2rad', 'np.deg2rad', (['i'], {}), '(i)\n', (274, 277), True, 'import numpy as np\n'), ((512, 527), 'numpy.tan', 'np.tan', (['alambda'], {}), '(alambda)\n', (518, 527), True, 'import numpy as np\n'), ((586, 604), 'numpy.arcsin', 'np.arcsin', (['(r3 / r2)'], {}), '(r3 / r2)\n', (595, 604), True, 'import numpy as np\n'), ((1077, 1098), 'numpy.tile', 'np.tile', (['(0.0)', '(20, 5)'], {}), '(0.0, (20, 5))\n', (1084, 1098), True, 'import numpy as 
np\n'), ((1349, 1374), 'numpy.genfromtxt', 'np.genfromtxt', (['model_name'], {}), '(model_name)\n', (1362, 1374), True, 'import numpy as np\n'), ((2175, 2207), 'numpy.interp', 'np.interp', (['midx', 'xi', 'model[:, i]'], {}), '(midx, xi, model[:, i])\n', (2184, 2207), True, 'import numpy as np\n'), ((2335, 2365), 'numpy.interp', 'np.interp', (['midx', 'xi_dr', 'dr_arr'], {}), '(midx, xi_dr, dr_arr)\n', (2344, 2365), True, 'import numpy as np\n'), ((2904, 2929), 'numpy.genfromtxt', 'np.genfromtxt', (['model_name'], {}), '(model_name)\n', (2917, 2929), True, 'import numpy as np\n'), ((3080, 3093), 'numpy.empty', 'np.empty', (['(201)'], {}), '(201)\n', (3088, 3093), True, 'import numpy as np\n'), ((3136, 3186), 'numpy.arange', 'np.arange', (['(stp / 2.0)', 'interpmodel_fine[-1, 0]', 'stp'], {}), '(stp / 2.0, interpmodel_fine[-1, 0], stp)\n', (3145, 3186), True, 'import numpy as np\n'), ((3225, 3238), 'numpy.empty', 'np.empty', (['(201)'], {}), '(201)\n', (3233, 3238), True, 'import numpy as np\n'), ((3288, 3319), 'numpy.diff', 'np.diff', (['interpmodel_fine[:, 1]'], {}), '(interpmodel_fine[:, 1])\n', (3295, 3319), True, 'import numpy as np\n'), ((3362, 3410), 'numpy.interp', 'np.interp', (['interpmodel_fine[:, 0]', 'midp', 'dr_fine'], {}), '(interpmodel_fine[:, 0], midp, dr_fine)\n', (3371, 3410), True, 'import numpy as np\n'), ((324, 339), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (334, 339), True, 'import numpy as np\n'), ((386, 401), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (396, 401), True, 'import numpy as np\n'), ((422, 439), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (432, 439), True, 'import numpy as np\n'), ((480, 496), 'numpy.deg2rad', 'np.deg2rad', (['tstp'], {}), '(tstp)\n', (490, 496), True, 'import numpy as np\n'), ((667, 682), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (677, 682), True, 'import numpy as np\n'), ((727, 742), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (737, 742), 
True, 'import numpy as np\n'), ((767, 782), 'numpy.deg2rad', 'np.deg2rad', (['th2'], {}), '(th2)\n', (777, 782), True, 'import numpy as np\n'), ((1852, 1863), 'numpy.diff', 'np.diff', (['x1'], {}), '(x1)\n', (1859, 1863), True, 'import numpy as np\n'), ((298, 315), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (308, 315), True, 'import numpy as np\n'), ((360, 377), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (370, 377), True, 'import numpy as np\n'), ((643, 658), 'numpy.deg2rad', 'np.deg2rad', (['th2'], {}), '(th2)\n', (653, 658), True, 'import numpy as np\n'), ((703, 718), 'numpy.deg2rad', 'np.deg2rad', (['th2'], {}), '(th2)\n', (713, 718), True, 'import numpy as np\n'), ((3727, 3748), 'numpy.deg2rad', 'np.deg2rad', (['model_dth'], {}), '(model_dth)\n', (3737, 3748), True, 'import numpy as np\n'), ((4058, 4087), 'numpy.deg2rad', 'np.deg2rad', (['interpmodel[:, 0]'], {}), '(interpmodel[:, 0])\n', (4068, 4087), True, 'import numpy as np\n')] |
import warnings
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import math as tfm
import anndata
from .utils import print_func
from .utils.float_limits import check_range_exp
# Preprocessing functionalities
def preprocess(adata,
               prepro_func='none',
               transformation='none',
               sf_norm=True,
               centering=True,
               noise_factor=0.0,
               covariates=None):
    """Run the full preprocessing pipeline on an AnnData object.

    Steps, in order: element-wise preprocessing, size-factor normalization
    (or unit size factors), optional log transformation, optional Gaussian
    noise, optional mean-centering, and covariate encoding.  The matrix fed
    to the autoencoder ends up in adata.uns["X_AE_input"], while adata.X is
    reset to the raw values at the end.

    :param adata: anndata.AnnData input; adata.raw is set to the incoming object.
    :param prepro_func: 'none'/'log'/'log1p'/'log2' or a callable applied to adata.X.
    :param transformation: 'none'/'log'/'log1p' transform applied after normalization.
    :param sf_norm: if True, normalize by DESeq-style size factors; otherwise
        size factors are set to 1 (keeps later reverse transforms a no-op).
    :param centering: if True, mean-center each feature.
    :param noise_factor: scale of Gaussian noise added per feature (0 disables it).
    :param covariates: list of adata.obs column names to encode and append
        to the AE input, or None.
    :return: the same AnnData object, annotated with intermediate layers.
    """
    assert isinstance(adata, anndata.AnnData), (
        'adata must be an AnnData instance')
    # preprocess
    print_func.print_time("Preprocessing input ...")
    adata.raw = adata
    adata = prepro(adata, prepro_func)
    # sizefactor calculation + normalization
    if sf_norm is True:
        adata = sf_normalize(adata)
    else:
        # no normalization requested: unit size factors
        adata.obsm["sizefactors"] = np.ones(adata.n_obs)
    # transform
    adata = transform(adata, transformation)
    # add noise if requested
    adata = add_noise(adata, noise_factor)
    # centering
    if centering is True:
        adata = center(adata)
    # prepare covariates for inclusion in fit
    adata = prepare_covariates(adata, covariates)
    # put input matrix back to adata.X (AE input is in X_AE_input)
    adata.X = adata.layers["X_raw"]
    return adata
def center(adata):
    """Mean-center each feature (column) of adata.X.

    The per-feature means (NaN-ignoring) are stored in adata.varm['means']
    so the centering can be undone later.
    """
    col_means = np.nanmean(adata.X, axis=0)
    adata.varm['means'] = col_means
    adata.X = adata.X - col_means
    return adata
def sf_normalize(adata):
    """Divide adata.X row-wise by DESeq-style size factors.

    Size factors are computed via ``calc_sizefactors`` (stored in
    adata.obsm["sizefactors"]); the normalized matrix is kept in
    adata.layers["X_sf_norm"].
    """
    adata = calc_sizefactors(adata)
    # reshape to a column vector so division broadcasts over features
    size_factors = adata.obsm["sizefactors"][:, np.newaxis]
    adata.X = adata.X / size_factors
    adata.layers["X_sf_norm"] = adata.X
    return adata
def calc_sizefactors(adata):
    """Compute per-sample size factors (DESeq median-of-ratios style).

    The result is stored in adata.obsm["sizefactors"].  Warnings are
    suppressed because log of zero counts is expected here.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        log_geo_means = np.nanmean(np.log(adata.X), axis=0)
        factors = [_calc_size_factor_per_sample(row, log_geo_means)
                   for row in adata.X]
    adata.obsm["sizefactors"] = np.array(factors)
    return adata
def _calc_size_factor_per_sample(sample_values, loggeomeans):
sf_sample = np.exp(np.nanmedian((np.log(sample_values) - loggeomeans)[
np.logical_and(np.isfinite(loggeomeans), sample_values > 0)]))
return sf_sample
def prepro(adata, prepro_func):
    """Apply the element-wise preprocessing function to adata.X.

    ``prepro_func`` is either one of the names 'none'/'log'/'log1p'/'log2'
    or a callable.  The raw matrix is kept in layers["X_raw"], the result
    in layers["X_prepro"], and the function name in uns["prepro_func_name"].
    """
    named_funcs = {'none': (lambda x: x),
                   'log': np.log,
                   'log1p': np.log1p,
                   'log2': np.log2}
    adata.layers["X_raw"] = adata.X
    if isinstance(prepro_func, str):
        assert prepro_func in named_funcs, 'Unknown prepro function'
        adata.uns["prepro_func_name"] = prepro_func
        func = named_funcs[prepro_func]
    else:
        # a user-supplied callable: record its name
        adata.uns["prepro_func_name"] = prepro_func.__name__
        func = prepro_func
    adata.X = func(adata.X)
    adata.layers["X_prepro"] = adata.X
    return adata
def transform(adata, transform_func):
    """Optionally log-transform adata.X ('none', 'log' or 'log1p').

    The choice is recorded in uns["transform_func"] and the (possibly
    unchanged) matrix in layers["X_transformed"].
    """
    assert transform_func in ('none', 'log', 'log1p'), (
        'Unknown tranformation function')
    adata.uns["transform_func"] = transform_func
    trans = {'log': np.log, 'log1p': np.log1p}.get(transform_func)
    if trans is not None:
        adata.X = trans(adata.X)
    adata.layers["X_transformed"] = adata.X
    return adata
def reverse_transform(adata):
    """Invert the data transformation on layers["X_predicted"].

    Requires uns["transform_func"] (set by ``transform``).  The still-
    transformed prediction is preserved in layers["X_predicted_no_trans"].
    A 'none' transform is a no-op.
    """
    assert "transform_func" in adata.uns.keys(), (
        'No tranform_func found in adata.uns')
    trans_name = adata.uns["transform_func"]
    if trans_name == 'none':
        return adata
    adata.layers["X_predicted_no_trans"] = adata.layers["X_predicted"]
    adata.layers["X_predicted"] = rev_trans(adata.layers["X_predicted"],
                                            adata.obsm["sizefactors"],
                                            trans_name)
    return adata
def rev_trans(x_pred, sf, trans_func):
    """Map predictions back to count space.

    For 'log'/'log1p' the values are clipped to a safe range
    (check_range_exp) and exponentiated; the size factors are then
    multiplied back in.

    NOTE: the 'log1p' branch intentionally does NOT subtract 1 after
    exponentiating — the original kept that step disabled, and this
    rewrite preserves it.
    """
    assert trans_func in ('none', 'log', 'log1p'), (
        'Unknown tranformation function')
    if trans_func in ('log', 'log1p'):
        x_pred = check_range_exp(x_pred)
        x_pred = np.exp(x_pred)
    # undo size-factor normalization (sf is all ones when sf_norm=False)
    return x_pred * np.expand_dims(sf, 1)
@tf.function
def rev_trans_tf(x_pred, sf, trans_func):
    """TensorFlow twin of ``rev_trans``: map predictions back to count space.

    Clips via check_range_exp and exponentiates for 'log'/'log1p', then
    multiplies the size factors back in.  Runs as a traced tf.function;
    ``trans_func`` should be a plain Python string so the branch is fixed
    at trace time.
    """
    assert trans_func in ('none', 'log', 'log1p'), (
        'Unknown tranformation function')
    if trans_func == 'log':
        x_pred = check_range_exp(x_pred)
        x_pred = tfm.exp(x_pred)
    elif trans_func == 'log1p':
        x_pred = check_range_exp(x_pred)
        # NOTE: the "- 1" step of a true log1p inverse is deliberately
        # disabled, matching the numpy version rev_trans.
        x_pred = tfm.exp(x_pred) # - 1
    # multiply sf back (sf=1 if sf_norm=False so no effect)
    x_pred = x_pred * tf.expand_dims(sf, 1)
    return x_pred
def prepare_covariates(adata, covariates=None):
    """Encode sample covariates and build the autoencoder input matrix.

    For each name in ``covariates`` (columns of ``adata.obs``):
      * single-category columns are dropped (no information),
      * two-category columns already coded as 0/1 are kept as-is,
      * other two-category columns are reduced to one 0/1 indicator,
      * columns with more than two categories are one-hot encoded.

    The encoded matrix is stored in uns["covariates_oneh"] and appended to
    adata.X to form uns["X_AE_input"].  With ``covariates=None`` the AE
    input is simply adata.X.
    """
    if covariates is None:
        adata.uns["X_AE_input"] = adata.X
        return adata

    assert isinstance(adata, anndata.AnnData), (
        'adata must be an AnnData instance')
    assert isinstance(covariates, list), (
        "covariates has to be a list of strings")
    for cov in covariates:
        assert cov in adata.obs.columns, (
            f"Did not find column '{cov}' in adata.obs")

    cov_sample = adata.obs[covariates].copy()
    # transform each covariate column to a 0/1 (or one-hot) encoding
    for c in cov_sample:
        col = cov_sample[c].astype("category")
        n_categories = len(col.cat.categories)
        if n_categories == 1:
            # constant column: carries no information
            cov_sample.drop(c, axis=1, inplace=True, errors="ignore")
        elif n_categories == 2:
            # BUG FIX: the original wrapped this check in a list containing
            # a generator expression, so `all(...)` was always truthy and
            # non-0/1 binary columns were never re-encoded (later crashing
            # the float conversion below).
            already_01 = all(x in (0, 1) for x in col.cat.categories)
            if not already_01:
                # reduce a binary categorical to a single 0/1 indicator
                oneh = pd.get_dummies(cov_sample[c])
                cov_sample[c] = oneh.iloc[:, 0]
        else:
            oneh = pd.get_dummies(cov_sample[c])
            oneh.columns = [c + "_" + str(x) for x in oneh.columns]
            cov_sample.drop(c, axis=1, inplace=True, errors="ignore")
            cov_sample = pd.concat([cov_sample, oneh], axis=1)
    print_func.print_time("Including given covariates as:")
    print(cov_sample.head())
    adata.uns["covariates_oneh"] = np.array(cov_sample.values,
                                            dtype=adata.X.dtype)
    adata.uns["X_AE_input"] = np.concatenate([adata.X, cov_sample.values],
                                             axis=1)
    return adata
def add_noise(adata, noise_factor):
    """Add Gaussian noise to adata.X, scaled per feature by its std.

    With ``noise_factor == 0`` the data is left untouched; otherwise the
    added noise is stored in adata.layers["X_noise"].
    """
    assert noise_factor >= 0, "noise_factor must be >= 0"
    if noise_factor == 0:
        return adata
    # per-feature standard deviation (NaN-ignoring, sample std)
    noise = (np.random.normal(loc=0, scale=1, size=adata.X.shape) *
             noise_factor * np.nanstd(adata.X, ddof=1, axis=0))
    adata.X = adata.X + noise
    adata.layers["X_noise"] = noise
    return adata
def inject_outliers(adata, inj_freq=1e-3, inj_mean=3, inj_sd=1.6, **kwargs):
    """Inject artificial outliers into adata for benchmarking.

    The data is preprocessed (without centering or noise); outlier
    positions are drawn with frequency ``inj_freq`` (half up, half down)
    and shifted by log-normally distributed z-scores (``inj_mean``,
    ``inj_sd``) in transformed space, then reverse-transformed back.
    Required kwargs: "prepro_func", "data_trans", "sf_norm", "seed".

    Returns a NEW AnnData with layers "X_is_outlier" (+1/-1/0, NaN where
    the input was not finite) and "X_injected_zscore".
    """
    adata = preprocess(adata,
                       prepro_func=kwargs["prepro_func"],
                       transformation=kwargs["data_trans"],
                       sf_norm=kwargs["sf_norm"],
                       centering=False, # kwargs["centering"],
                       noise_factor=0.0,
                       covariates=None)
    # pick the matrix matching the preprocessing actually applied
    if kwargs["data_trans"] != 'none':
        X_trans = adata.layers["X_transformed"]
    elif kwargs["sf_norm"] is True:
        X_trans = adata.layers["X_sf_norm"]
    else:
        X_trans = adata.layers["X_prepro"]
    # draw where to inject (+1 up-outlier, -1 down-outlier, 0 untouched)
    np.random.seed(kwargs["seed"])
    outlier_mask = np.random.choice(
        [0., -1., 1.], size=X_trans.shape,
        p=[1 - inj_freq, inj_freq / 2, inj_freq / 2])
    # insert with log normally distributed zscore in transformed space
    inj_zscores = _rlnorm(size=X_trans.shape, inj_mean=inj_mean, inj_sd=inj_sd)
    sd = np.nanstd(X_trans, ddof=1, axis=0)
    X_injected_trans = outlier_mask * inj_zscores * sd + X_trans
    # reverse transform to original space
    X_injected = rev_trans(X_injected_trans, sf=adata.obsm["sizefactors"],
                           trans_func=kwargs["data_trans"])
    # cap injected values: at most 100x the largest observed value, and
    # never beyond the dtype's representable range
    max_outlier_value = np.nanmin(
        [100 * np.nanmax(adata.layers["X_prepro"]),
         np.finfo(adata.layers["X_prepro"].dtype).max])
    cond_value_too_big = X_injected > max_outlier_value
    # too-strong injections are reverted and no longer counted as outliers
    X_injected[cond_value_too_big] = (
        adata.layers["X_prepro"][cond_value_too_big])
    outlier_mask[cond_value_too_big] = 0
    # positions that were not finite in the input stay undefined
    outlier_mask[~np.isfinite(adata.X)] = np.nan
    X_injected[~np.isfinite(adata.X)] = np.nan
    nr_out = np.sum(np.abs(outlier_mask[np.isfinite(outlier_mask)]))
    print_func.print_time(f"Injecting {nr_out} outliers "
                          f"(freq = {nr_out/adata.X.size})")
    # return new AnnData object with injected outliers
    adata_with_outliers = anndata.AnnData(X=X_injected,
                                          dtype=adata.X.dtype,
                                          obs=adata.obs)
    adata_with_outliers.layers["X_is_outlier"] = outlier_mask
    adata_with_outliers.layers["X_injected_zscore"] = inj_zscores
    return adata_with_outliers
def _rlnorm(size, inj_mean, inj_sd):
log_mean = np.log(inj_mean) if inj_mean != 0 else 0
return np.random.lognormal(mean=log_mean, sigma=np.log(inj_sd), size=size)
def get_k_most_variable_features(adata, k):
    """Indices of the ``k`` features with the largest standard deviation
    in adata.uns["X_AE_input"].

    With ``k=None`` every feature index is returned (as a range).
    """
    if k is None:
        return range(adata.n_vars)
    assert isinstance(k, int), "k has to be an integer"
    assert isinstance(adata, anndata.AnnData), (
        "adata must be an AnnData instance")
    assert "X_AE_input" in adata.uns.keys(), (
        "X_AE_input needs to be in adata.uns, preprocess data first")
    per_feature_sd = np.nanstd(adata.uns["X_AE_input"], axis=0)
    # argsort of the negated stds gives descending order; keep the top k
    return np.argsort(-per_feature_sd)[:k]
| [
"numpy.log",
"numpy.argsort",
"numpy.nanmean",
"numpy.array",
"numpy.isfinite",
"tensorflow.math.exp",
"numpy.exp",
"numpy.random.seed",
"numpy.concatenate",
"numpy.nanmax",
"warnings.simplefilter",
"anndata.AnnData",
"numpy.random.normal",
"numpy.nanstd",
"numpy.ones",
"numpy.random.c... | [((1336, 1363), 'numpy.nanmean', 'np.nanmean', (['adata.X'], {'axis': '(0)'}), '(adata.X, axis=0)\n', (1346, 1363), True, 'import numpy as np\n'), ((1901, 1913), 'numpy.array', 'np.array', (['sf'], {}), '(sf)\n', (1909, 1913), True, 'import numpy as np\n'), ((8050, 8080), 'numpy.random.seed', 'np.random.seed', (["kwargs['seed']"], {}), "(kwargs['seed'])\n", (8064, 8080), True, 'import numpy as np\n'), ((8100, 8205), 'numpy.random.choice', 'np.random.choice', (['[0.0, -1.0, 1.0]'], {'size': 'X_trans.shape', 'p': '[1 - inj_freq, inj_freq / 2, inj_freq / 2]'}), '([0.0, -1.0, 1.0], size=X_trans.shape, p=[1 - inj_freq, \n inj_freq / 2, inj_freq / 2])\n', (8116, 8205), True, 'import numpy as np\n'), ((8408, 8442), 'numpy.nanstd', 'np.nanstd', (['X_trans'], {'ddof': '(1)', 'axis': '(0)'}), '(X_trans, ddof=1, axis=0)\n', (8417, 8442), True, 'import numpy as np\n'), ((9467, 9532), 'anndata.AnnData', 'anndata.AnnData', ([], {'X': 'X_injected', 'dtype': 'adata.X.dtype', 'obs': 'adata.obs'}), '(X=X_injected, dtype=adata.X.dtype, obs=adata.obs)\n', (9482, 9532), False, 'import anndata\n'), ((10335, 10377), 'numpy.nanstd', 'np.nanstd', (["adata.uns['X_AE_input']"], {'axis': '(0)'}), "(adata.uns['X_AE_input'], axis=0)\n", (10344, 10377), True, 'import numpy as np\n'), ((841, 861), 'numpy.ones', 'np.ones', (['adata.n_obs'], {}), '(adata.n_obs)\n', (848, 861), True, 'import numpy as np\n'), ((1547, 1568), 'numpy.expand_dims', 'np.expand_dims', (['sf', '(1)'], {}), '(sf, 1)\n', (1561, 1568), True, 'import numpy as np\n'), ((1666, 1691), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1689, 1691), False, 'import warnings\n'), ((1701, 1732), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1722, 1732), False, 'import warnings\n'), ((4223, 4237), 'numpy.exp', 'np.exp', (['x_pred'], {}), '(x_pred)\n', (4229, 4237), True, 'import numpy as np\n'), ((4432, 4453), 'numpy.expand_dims', 'np.expand_dims', 
(['sf', '(1)'], {}), '(sf, 1)\n', (4446, 4453), True, 'import numpy as np\n'), ((4712, 4727), 'tensorflow.math.exp', 'tfm.exp', (['x_pred'], {}), '(x_pred)\n', (4719, 4727), True, 'from tensorflow import math as tfm\n'), ((4924, 4945), 'tensorflow.expand_dims', 'tf.expand_dims', (['sf', '(1)'], {}), '(sf, 1)\n', (4938, 4945), True, 'import tensorflow as tf\n'), ((6668, 6716), 'numpy.array', 'np.array', (['cov_sample.values'], {'dtype': 'adata.X.dtype'}), '(cov_sample.values, dtype=adata.X.dtype)\n', (6676, 6716), True, 'import numpy as np\n'), ((6799, 6851), 'numpy.concatenate', 'np.concatenate', (['[adata.X, cov_sample.values]'], {'axis': '(1)'}), '([adata.X, cov_sample.values], axis=1)\n', (6813, 6851), True, 'import numpy as np\n'), ((9830, 9846), 'numpy.log', 'np.log', (['inj_mean'], {}), '(inj_mean)\n', (9836, 9846), True, 'import numpy as np\n'), ((10393, 10416), 'numpy.argsort', 'np.argsort', (['(-feature_sd)'], {}), '(-feature_sd)\n', (10403, 10416), True, 'import numpy as np\n'), ((1766, 1781), 'numpy.log', 'np.log', (['adata.X'], {}), '(adata.X)\n', (1772, 1781), True, 'import numpy as np\n'), ((4328, 4342), 'numpy.exp', 'np.exp', (['x_pred'], {}), '(x_pred)\n', (4334, 4342), True, 'import numpy as np\n'), ((4818, 4833), 'tensorflow.math.exp', 'tfm.exp', (['x_pred'], {}), '(x_pred)\n', (4825, 4833), True, 'from tensorflow import math as tfm\n'), ((7226, 7260), 'numpy.nanstd', 'np.nanstd', (['adata.X'], {'ddof': '(1)', 'axis': '(0)'}), '(adata.X, ddof=1, axis=0)\n', (7235, 7260), True, 'import numpy as np\n'), ((9119, 9139), 'numpy.isfinite', 'np.isfinite', (['adata.X'], {}), '(adata.X)\n', (9130, 9139), True, 'import numpy as np\n'), ((9166, 9186), 'numpy.isfinite', 'np.isfinite', (['adata.X'], {}), '(adata.X)\n', (9177, 9186), True, 'import numpy as np\n'), ((9923, 9937), 'numpy.log', 'np.log', (['inj_sd'], {}), '(inj_sd)\n', (9929, 9937), True, 'import numpy as np\n'), ((7139, 7191), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 
'scale': '(1)', 'size': 'adata.X.shape'}), '(loc=0, scale=1, size=adata.X.shape)\n', (7155, 7191), True, 'import numpy as np\n'), ((8798, 8833), 'numpy.nanmax', 'np.nanmax', (["adata.layers['X_prepro']"], {}), "(adata.layers['X_prepro'])\n", (8807, 8833), True, 'import numpy as np\n'), ((8864, 8904), 'numpy.finfo', 'np.finfo', (["adata.layers['X_prepro'].dtype"], {}), "(adata.layers['X_prepro'].dtype)\n", (8872, 8904), True, 'import numpy as np\n'), ((9237, 9262), 'numpy.isfinite', 'np.isfinite', (['outlier_mask'], {}), '(outlier_mask)\n', (9248, 9262), True, 'import numpy as np\n'), ((2032, 2053), 'numpy.log', 'np.log', (['sample_values'], {}), '(sample_values)\n', (2038, 2053), True, 'import numpy as np\n'), ((2093, 2117), 'numpy.isfinite', 'np.isfinite', (['loggeomeans'], {}), '(loggeomeans)\n', (2104, 2117), True, 'import numpy as np\n'), ((6288, 6317), 'pandas.get_dummies', 'pd.get_dummies', (['cov_sample[c]'], {}), '(cov_sample[c])\n', (6302, 6317), True, 'import pandas as pd\n'), ((6493, 6530), 'pandas.concat', 'pd.concat', (['[cov_sample, oneh]'], {'axis': '(1)'}), '([cov_sample, oneh], axis=1)\n', (6502, 6530), True, 'import pandas as pd\n'), ((6125, 6154), 'pandas.get_dummies', 'pd.get_dummies', (['cov_sample[c]'], {}), '(cov_sample[c])\n', (6139, 6154), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
#title : exponential.py
#description : Discrete exponential distribution.
#author : <NAME>
#date : 2015.06.19
#version : 0.1
#usage : python exponential.py
#=====================================================
import numpy as np
from mpmath import exp, ln
from core import core as co
class Exponential(co.RealDistribution):
    """
    Discrete exponential distribution:
    Exponential(x) = C * exp(-x/beta),
    where C = (1-exp(-1/beta)) is the normalization constant.
    If the scale parameter beta is very small (below co.EPSILON), the
    distribution degenerates and a delta distribution at 0 is used instead.
    """
    @staticmethod
    def pmf(params, domain=co.DEFAULT_PDF_MAX):
        """
        Probability mass function on the integers 0..domain.
        :param params: single element list containing the scale (beta) parameter.
        :param domain: domain size.
        :return: probability mass function (array of length domain+1).
        """
        if params[0] < co.EPSILON:
            # degenerate case: all mass at x = 0
            return co.delta.pmf([0], domain)
        else:
            # normalization constant; mpmath exp keeps precision for small beta
            c = 1-exp(-1/params[0])
            x = np.arange(0, domain+1)
            return np.exp(-x/params[0])*c
    @staticmethod
    def samples(params, size=co.DEFAULT_SAMPLE_SIZE, domain=co.DEFAULT_SAMPLE_MAX):
        """
        Returns samples with discrete exponential distribution.
        :param params: single element list containing the scale (beta) parameter.
        :param size: number of samples.
        :param domain: domain size (samples are drawn from 0..domain).
        :return: numpy array of samples.
        """
        if params[0] < co.EPSILON:
            # degenerate case: every sample is 0
            return co.delta.samples([0], size)
        else:
            x = np.arange(0, domain+1)
            # sample from the (unnormalized) weights exp(-x/beta)
            return co.generate_discrete_samples(x, np.exp(-x/params[0]), size)
    @staticmethod
    def log_likelihood(params, data, nonzero_only=False):
        """
        Calculates the log-likelihood on the data:
        N*ln(C) - sum(x)/beta, with C = 1 - exp(-1/beta).
        :param params: single element list containing the scale (beta) parameter.
        :param data: input data as a numpy array.
        :param nonzero_only: whether nonzero elements should be considered only.
        This is used after determining the parameters and comparing to
        distributions that ignore zero values.
        :return: log-likelihood.
        """
        if params[0] < co.EPSILON:
            # degenerate case: delta-distribution likelihood
            return co.delta.log_likelihood([0], data)
        else:
            if nonzero_only:
                _samples = data[np.where(data > 0)]
            else:
                _samples = data
            # mpmath ln/exp keep the normalization term accurate for small beta
            return len(_samples)*ln(1-exp(-1/params[0])) - np.sum(_samples)/params[0]
    @staticmethod
    def get_params(params):
        """Human-readable parameter string (beta to 5 decimals)."""
        return "beta = %.5f" % params[0]
return "beta = %.5f" % params[0]
exponential = Exponential() | [
"mpmath.exp",
"core.core.delta.log_likelihood",
"numpy.where",
"core.core.delta.pmf",
"numpy.exp",
"core.core.delta.samples",
"numpy.sum",
"numpy.arange"
] | [((940, 965), 'core.core.delta.pmf', 'co.delta.pmf', (['[0]', 'domain'], {}), '([0], domain)\n', (952, 965), True, 'from core import core as co\n'), ((1032, 1056), 'numpy.arange', 'np.arange', (['(0)', '(domain + 1)'], {}), '(0, domain + 1)\n', (1041, 1056), True, 'import numpy as np\n'), ((1542, 1569), 'core.core.delta.samples', 'co.delta.samples', (['[0]', 'size'], {}), '([0], size)\n', (1558, 1569), True, 'from core import core as co\n'), ((1600, 1624), 'numpy.arange', 'np.arange', (['(0)', '(domain + 1)'], {}), '(0, domain + 1)\n', (1609, 1624), True, 'import numpy as np\n'), ((2273, 2307), 'core.core.delta.log_likelihood', 'co.delta.log_likelihood', (['[0]', 'data'], {}), '([0], data)\n', (2296, 2307), True, 'from core import core as co\n'), ((998, 1017), 'mpmath.exp', 'exp', (['(-1 / params[0])'], {}), '(-1 / params[0])\n', (1001, 1017), False, 'from mpmath import exp, ln\n'), ((1074, 1096), 'numpy.exp', 'np.exp', (['(-x / params[0])'], {}), '(-x / params[0])\n', (1080, 1096), True, 'import numpy as np\n'), ((1674, 1696), 'numpy.exp', 'np.exp', (['(-x / params[0])'], {}), '(-x / params[0])\n', (1680, 1696), True, 'import numpy as np\n'), ((2383, 2401), 'numpy.where', 'np.where', (['(data > 0)'], {}), '(data > 0)\n', (2391, 2401), True, 'import numpy as np\n'), ((2512, 2528), 'numpy.sum', 'np.sum', (['_samples'], {}), '(_samples)\n', (2518, 2528), True, 'import numpy as np\n'), ((2491, 2510), 'mpmath.exp', 'exp', (['(-1 / params[0])'], {}), '(-1 / params[0])\n', (2494, 2510), False, 'from mpmath import exp, ln\n')] |
from rlkit.samplers.util import rollout
from rlkit.samplers.rollout_functions import multitask_rollout
import numpy as np
class InPlacePathSampler(object):
    """
    A sampler that does no serialization for sampling; it uses the current
    policy and environment as-is.
    WARNING: This will affect the environment! So
    ```
    sampler = InPlacePathSampler(env, ...)
    sampler.obtain_samples  # this has side-effects: env will change!
    ```
    """
    def __init__(self, env, policy, max_samples, max_path_length, randomize_env=False, alg=None):
        self.env = env
        self.policy = policy
        self.max_path_length = max_path_length
        self.max_samples = max_samples
        assert max_samples >= max_path_length, "Need max_samples >= max_path_length"
        self.randomize_env = randomize_env
        self.alg = alg

    def start_worker(self):
        # nothing to set up: sampling happens in-process
        pass

    def shutdown_worker(self):
        # nothing to tear down
        pass

    def _collect_one_path(self, rollout_type):
        """Run a single rollout of the configured type and return the path."""
        if rollout_type == "multitask":
            return multitask_rollout(
                self.env,
                self.policy,
                max_path_length=self.max_path_length,
                animated=False,
                observation_key='observation',
                desired_goal_key='desired_goal',
                get_action_kwargs=dict(
                    return_stacked_softmax=False,
                    mask=np.ones((1, self.env.unwrapped.num_blocks)),
                    deterministic=True
                )
            )
        return rollout(
            self.env, self.policy, max_path_length=self.max_path_length
        )

    def obtain_samples(self, rollout_type="multitask"):
        """Collect full-length paths until the sample budget is exhausted."""
        collected = []
        steps_so_far = 0
        # keep sampling while a full-length path still fits in the budget
        while steps_so_far + self.max_path_length <= self.max_samples:
            if self.randomize_env:
                self.env, env_name = self.alg.get_new_env()
                print(f"Evaluating {env_name}")
            path = self._collect_one_path(rollout_type)
            collected.append(path)
            steps_so_far += len(path['observations'])
        return collected
| [
"numpy.ones",
"rlkit.samplers.util.rollout"
] | [((1912, 1980), 'rlkit.samplers.util.rollout', 'rollout', (['self.env', 'self.policy'], {'max_path_length': 'self.max_path_length'}), '(self.env, self.policy, max_path_length=self.max_path_length)\n', (1919, 1980), False, 'from rlkit.samplers.util import rollout\n'), ((1743, 1786), 'numpy.ones', 'np.ones', (['(1, self.env.unwrapped.num_blocks)'], {}), '((1, self.env.unwrapped.num_blocks))\n', (1750, 1786), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
from corner_detector import *
from anms import *
from feat_desc import *
from feat_match import *
from ransac_est_homography import *
from mymosaic import *
def wrapper():
#SET 1
im1 = np.array(Image.open('SET1_L.jpg').convert('RGB'))
im2 = np.array(Image.open('SET1_M.jpg').convert('RGB'))
im3 = np.array(Image.open('SET1_R.jpg').convert('RGB'))
#DEFINING THE INPUT IMAGE MATRIX
img_input = np.zeros((3,),dtype='object')
img_input[0] = im1
img_input[1]= im2
img_input[2]= im3
#GETTING THE FINAL MOSAIC
final_mosaic = mymosaic(img_input)
#PLOTTING THE FINAL MOSAIC
plt.imshow(final_mosaic)
plt.show()
'''
##############################################################################
#SET 2
im1 = np.array(Image.open('SET2_L.jpg').convert('RGB'))
im2 = np.array(Image.open('SET2_M.jpg').convert('RGB'))
im3 = np.array(Image.open('SET2_R.jpg').convert('RGB'))
#DEFINING THE INPUT IMAGE MATRIX
img_input = np.zeros((3,),dtype='object')
img_input[0] = im1
img_input[1]= im2
img_input[2]= im3
#GETTING THE FINAL MOSAIC
final_mosaic = mymosaic(img_input)
#PLOTTING THE FINAL MOSAIC
plt.imshow(final_mosaic)
plt.show()
##############################################################################
'''
'''
##############################################################################
#TEST IMAGE
im1 = np.array(Image.open('1_M.jpg').convert('RGB'))
im2 = np.array(Image.open('1_R.jpg').convert('RGB'))
#TEST IMAGE INPUT MATRIX
img_input = np.zeros((2,),dtype='object')
img_input[0] = im1
img_input[1]= im2
#GETTING THE FINAL MOSAIC
final_mosaic = mymosaic(img_input)
#PLOTTING THE FINAL MOSAIC
plt.imshow(final_mosaic)
plt.show()
##############################################################################
'''
if __name__=="__main__":
wrapper()
| [
"matplotlib.pyplot.imshow",
"numpy.zeros",
"PIL.Image.open",
"matplotlib.pyplot.show"
] | [((511, 541), 'numpy.zeros', 'np.zeros', (['(3,)'], {'dtype': '"""object"""'}), "((3,), dtype='object')\n", (519, 541), True, 'import numpy as np\n'), ((715, 739), 'matplotlib.pyplot.imshow', 'plt.imshow', (['final_mosaic'], {}), '(final_mosaic)\n', (725, 739), True, 'import matplotlib.pyplot as plt\n'), ((744, 754), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (752, 754), True, 'import matplotlib.pyplot as plt\n'), ((295, 319), 'PIL.Image.open', 'Image.open', (['"""SET1_L.jpg"""'], {}), "('SET1_L.jpg')\n", (305, 319), False, 'from PIL import Image\n'), ((355, 379), 'PIL.Image.open', 'Image.open', (['"""SET1_M.jpg"""'], {}), "('SET1_M.jpg')\n", (365, 379), False, 'from PIL import Image\n'), ((415, 439), 'PIL.Image.open', 'Image.open', (['"""SET1_R.jpg"""'], {}), "('SET1_R.jpg')\n", (425, 439), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
#
# Observed Data Object
#
# Started: Jan 2015 (KDG)
#
from __future__ import print_function
import string
import numpy as np
import matplotlib.pyplot as pyplot
from astropy.table import Table
from Ext import f99
from Ext import extdata
# Object for the observed dust data
class ObsData():
def __init__(self, wavelengths, wavelengths_emission):
# read in the observed extinction curve
extinfo = extdata.ExtData()
extinfo.read_ext_data('Ext/azv18_ext.fits')
# compute the dust extintion curve one the supplied wavelength grid
self.n_wavelengths = len(wavelengths)
self.wavelengths = wavelengths
self.Rv = extinfo.rv[0]
self.avnhi = 1./extinfo.nhiav[0]
self.alnhi = self.avnhi*f99.f99(self.Rv, 1.0/wavelengths, x0=extinfo.fm90['x0'][0], gamma=extinfo.fm90['gamma'][0],
c2=extinfo.fm90['C2'][0],c3=extinfo.fm90['C3'][0],c4=extinfo.fm90['C4'][0])
# Kirill sent those column densities toward AzV 18:
# Fe II : 15.58 +/- 0.02
# Mg II : 16.50 +/- 0.03
# O I : 18.27 +/- 0.09
# Si II : 16.63 +/- 0.03
#
# Kirill sent at depletion for carbon of -0.15 (F_* = 0.5)
# C, O, Mg, Si, Fe
tot_abund_log = np.array([7.52, 8.14, 6.88, 6.96, 6.89])
tot_abund_unc_log = np.array([0.14, 0.12, 0.09, 0.16, 0.11])
tot_abund = np.power(10.,tot_abund_log - 6.0)
tot_abund_up = np.power(10.,tot_abund_log + tot_abund_unc_log - 6.0)
tot_abund_down = np.power(10.,tot_abund_log - tot_abund_unc_log - 6.0)
tot_abund_unc = 0.5*(tot_abund_up - tot_abund_down)
#print(tot_abund, tot_abund_unc)
gas_abund_log = np.array([18.27, 16.50, 16.63, 15.58])
gas_abund_unc_log = np.array([0.09, 0.03, 0.03, 0.02])
gas_abund = 1e6*np.power(10.,gas_abund_log)/np.power(10.,22.04)
nhi = np.power(10.,22.04)
gas_abund_up = 1e6*np.power(10.,gas_abund_log+gas_abund_unc_log)/nhi
gas_abund_down = 1e6*np.power(10.,gas_abund_log-gas_abund_unc_log)/nhi
gas_abund_unc = 0.5*(gas_abund_up - gas_abund_down)
#print(gas_abund_unc)
nhi_unc = 0.5*(np.power(10.,22.04+0.12) - np.power(10.,22.04-0.18))
gas_abund = np.concatenate(([1e-6*np.power(10.,tot_abund_log[0]-0.15)],gas_abund))
gas_abund_unc = np.concatenate(([0.2*gas_abund[0]],gas_abund_unc))
gas_abund_unc = gas_abund*np.sqrt((np.power((gas_abund_unc/gas_abund),2) + np.power((nhi_unc/nhi),2)))
#print(gas_abund, gas_abund_unc)
dust_abund = tot_abund - gas_abund
dust_abund_unc = np.sqrt(np.square(tot_abund_unc) + np.square(gas_abund_unc))
#print(dust_abund, dust_abund_unc)
self.total_abundance = {'C': (tot_abund[0], tot_abund_unc[0]),
'O': (tot_abund[1], tot_abund_unc[1]),
'Mg': (tot_abund[2], tot_abund_unc[2]),
'Si': (tot_abund[3], tot_abund_unc[3]),
'Fe': (tot_abund[4], tot_abund_unc[4])}
self.depletions = {'C': (dust_abund[0], dust_abund_unc[0]),
'O': (dust_abund[1], dust_abund_unc[1]),
'Mg': (dust_abund[2], dust_abund_unc[2]),
'Si': (dust_abund[3], dust_abund_unc[3]),
'Fe': (dust_abund[4], dust_abund_unc[4])}
self.fit_depletions = True
self.fit_ir_emission = False
self.fit_scat_param = False
if __name__ == "__main__":
print('No test code')
| [
"numpy.power",
"Ext.f99.f99",
"Ext.extdata.ExtData",
"numpy.square",
"numpy.array",
"numpy.concatenate"
] | [((442, 459), 'Ext.extdata.ExtData', 'extdata.ExtData', ([], {}), '()\n', (457, 459), False, 'from Ext import extdata\n'), ((1308, 1348), 'numpy.array', 'np.array', (['[7.52, 8.14, 6.88, 6.96, 6.89]'], {}), '([7.52, 8.14, 6.88, 6.96, 6.89])\n', (1316, 1348), True, 'import numpy as np\n'), ((1377, 1417), 'numpy.array', 'np.array', (['[0.14, 0.12, 0.09, 0.16, 0.11]'], {}), '([0.14, 0.12, 0.09, 0.16, 0.11])\n', (1385, 1417), True, 'import numpy as np\n'), ((1439, 1474), 'numpy.power', 'np.power', (['(10.0)', '(tot_abund_log - 6.0)'], {}), '(10.0, tot_abund_log - 6.0)\n', (1447, 1474), True, 'import numpy as np\n'), ((1496, 1551), 'numpy.power', 'np.power', (['(10.0)', '(tot_abund_log + tot_abund_unc_log - 6.0)'], {}), '(10.0, tot_abund_log + tot_abund_unc_log - 6.0)\n', (1504, 1551), True, 'import numpy as np\n'), ((1575, 1630), 'numpy.power', 'np.power', (['(10.0)', '(tot_abund_log - tot_abund_unc_log - 6.0)'], {}), '(10.0, tot_abund_log - tot_abund_unc_log - 6.0)\n', (1583, 1630), True, 'import numpy as np\n'), ((1755, 1792), 'numpy.array', 'np.array', (['[18.27, 16.5, 16.63, 15.58]'], {}), '([18.27, 16.5, 16.63, 15.58])\n', (1763, 1792), True, 'import numpy as np\n'), ((1822, 1856), 'numpy.array', 'np.array', (['[0.09, 0.03, 0.03, 0.02]'], {}), '([0.09, 0.03, 0.03, 0.02])\n', (1830, 1856), True, 'import numpy as np\n'), ((1943, 1964), 'numpy.power', 'np.power', (['(10.0)', '(22.04)'], {}), '(10.0, 22.04)\n', (1951, 1964), True, 'import numpy as np\n'), ((2402, 2455), 'numpy.concatenate', 'np.concatenate', (['([0.2 * gas_abund[0]], gas_abund_unc)'], {}), '(([0.2 * gas_abund[0]], gas_abund_unc))\n', (2416, 2455), True, 'import numpy as np\n'), ((779, 959), 'Ext.f99.f99', 'f99.f99', (['self.Rv', '(1.0 / wavelengths)'], {'x0': "extinfo.fm90['x0'][0]", 'gamma': "extinfo.fm90['gamma'][0]", 'c2': "extinfo.fm90['C2'][0]", 'c3': "extinfo.fm90['C3'][0]", 'c4': "extinfo.fm90['C4'][0]"}), "(self.Rv, 1.0 / wavelengths, x0=extinfo.fm90['x0'][0], gamma=extinfo\n 
.fm90['gamma'][0], c2=extinfo.fm90['C2'][0], c3=extinfo.fm90['C3'][0],\n c4=extinfo.fm90['C4'][0])\n", (786, 959), False, 'from Ext import f99\n'), ((1909, 1930), 'numpy.power', 'np.power', (['(10.0)', '(22.04)'], {}), '(10.0, 22.04)\n', (1917, 1930), True, 'import numpy as np\n'), ((1881, 1910), 'numpy.power', 'np.power', (['(10.0)', 'gas_abund_log'], {}), '(10.0, gas_abund_log)\n', (1889, 1910), True, 'import numpy as np\n'), ((1990, 2039), 'numpy.power', 'np.power', (['(10.0)', '(gas_abund_log + gas_abund_unc_log)'], {}), '(10.0, gas_abund_log + gas_abund_unc_log)\n', (1998, 2039), True, 'import numpy as np\n'), ((2069, 2118), 'numpy.power', 'np.power', (['(10.0)', '(gas_abund_log - gas_abund_unc_log)'], {}), '(10.0, gas_abund_log - gas_abund_unc_log)\n', (2077, 2118), True, 'import numpy as np\n'), ((2233, 2261), 'numpy.power', 'np.power', (['(10.0)', '(22.04 + 0.12)'], {}), '(10.0, 22.04 + 0.12)\n', (2241, 2261), True, 'import numpy as np\n'), ((2260, 2288), 'numpy.power', 'np.power', (['(10.0)', '(22.04 - 0.18)'], {}), '(10.0, 22.04 - 0.18)\n', (2268, 2288), True, 'import numpy as np\n'), ((2684, 2708), 'numpy.square', 'np.square', (['tot_abund_unc'], {}), '(tot_abund_unc)\n', (2693, 2708), True, 'import numpy as np\n'), ((2711, 2735), 'numpy.square', 'np.square', (['gas_abund_unc'], {}), '(gas_abund_unc)\n', (2720, 2735), True, 'import numpy as np\n'), ((2497, 2535), 'numpy.power', 'np.power', (['(gas_abund_unc / gas_abund)', '(2)'], {}), '(gas_abund_unc / gas_abund, 2)\n', (2505, 2535), True, 'import numpy as np\n'), ((2537, 2563), 'numpy.power', 'np.power', (['(nhi_unc / nhi)', '(2)'], {}), '(nhi_unc / nhi, 2)\n', (2545, 2563), True, 'import numpy as np\n'), ((2329, 2368), 'numpy.power', 'np.power', (['(10.0)', '(tot_abund_log[0] - 0.15)'], {}), '(10.0, tot_abund_log[0] - 0.15)\n', (2337, 2368), True, 'import numpy as np\n')] |
import inspect
import numpy as np
from limix.core.type.exception import NotArrayConvertibleError
def my_name():
return inspect.stack()[1][3]
def assert_finite_array(*arrays):
for a in arrays:
if not np.isfinite(a).all():
raise ValueError("Array must not contain infs or NaNs")
def assert_make_float_array(arr, arg_name):
try:
arr = np.asarray(arr, dtype=float)
except ValueError as e:
raise NotArrayConvertibleError("%s has to be float-array "
"convertible." % arg_name)
return arr
def assert_type(arg, type_, param_name):
err_msg = ("Parameter %s is not of type %s.%s."
% (param_name, type_.__module__, type_.__name__))
if type(arg) is not type_:
raise TypeError(err_msg)
def assert_subtype(arg, type_, param_name):
err_msg = ("Parameter %s must have %s.%s inheritance."
% (param_name, type_.__module__, type_.__name__))
if not issubclass(type(arg), type_):
raise TypeError(err_msg)
def assert_type_or_list_type(arg, type_, param_name):
err_msg = ("Parameter %s is not of type "
"%s.%s nor a list or a tuple of the same."
% (param_name, type_.__module__, type_.__name__))
if type(arg) in (list, tuple):
for a in arg:
if type(a) is not type_:
raise TypeError(err_msg)
else:
if type(arg) is not type_:
raise TypeError(err_msg)
def assert_subtype_or_list_subtype(arg, type_, param_name):
err_msg = ("Parameter %s is not of type "
"%s.%s nor a list or a tuple of the same."
% (param_name, type_.__module__, type_.__name__))
if type(arg) in (list, tuple):
for a in arg:
if not issubclass(type(a), type_):
raise TypeError(err_msg)
else:
if issubclass(type(arg), type_):
raise TypeError(err_msg)
| [
"numpy.isfinite",
"numpy.asarray",
"inspect.stack",
"limix.core.type.exception.NotArrayConvertibleError"
] | [((378, 406), 'numpy.asarray', 'np.asarray', (['arr'], {'dtype': 'float'}), '(arr, dtype=float)\n', (388, 406), True, 'import numpy as np\n'), ((125, 140), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (138, 140), False, 'import inspect\n'), ((449, 525), 'limix.core.type.exception.NotArrayConvertibleError', 'NotArrayConvertibleError', (["('%s has to be float-array convertible.' % arg_name)"], {}), "('%s has to be float-array convertible.' % arg_name)\n", (473, 525), False, 'from limix.core.type.exception import NotArrayConvertibleError\n'), ((219, 233), 'numpy.isfinite', 'np.isfinite', (['a'], {}), '(a)\n', (230, 233), True, 'import numpy as np\n')] |
import sys
import tables
from collections import OrderedDict
import datetime
import os
import numpy as np
from copy import deepcopy
from qtpy import QtGui, QtWidgets
from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt
from pathlib import Path
import pickle
from pyqtgraph.dockarea import Dock
from pymodaq.daq_utils.gui_utils import DockArea, select_file
from pyqtgraph.parametertree import Parameter, ParameterTree
import pyqtgraph.parametertree.parameterTypes as pTypes
import pymodaq.daq_utils.custom_parameter_tree as custom_tree
from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand
from pymodaq.daq_utils.plotting.qled import QLED
from pymodaq.daq_viewer.daq_viewer_main import DAQ_Viewer
from pymodaq.daq_utils.plotting.viewer1D.viewer1D_main import Viewer1D
from pymodaq.daq_utils import daq_utils as utils
from pymodaq.daq_utils.h5modules import H5Browser, H5Saver, browse_data, H5BrowserUtil
from pymodaq_spectro.utils.calibration import Calibration
from pymodaq.dashboard import DashBoard
from units_converter.main import UnitsConverter
import logging
logger = utils.set_logger(utils.get_module_name(__file__))  # module-level logger, named after this module
spectro_path = utils.get_set_config_path('spectrometer_configs')  # folder holding the spectrometer preset files
class Spectrometer(QObject):
    """
    Defines a Spectrometer object, unified interface for many spectrometers

    Parameters that could be set in the selected detector plugin (should be defined there):
    'laser_wl' : value of the configured laser (could eventually be changed, case of Xplora, Labram...)
    'spectro_center_freq': value of the configured grating center wavelength (could eventually be changed, case of Shamrock, Xplora...)

    """
    # custom signal carrying log messages; connected to the logger list widget in set_GUI
    log_signal = Signal(str)

    # list of dicts describing the settings tree shown on the user interface:
    # * 'config_settings': laser wavelength and detector display options
    # * 'calib_settings': manual calibration (polynomial coeffs, save/load to xml)
    # * 'acq_settings': central frequency, unit selection and exposure time
    params = [{'title': 'Configuration settings:', 'name': 'config_settings', 'type': 'group', 'children': [
                {'title': 'Laser wavelength (nm):', 'name': 'laser_wl', 'type': 'float', 'value': 515.},
                {'title': 'Laser wavelength (nm):', 'name': 'laser_wl_list', 'type': 'list', 'limits':['']},
                {'title': 'Current Detector:', 'name': 'curr_det', 'type': 'str', 'value': ''},
                {'title': 'Show detector:', 'name': 'show_det', 'type': 'bool', 'value': False},
        ],},
              {'title': 'Calibration settings:', 'name': 'calib_settings', 'type': 'group', 'children': [
                  {'title': 'Use calibration:', 'name': 'use_calib', 'type': 'bool', 'value': False},
                  {'title': 'Save calibration', 'name': 'save_calib', 'type': 'bool_push', 'value': False},
                  {'title': 'Load calibration', 'name': 'load_calib', 'type': 'bool_push', 'value': False},
                  {'title': 'Calibration coeffs:', 'name': 'calib_coeffs', 'type': 'group', 'children': [
                      {'title': 'Center wavelength (nm):', 'name': 'center_calib', 'type': 'float', 'value': 515.},
                      {'title': 'Slope (nm/pxl):', 'name': 'slope_calib', 'type': 'float', 'value': 1.},
                      {'title': 'Second order :', 'name': 'second_calib', 'type': 'float', 'value': 0},
                      {'title': 'third:', 'name': 'third_calib', 'type': 'float', 'value': 0},]},
                  {'title': 'Perform calibration:', 'name': 'do_calib', 'type': 'bool', 'value': False},
              ]},
              {'title': 'Acquisition settings:', 'name': 'acq_settings', 'type': 'group', 'children': [
                  {'title': 'Spectro. Center:', 'name': 'spectro_center_freq', 'type': 'float', 'value': 800,},
                  {'title': 'Spectro. Center:', 'name': 'spectro_center_freq_txt', 'type': 'str', 'value': '????', 'readonly':True },
                  {'title': 'Units:', 'name': 'units', 'type': 'list', 'value': 'nm', 'limits': ['nm', 'cm-1', 'eV']},
                  {'title': 'Exposure (ms):', 'name': 'exposure_ms', 'type': 'float', 'value': 100, },
              ]},
              ]
    def __init__(self, parent):
        """Build the Spectrometer application inside *parent*.

        Parameters
        ----------
        parent: DockArea
            the dock area that will host all the module's docks

        Raises
        ------
        Exception
            if *parent* is not a DockArea instance
        """
        QLocale.setDefault(QLocale(QLocale.English, QLocale.UnitedStates))
        super().__init__()
        if not isinstance(parent, DockArea):
            raise Exception('no valid parent container, expected a DockArea')
        self.wait_time = 2000  # default status-bar message duration, ms
        self.offline = True
        self.dockarea = parent
        self.mainwindow = parent.parent()
        self.spectro_widget = QtWidgets.QWidget()
        self.data_dict = None  # last OrderedDict received from the detector (see show_data)
        """
        List of the possible plugins that could be used with Spectrometer module
        type : dimensionality of the detector
        name: name of the plugin
        calib = True means there is a builtin calibration of the frequency axis
        movable : tells if the dispersion can be set (for instance by moving a grating)
        unit: valid only if calib is True. Unit of the calibration axis (x_axis of the detector), most often in
            nanometers. Possible values are 'nm', 'radfs' (rad/femtosecond), 'eV'
        laser: if False, laser cannot be changed by the program, do it manually
        laser_list: if laser is True, laser_list gives a list of selectable lasers
        """
        self.current_det = None # will be after initialization
        self.laser_set_manual = True
        #init the object parameters
        self.detector = None
        self.save_file_pathname = None
        self._spectro_wl = 550 # center wavelength of the spectrum, nm
        self.viewer_freq_axis = utils.Axis(data=None, label='Photon energy', units='')
        self.raw_data = []
        # init the user interface: dashboard first (it owns the detector), then this module's GUI
        self.dashboard = self.set_dashboard()
        self.dashboard.preset_loaded_signal.connect(lambda: self.show_detector(False))
        self.dashboard.preset_loaded_signal.connect(self.set_detector)
        self.dashboard.preset_loaded_signal.connect(self.initialized)
        self.set_GUI()
        self.dashboard.new_preset_created.connect(lambda: self.create_menu(self.menubar))
        self.show_detector(False)
        # disabled until a preset is loaded and the detector reports its init state
        self.dockarea.setEnabled(False)
def set_dashboard(self):
params = [{'title': 'Spectro Settings:', 'name': 'spectro_settings', 'type': 'group', 'children': [
{'title': 'Is calibrated?', 'name': 'iscalibrated', 'type': 'bool', 'value': False, 'tooltip':
'Whether the selected plugin has internal frequency calibration or not.'},
{'title': 'Movable?', 'name': 'ismovable', 'type': 'bool', 'value': False, 'tooltip':
'Whether the selected plugin has a functionality to change its central frequency: as a movable grating'
' for instance.'},
{'title': 'Laser selectable?', 'name': 'laser_selectable', 'type': 'bool', 'value': False, 'tooltip':
'Whether the selected plugin has a functionality to change its excitation ray'},
{'title': 'Laser ray:', 'name': 'laser_ray', 'type': 'list', 'value': '', 'show_pb': True, 'tooltip':
'List of settable laser rays (not manual ones)'},]},
]
dashboard = DashBoard(self.dockarea.addTempArea())
dashboard.set_preset_path(spectro_path)
options =[dict(path='saving_options', options_dict=dict(visible=False)),
dict(path='use_pid', options_dict=dict(visible=False)),
dict(path='Moves', options_dict=dict(visible=False))]
dashboard.set_extra_preset_params(params, options)
dashboard.dockarea.window().setVisible(False)
return dashboard
    def set_GUI(self):
        """Build the user interface of this module: viewer and logger docks,
        menu bar, toolbar, status bar and the three settings trees
        (Configuration, Calibration, Acquisition)."""
        ###########################################
        ###########################################
        # init the docks containing the main widgets
        #######################################################################################################################
        # create a dock containing a viewer object, displaying the data from the spectrometer
        self.dock_viewer = Dock('Viewer dock', size=(350, 350))
        self.dockarea.addDock(self.dock_viewer, 'left')
        target_widget = QtWidgets.QWidget()
        self.viewer = Viewer1D(target_widget)
        self.dock_viewer.addWidget(target_widget)
        ################################################################
        # create a logger dock where to store info sent from the program
        self.dock_logger = Dock("Logger")
        self.logger_list = QtWidgets.QListWidget()
        self.logger_list.setMinimumWidth(300)
        self.dock_logger.addWidget(self.logger_list)
        self.dockarea.addDock(self.dock_logger, 'right')
        self.log_signal[str].connect(self.add_log)
        ############################################
        # creating a menubar
        self.menubar = self.mainwindow.menuBar()
        self.create_menu(self.menubar)
        # creating a toolbar
        self.toolbar = QtWidgets.QToolBar()
        self.create_toolbar()
        self.mainwindow.addToolBar(self.toolbar)
        # creating a status bar with three permanent indicators:
        # laser wavelength, central frequency and an init LED
        self.statusbar = QtWidgets.QStatusBar()
        self.statusbar.setMaximumHeight(25)
        self.status_laser = QtWidgets.QLabel('????')
        self.status_laser.setAlignment(Qt.AlignCenter)
        self.status_laser.setMaximumWidth(80)
        self.status_laser.setMinimumWidth(80)
        self.status_laser.setToolTip('Current laser wavelength')
        self.status_laser.setStyleSheet("background-color: red")  # red until the laser wl is known
        self.status_center = QtWidgets.QLabel('????')
        self.status_center.setAlignment(Qt.AlignCenter)
        self.status_center.setMaximumWidth(80)
        self.status_center.setMinimumWidth(80)
        self.status_center.setToolTip('center frequency of the spectrum, either in nm or cm-1')
        self.status_center.setStyleSheet("background-color: red")  # red until the center freq is known
        self.status_init = QLED()
        self.status_init.setToolTip('Initialization state of the detector')
        self.status_init.set_as_false()
        self.status_init.clickable = False
        self.statusbar.addPermanentWidget(self.status_laser)
        self.statusbar.addPermanentWidget(self.status_center)
        self.statusbar.addPermanentWidget(self.status_init)
        self.dockarea.window().setStatusBar(self.statusbar)
        #############################################
        self.settings = Parameter.create(name='settings', type='group', children=self.params)
        # any change to the tree on the user interface will call parameter_tree_changed
        self.settings.sigTreeStateChanged.connect(self.parameter_tree_changed)
        dock_config_settings = Dock('Configuration', size=(300, 350))
        self.dockarea.addDock(dock_config_settings, 'above', self.dock_logger)
        # create main parameter tree
        self.config_settings_tree = ParameterTree()
        dock_config_settings.addWidget(self.config_settings_tree, 10)
        self.config_settings_tree.setMinimumWidth(300)
        self.config_settings_tree.setParameters(self.settings.child(('config_settings')), showTop=False)
        dock_calib_settings = Dock('Calibration', size=(300, 350))
        self.dockarea.addDock(dock_calib_settings, 'above', self.dock_logger)
        # create main parameter tree
        self.calib_settings_tree = ParameterTree()
        dock_calib_settings.addWidget(self.calib_settings_tree, 10)
        self.calib_settings_tree.setMinimumWidth(300)
        self.calib_settings_tree.setParameters(self.settings.child(('calib_settings')), showTop=False)
        # this one for the custom application settings
        dock_acq_settings = Dock('Acquisition', size=(300, 350))
        self.dockarea.addDock(dock_acq_settings, 'above', dock_config_settings)
        # create main parameter tree
        self.acq_settings_tree = ParameterTree()
        dock_acq_settings.addWidget(self.acq_settings_tree, 10)
        self.acq_settings_tree.setMinimumWidth(300)
        self.acq_settings_tree.setParameters(self.settings.child(('acq_settings')), showTop=False)
@Slot(ThreadCommand)
def cmd_from_det(self,status):
try:
if status.command == 'spectro_wl':
self.status_center.setStyleSheet("background-color: green")
self.spectro_wl_is(status.attributes[0])
elif status.command == 'laser_wl':
#self.laser_set_manual = False
self.settings.child('config_settings', 'laser_wl_list').setValue(status.attributes[0])
self.status_laser.setText('{:}nm'.format(status.attributes[0]))
self.status_laser.setStyleSheet("background-color: green")
self.update_center_frequency(self.spectro_wl)
elif status.command == 'exposure_ms':
self.settings.child('acq_settings', 'exposure_ms').setValue(status.attributes[0])
elif status.command == "x_axis":
x_axis = status.attributes[0]
if np.any(x_axis['data'] != self.viewer_freq_axis['data']) and self.current_det['calib']:
self.viewer_freq_axis.update(x_axis)
self.update_axis()
except Exception as e:
logger.exception(str(e))
def update_status(self, txt, wait_time=1000, log_type=None):
"""
"""
self.statusbar.showMessage(txt,wait_time)
if log_type is not None:
self.log_signal.emit(txt)
    def set_detector(self):
        """Bind the first detector module of the loaded preset to this Spectrometer.

        Reads the spectrometer capabilities (laser selectable, movable grating,
        builtin calibration) from the preset parameters into ``self.current_det``,
        connects the detector signals, adapts the laser-wavelength widgets and
        queries the detector's current central wavelength, laser wavelength and
        exposure time.
        """
        self.detector = self.dashboard.detector_modules[0]
        self.settings.child('config_settings', 'curr_det').setValue(
            f"{self.detector.settings.child('main_settings','DAQ_type').value()} / "
            f"{self.detector.settings.child('main_settings','detector_type').value()} / {self.detector.title}")
        self.detector.custom_sig[ThreadCommand].connect(self.cmd_from_det)
        # capabilities of the detector as declared in the preset (see set_dashboard)
        self.current_det = \
            dict(laser=self.dashboard.preset_manager.preset_params.child('spectro_settings', 'laser_selectable').value(),
                 laser_list=self.dashboard.preset_manager.preset_params.child('spectro_settings', 'laser_ray').opts['limits'],
                 movable=self.dashboard.preset_manager.preset_params.child('spectro_settings', 'ismovable').value(),
                 calib=self.dashboard.preset_manager.preset_params.child('spectro_settings', 'iscalibrated').value(),
                 )
        self.detector.grab_done_signal.connect(self.show_data)
        # temporarily disconnect the tree callback while toggling widget visibility,
        # so these programmatic changes don't trigger parameter_tree_changed
        self.settings.sigTreeStateChanged.disconnect(self.parameter_tree_changed)
        if self.current_det['laser']:
            self.settings.child('config_settings', 'laser_wl_list').show()
            self.settings.child('config_settings', 'laser_wl').hide()
            self.settings.child('config_settings', 'laser_wl_list').setOpts(limits=self.current_det['laser_list'])
        else:
            self.settings.child('config_settings', 'laser_wl').show()
            self.settings.child('config_settings', 'laser_wl_list').hide()
        self.settings.sigTreeStateChanged.connect(self.parameter_tree_changed)
        # apply current detector particularities
        self.get_spectro_wl()
        QtWidgets.QApplication.processEvents()
        self.get_laser_wl()
        QtWidgets.QApplication.processEvents()
        self.get_exposure_ms()
        QtWidgets.QApplication.processEvents()
def get_exposure_ms(self):
self.detector.command_detector.emit(ThreadCommand('get_exposure_ms'))
def set_exposure_ms(self, data):
self.detector.command_detector.emit(ThreadCommand('set_exposure_ms', [data]))
@Slot(bool)
def initialized(self, state, offline=False):
self.offline = offline
self.grab_action.setEnabled(state)
self.snap_action.setEnabled(state)
if state or offline:
self.status_init.set_as_true()
self.dockarea.setEnabled(True)
else:
self.status_init.set_as_false()
def update_center_frequency(self, spectro_wl):
self._spectro_wl = spectro_wl
if self.settings.child('acq_settings', 'units').value() == 'nm':
self.settings.child('acq_settings', 'spectro_center_freq').setValue(spectro_wl)
elif self.settings.child('acq_settings', 'units').value() == 'cm-1':
self.settings.child('acq_settings', 'spectro_center_freq').setValue(Enm2cmrel(spectro_wl,
self.settings.child(
'config_settings',
'laser_wl').value()))
elif self.settings.child('acq_settings', 'units').value() == 'eV':
self.settings.child('acq_settings', 'spectro_center_freq').setValue(nm2eV(spectro_wl))
self.set_status_center(self.settings.child('acq_settings', 'spectro_center_freq').value(),
self.settings.child('acq_settings', 'units').value())
def set_status_center(self, val, unit, precision=3):
self.status_center.setText(f'{val:.{precision}f} {unit}')
def spectro_wl_is(self, spectro_wl):
"""
this slot receives a signal from the detector telling it what's the current spectro_wl
Parameters
----------
spectro_wl
"""
self._spectro_wl = spectro_wl
self.update_center_frequency(spectro_wl)
def set_spectro_wl(self, spectro_wl):
try:
if self.current_det['movable']:
self.detector.command_detector.emit(ThreadCommand('set_spectro_wl', [spectro_wl]))
except Exception as e:
logger.exception(str(e))
def get_spectro_wl(self):
if self.current_det['calib']:
self.settings.child('acq_settings', 'spectro_center_freq').show()
self.settings.child('acq_settings', 'spectro_center_freq_txt').hide()
self.detector.command_detector.emit(ThreadCommand('get_spectro_wl'))
self.detector.command_detector.emit(ThreadCommand('get_axis'))
else:
self.settings.child('acq_settings', 'spectro_center_freq').hide()
self.settings.child('acq_settings', 'spectro_center_freq_txt').show()
self.viewer_freq_axis['units'] = 'Pxls'
def get_laser_wl(self):
if self.current_det['laser']:
self.detector.command_detector.emit(ThreadCommand('get_laser_wl'))
else:
self.settings.child('config_settings', 'laser_wl').setValue(0)
    @property
    def spectro_wl(self):
        """float: central wavelength (nm) of the spectrometer (last value reported by or sent to the hardware)."""
        return self._spectro_wl

    @spectro_wl.setter
    def spectro_wl(self, spec_wl):
        # delegate to the hardware; _spectro_wl is updated when the detector answers back (spectro_wl_is)
        self.set_spectro_wl(spec_wl)
def show_detector(self, show=True):
self.dashboard.mainwindow.setVisible(show)
for area in self.dashboard.dockarea.tempAreas:
area.window().setVisible(show)
    def parameter_tree_changed(self, param, changes):
        """React to any change in the settings tree.

        Dispatches on the changed parameter's name: show/hide the detector,
        convert and apply a new central frequency, switch display units,
        move the laser wavelength, set the exposure, open/close the
        calibration module, save/load calibration to xml, and apply/remove
        the manual polynomial calibration.

        Parameters
        ----------
        param: Parameter
            the top-level settings parameter
        changes: list of (Parameter, str, object)
            changes as emitted by pyqtgraph's sigTreeStateChanged
        """
        for param, change, data in changes:
            path = self.settings.childPath(param)
            if path is not None:
                childName = '.'.join(path)
            else:
                childName = param.name()
            if change == 'childAdded':
                pass
            elif change == 'value':
                if param.name() == 'show_det':
                    self.show_detector(data)
                elif param.name() == 'spectro_center_freq':
                    # convert the user-entered value to nm before sending it to the hardware
                    unit = self.settings.child('acq_settings', 'units').value()
                    if unit == 'nm':
                        center_wavelength = data
                    elif unit == 'cm-1':
                        center_wavelength = Ecmrel2Enm(data, self.settings.child( 'config_settings', 'laser_wl').value())
                    elif unit == 'eV':
                        center_wavelength = eV2nm(data)
                    if int(self.spectro_wl*100) != int(100*center_wavelength): # comparison at 1e-2 to avoid feedback loops
                        self.spectro_wl = center_wavelength
                    self.update_axis()
                elif param.name() == 'units':
                    # re-express the displayed central frequency in the newly selected unit
                    if self.settings.child('acq_settings', 'spectro_center_freq').value() > 0.000000001:
                        if data == 'nm':
                            self.settings.child('acq_settings', 'spectro_center_freq').setValue(self._spectro_wl)
                        elif data == 'cm-1':
                            self.settings.child('acq_settings', 'spectro_center_freq').setValue(Enm2cmrel(self._spectro_wl,
                                self.settings.child( 'config_settings', 'laser_wl').value()))
                        elif data == 'eV':
                            self.settings.child('acq_settings', 'spectro_center_freq').setValue(nm2eV(self._spectro_wl))
                        self.set_status_center(self.settings.child('acq_settings', 'spectro_center_freq').value(),
                                               self.settings.child('acq_settings', 'units').value())
                elif param.name() == 'laser_wl_list':
                    if data is not None:
                        self.move_laser_wavelength(data)
                elif param.name() == 'laser_wl':
                    if data is not None:
                        self.move_laser_wavelength(data)
                        if int(data) == 0:
                            # no laser: relative units (cm-1) make no sense, lock to nm
                            self.settings.child('acq_settings', 'units').setValue('nm')
                            self.settings.child('acq_settings', 'units').setOpts(readonly=True)
                        else:
                            self.settings.child('acq_settings', 'units').setOpts(readonly=False)
                        if data != 0:
                            self.set_manual_laser_wl(data)
                elif param.name() == 'exposure_ms':
                    self.set_exposure_ms(data)
                elif param.name() == 'do_calib':
                    # open (or close) the interactive Calibration module; needs acquired data
                    if len(self.raw_data) != 0:
                        if data:
                            self.calib_dock = Dock('Calibration module')
                            self.dockarea.addDock(self.calib_dock)
                            self.calibration = Calibration(self.dockarea)
                            self.calib_dock.addWidget(self.calibration)
                            self.calibration.coeffs_calib.connect(self.update_calibration)
                        else:
                            self.calib_dock.close()
                elif param.name() == 'save_calib':
                    filename = select_file(start_path=self.save_file_pathname, save=True, ext='xml')
                    if filename != '':
                        custom_tree.parameter_to_xml_file(self.settings.child('calib_settings', 'calib_coeffs'), filename)
                elif param.name() == 'load_calib':
                    filename = select_file(start_path=self.save_file_pathname, save=False, ext='xml')
                    if filename != '':
                        children = custom_tree.XML_file_to_parameter(filename)
                        self.settings.child('calib_settings', 'calib_coeffs').restoreState(
                            Parameter.create(title='Calibration coeffs:', name='calib_coeffs', type='group',
                                             children=children).saveState())
                elif param.name() in custom_tree.iter_children(self.settings.child('calib_settings', 'calib_coeffs')) \
                        or param.name() == 'use_calib':
                    # apply (or remove) the manual polynomial calibration of the frequency axis
                    if self.settings.child('calib_settings', 'use_calib').value():
                        # highest order first, as expected by np.polyval
                        calib_coeffs = [self.settings.child('calib_settings', 'calib_coeffs', 'third_calib').value(),
                                        self.settings.child('calib_settings', 'calib_coeffs', 'second_calib').value(),
                                        self.settings.child('calib_settings', 'calib_coeffs', 'slope_calib').value(),
                                        self.settings.child('calib_settings', 'calib_coeffs', 'center_calib').value()]
                        self.update_center_frequency(self.settings.child('calib_settings', 'calib_coeffs', 'center_calib').value())
                        self.settings.child('acq_settings', 'spectro_center_freq').show()
                        self.settings.child('acq_settings', 'spectro_center_freq').setOpts(readonly=True)
                        self.status_center.setStyleSheet("background-color: green")
                        self.settings.child('acq_settings', 'spectro_center_freq_txt').hide()
                        # evaluate the polynomial on a pixel axis centered on the detector
                        x_axis_pxls = np.linspace(0, self.raw_data[0].size-1, self.raw_data[0].size)
                        self.viewer_freq_axis['data'] = np.polyval(calib_coeffs, x_axis_pxls-np.max(x_axis_pxls)/2)
                        self.update_axis()
                    else:
                        self.settings.child('acq_settings', 'spectro_center_freq').hide()
                        self.settings.child('acq_settings', 'spectro_center_freq_txt').show()
                        self.status_center.setStyleSheet("background-color: red")
            elif change == 'parent':
                pass
@Slot(list)
def update_calibration(self, coeffs):
self.settings.child('calib_settings', 'calib_coeffs', 'center_calib').setValue(coeffs[0])
self.settings.child('calib_settings', 'calib_coeffs', 'slope_calib').setValue(coeffs[1])
if len(coeffs) > 2:
self.settings.child('calib_settings', 'calib_coeffs', 'second_calib').setValue(coeffs[2])
else:
self.settings.child('calib_settings', 'calib_coeffs', 'second_calib').setValue(0)
if len(coeffs) > 3:
self.settings.child('calib_settings', 'calib_coeffs', 'third_calib').setValue(coeffs[3])
else:
self.settings.child('calib_settings', 'calib_coeffs', 'third_calib').setValue(0)
def set_manual_laser_wl(self, laser_wl):
messg = QtWidgets.QMessageBox()
messg.setText('You manually changed the laser wavelength to {:}nm!'.format(laser_wl))
messg.setInformativeText("Is that correct?")
messg.setStandardButtons(QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
ret = messg.exec()
if ret == QtWidgets.QMessageBox.Yes:
self.status_laser.setText('{:}nm'.format(laser_wl))
self.status_laser.setStyleSheet("background-color: green")
self.settings.child('acq_settings', 'units').setOpts(readonly=False)
def move_laser_wavelength(self, laser_wavelength):
#do hardware stuff if possible (Mock, labspec...)
try:
if self.current_det['laser']:
self.detector.command_detector.emit(ThreadCommand('set_laser_wl', [laser_wavelength]))
except Exception as e:
logger.exception(str(e))
@Slot(OrderedDict)
def show_data(self, data):
"""
do stuff with data from the detector if its grab_done_signal has been connected
Parameters
----------
data: (OrderedDict) #OrderedDict(name=self.title,x_axis=None,y_axis=None,z_axis=None,data0D=None,data1D=None,data2D=None)
"""
self.data_dict = data
if 'data1D' in data:
self.raw_data = []
for key in data['data1D']:
self.raw_data.append(data['data1D'][key]['data'])
if 'x_axis' in data['data1D'][key]:
x_axis = data['data1D'][key]['x_axis']
else:
x_axis = utils.Axis(
data=np.linspace(0, len(data['data1D'][key]['data'])-1, len(data['data1D'][key]['data'])),
units='pxls',
label='')
if self.viewer_freq_axis['data'] is None:
self.viewer_freq_axis.update(x_axis)
elif np.any(x_axis['data'] != self.viewer_freq_axis['data']) and self.current_det['calib']:
self.viewer_freq_axis.update(x_axis)
self.viewer.show_data(self.raw_data)
self.update_axis()
def update_axis(self):
axis = utils.Axis()
unit = self.settings.child('acq_settings', 'units').value()
if unit == 'nm':
axis['data'] = self.viewer_freq_axis['data']
elif unit == 'cm-1':
axis['data'] = Enm2cmrel(self.viewer_freq_axis['data'],
self.settings.child('config_settings', 'laser_wl').value())
elif unit == 'eV':
axis['data'] = nm2eV(self.viewer_freq_axis['data'])
axis['units'] = unit
axis['label'] = 'Photon energy'
self.viewer.x_axis = axis
def create_menu(self, menubar):
"""
"""
menubar.clear()
# %% create file menu
file_menu = menubar.addMenu('File')
load_action = file_menu.addAction('Load file')
load_action.triggered.connect(self.load_file)
save_action = file_menu.addAction('Save file')
save_action.triggered.connect(self.save_data)
export_action = file_menu.addAction('Export as ascii')
export_action.triggered.connect(lambda: self.save_data(export=True))
file_menu.addSeparator()
file_menu.addAction('Show log file', self.show_log)
file_menu.addSeparator()
quit_action = file_menu.addAction('Quit')
quit_action.triggered.connect(self.quit_function)
settings_menu = menubar.addMenu('Settings')
settings_menu.addAction('Show Units Converter', self.show_units_converter)
docked_menu = settings_menu.addMenu('Docked windows')
docked_menu.addAction('Load Layout', self.load_layout_state)
docked_menu.addAction('Save Layout', self.save_layout_state)
self.preset_menu = menubar.addMenu(self.dashboard.preset_menu)
self.preset_menu.menu().addSeparator()
self.preset_menu.menu().addAction('Offline Mode', lambda: self.initialized(state=False, offline=True))
def load_layout_state(self, file=None):
"""
Load and restore a layout state from the select_file obtained pathname file.
See Also
--------
utils.select_file
"""
try:
if file is None:
file = select_file(save=False, ext='dock')
if file is not None:
with open(str(file), 'rb') as f:
dockstate = pickle.load(f)
self.dockarea.restoreState(dockstate)
file = file.name
self.settings.child('loaded_files', 'layout_file').setValue(file)
except Exception as e:
logger.exception(str(e))
def save_layout_state(self, file=None):
"""
Save the current layout state in the select_file obtained pathname file.
Once done dump the pickle.
See Also
--------
utils.select_file
"""
try:
dockstate = self.dockarea.saveState()
if 'float' in dockstate:
dockstate['float'] = []
if file is None:
file = select_file(start_path=None, save=True, ext='dock')
if file is not None:
with open(str(file), 'wb') as f:
pickle.dump(dockstate, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
logger.exception(str(e))
def show_log(self):
import webbrowser
webbrowser.open(logging.getLogger('pymodaq').handlers[0].baseFilename)
def show_units_converter(self):
self.units_converter = UnitsConverter()
dock_converter = Dock('Units Converter', size=(300, 350))
self.dockarea.addDock(dock_converter, 'bottom', self.dock_logger)
dock_converter.addWidget(self.units_converter.parent)
def load_file(self):
data, fname, node_path = browse_data(ret_all=True)
if data is not None:
h5utils = H5BrowserUtil()
h5utils.open_file(fname)
data, axes, nav_axes, is_spread = h5utils.get_h5_data(node_path)
data_node = h5utils.get_node(node_path)
if data_node.attrs['type'] == 'data':
if data_node.attrs['data_dimension'] == '1D':
data_dict = OrderedDict(data1D=dict(raw=dict(data=data, x_axis=axes['x_axis'])))
self.show_data(data_dict)
h5utils.close_file()
def quit_function(self):
#close all stuff that need to be
if self.detector is not None:
self.detector.quit_fun()
QtWidgets.QApplication.processEvents()
self.mainwindow.close()
def create_toolbar(self):
self.toolbar.addWidget(QtWidgets.QLabel('Acquisition:'))
iconquit = QtGui.QIcon()
iconquit.addPixmap(QtGui.QPixmap(":/icons/Icon_Library/close2.png"), QtGui.QIcon.Normal,
QtGui.QIcon.Off)
self.quit_action = QtWidgets.QAction(iconquit, "Quit program", None)
self.toolbar.addAction(self.quit_action)
self.quit_action.triggered.connect(self.quit_function)
iconload = QtGui.QIcon()
iconload.addPixmap(QtGui.QPixmap(":/icons/Icon_Library/Open.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.loadaction = QtWidgets.QAction(iconload, "Load target file (.h5, .png, .jpg) or data from camera", None)
self.toolbar.addAction(self.loadaction)
self.loadaction.triggered.connect(self.load_file)
iconsave = QtGui.QIcon()
iconsave.addPixmap(QtGui.QPixmap(":/icons/Icon_Library/SaveAs.png"), QtGui.QIcon.Normal,
QtGui.QIcon.Off)
self.saveaction = QtWidgets.QAction(iconsave, "Save current data", None)
self.toolbar.addAction(self.saveaction)
self.saveaction.triggered.connect(self.save_data)
iconrun = QtGui.QIcon()
iconrun.addPixmap(QtGui.QPixmap(":/icons/Icon_Library/run2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.grab_action = QtWidgets.QAction(iconrun, 'Grab', None)
self.grab_action.setCheckable(True)
self.toolbar.addAction(self.grab_action)
self.grab_action.triggered.connect(self.grab_detector)
iconsnap = QtGui.QIcon()
iconsnap.addPixmap(QtGui.QPixmap(":/icons/Icon_Library/snap.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.snap_action = QtWidgets.QAction(iconsnap, 'Snap', None)
self.snap_action.triggered.connect(self.snap_detector)
self.toolbar.addAction(self.snap_action)
self.grab_action.setEnabled(False)
self.snap_action.setEnabled(False)
def grab_detector(self):
self.detector.ui.grab_pb.click()
def snap_detector(self):
self.detector.ui.single_pb.click()
def save_data(self, export=False):
try:
if export:
ext = 'dat'
else:
ext = 'h5'
path = select_file(start_path=self.save_file_pathname, save=True, ext=ext)
if not (not(path)):
if not export:
h5saver = H5Saver(save_type='detector')
h5saver.init_file(update_h5=True, custom_naming=False, addhoc_file_path=path)
settings_str = b'<All_settings>' + custom_tree.parameter_to_xml_string(self.settings)
if self.detector is not None:
settings_str += custom_tree.parameter_to_xml_string(self.detector.settings)
if hasattr(self.detector.ui.viewers[0], 'roi_manager'):
settings_str += custom_tree.parameter_to_xml_string(self.detector.ui.viewers[0].roi_manager.settings)
settings_str += custom_tree.parameter_to_xml_string(h5saver.settings)
settings_str += b'</All_settings>'
det_group = h5saver.add_det_group(h5saver.raw_group, "Data", settings_str)
try:
self.channel_arrays = OrderedDict([])
data_dim = 'data1D'
if not h5saver.is_node_in_group(det_group, data_dim):
self.channel_arrays['data1D'] = OrderedDict([])
data_group = h5saver.add_data_group(det_group, data_dim)
for ind_channel, data in enumerate(self.raw_data): # list of numpy arrays
channel = f'CH{ind_channel:03d}'
channel_group = h5saver.add_CH_group(data_group, title=channel)
self.channel_arrays[data_dim]['parent'] = channel_group
self.channel_arrays[data_dim][channel] = h5saver.add_data(channel_group,
dict(data=data,
x_axis=self.viewer_freq_axis),
scan_type='',
enlargeable=False)
h5saver.close_file()
except Exception as e:
logger.exception(str(e))
else:
data_to_save = [self.viewer_freq_axis['data']]
data_to_save.extend([dat for dat in self.raw_data])
np.savetxt(path, data_to_save, delimiter='\t')
except Exception as e:
logger.exception(str(e))
@Slot(str)
def add_log(self, txt):
"""
Add a log to the logger list from the given text log and the current time
================ ========= ======================
**Parameters** **Type** **Description**
*txt* string the log to be added
================ ========= ======================
"""
now = datetime.datetime.now()
new_item = QtWidgets.QListWidgetItem(str(now) + ": " + txt)
self.logger_list.addItem(new_item)
##to do
##self.save_parameters.logger_array.append(str(now)+": "+txt)
@Slot(str)
def emit_log(self, txt):
"""
Emit a log-signal from the given log index
=============== ======== =======================
**Parameters** **Type** **Description**
*txt* string the log to be emitted
=============== ======== =======================
"""
self.log_signal.emit(txt)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
win = QtWidgets.QMainWindow()
area = DockArea()
win.setCentralWidget(area)
win.resize(1000, 500)
win.setWindowTitle('pymodaq example')
prog = Spectrometer(area)
win.show()
sys.exit(app.exec_())
| [
"logging.getLogger",
"pymodaq.daq_utils.daq_utils.get_set_config_path",
"pymodaq.daq_utils.h5modules.browse_data",
"qtpy.QtGui.QPixmap",
"qtpy.QtWidgets.QListWidget",
"qtpy.QtWidgets.QAction",
"qtpy.QtWidgets.QStatusBar",
"pymodaq.daq_utils.daq_utils.ThreadCommand",
"pymodaq.daq_utils.daq_utils.eV2n... | [((1246, 1295), 'pymodaq.daq_utils.daq_utils.get_set_config_path', 'utils.get_set_config_path', (['"""spectrometer_configs"""'], {}), "('spectrometer_configs')\n", (1271, 1295), True, 'from pymodaq.daq_utils import daq_utils as utils\n'), ((1198, 1229), 'pymodaq.daq_utils.daq_utils.get_module_name', 'utils.get_module_name', (['__file__'], {}), '(__file__)\n', (1219, 1229), True, 'from pymodaq.daq_utils import daq_utils as utils\n'), ((1890, 1901), 'qtpy.QtCore.Signal', 'Signal', (['str'], {}), '(str)\n', (1896, 1901), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((12664, 12683), 'qtpy.QtCore.Slot', 'Slot', (['ThreadCommand'], {}), '(ThreadCommand)\n', (12668, 12683), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((16338, 16348), 'qtpy.QtCore.Slot', 'Slot', (['bool'], {}), '(bool)\n', (16342, 16348), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((26202, 26212), 'qtpy.QtCore.Slot', 'Slot', (['list'], {}), '(list)\n', (26206, 26212), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((27880, 27897), 'qtpy.QtCore.Slot', 'Slot', (['OrderedDict'], {}), '(OrderedDict)\n', (27884, 27897), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((38729, 38738), 'qtpy.QtCore.Slot', 'Slot', (['str'], {}), '(str)\n', (38733, 38738), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((39362, 39371), 'qtpy.QtCore.Slot', 'Slot', (['str'], {}), '(str)\n', (39366, 39371), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((39791, 39823), 'qtpy.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (39813, 39823), 
False, 'from qtpy import QtGui, QtWidgets\n'), ((39834, 39857), 'qtpy.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (39855, 39857), False, 'from qtpy import QtGui, QtWidgets\n'), ((39869, 39879), 'pymodaq.daq_utils.gui_utils.DockArea', 'DockArea', ([], {}), '()\n', (39877, 39879), False, 'from pymodaq.daq_utils.gui_utils import DockArea, select_file\n'), ((4663, 4682), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (4680, 4682), False, 'from qtpy import QtGui, QtWidgets\n'), ((5740, 5794), 'pymodaq.daq_utils.daq_utils.Axis', 'utils.Axis', ([], {'data': 'None', 'label': '"""Photon energy"""', 'units': '""""""'}), "(data=None, label='Photon energy', units='')\n", (5750, 5794), True, 'from pymodaq.daq_utils import daq_utils as utils\n'), ((8212, 8248), 'pyqtgraph.dockarea.Dock', 'Dock', (['"""Viewer dock"""'], {'size': '(350, 350)'}), "('Viewer dock', size=(350, 350))\n", (8216, 8248), False, 'from pyqtgraph.dockarea import Dock\n'), ((8329, 8348), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (8346, 8348), False, 'from qtpy import QtGui, QtWidgets\n'), ((8371, 8394), 'pymodaq.daq_utils.plotting.viewer1D.viewer1D_main.Viewer1D', 'Viewer1D', (['target_widget'], {}), '(target_widget)\n', (8379, 8394), False, 'from pymodaq.daq_utils.plotting.viewer1D.viewer1D_main import Viewer1D\n'), ((8620, 8634), 'pyqtgraph.dockarea.Dock', 'Dock', (['"""Logger"""'], {}), "('Logger')\n", (8624, 8634), False, 'from pyqtgraph.dockarea import Dock\n'), ((8662, 8685), 'qtpy.QtWidgets.QListWidget', 'QtWidgets.QListWidget', ([], {}), '()\n', (8683, 8685), False, 'from qtpy import QtGui, QtWidgets\n'), ((9118, 9138), 'qtpy.QtWidgets.QToolBar', 'QtWidgets.QToolBar', ([], {}), '()\n', (9136, 9138), False, 'from qtpy import QtGui, QtWidgets\n'), ((9275, 9297), 'qtpy.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', ([], {}), '()\n', (9295, 9297), False, 'from qtpy import QtGui, QtWidgets\n'), ((9371, 9395), 'qtpy.QtWidgets.QLabel', 
'QtWidgets.QLabel', (['"""????"""'], {}), "('????')\n", (9387, 9395), False, 'from qtpy import QtGui, QtWidgets\n'), ((9830, 9854), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""????"""'], {}), "('????')\n", (9846, 9854), False, 'from qtpy import QtGui, QtWidgets\n'), ((10324, 10330), 'pymodaq.daq_utils.plotting.qled.QLED', 'QLED', ([], {}), '()\n', (10328, 10330), False, 'from pymodaq.daq_utils.plotting.qled import QLED\n'), ((10813, 10882), 'pyqtgraph.parametertree.Parameter.create', 'Parameter.create', ([], {'name': '"""settings"""', 'type': '"""group"""', 'children': 'self.params'}), "(name='settings', type='group', children=self.params)\n", (10829, 10882), False, 'from pyqtgraph.parametertree import Parameter, ParameterTree\n'), ((10994, 11032), 'pyqtgraph.dockarea.Dock', 'Dock', (['"""Configuration"""'], {'size': '(300, 350)'}), "('Configuration', size=(300, 350))\n", (10998, 11032), False, 'from pyqtgraph.dockarea import Dock\n'), ((11185, 11200), 'pyqtgraph.parametertree.ParameterTree', 'ParameterTree', ([], {}), '()\n', (11198, 11200), False, 'from pyqtgraph.parametertree import Parameter, ParameterTree\n'), ((11594, 11630), 'pyqtgraph.dockarea.Dock', 'Dock', (['"""Calibration"""'], {'size': '(300, 350)'}), "('Calibration', size=(300, 350))\n", (11598, 11630), False, 'from pyqtgraph.dockarea import Dock\n'), ((11781, 11796), 'pyqtgraph.parametertree.ParameterTree', 'ParameterTree', ([], {}), '()\n', (11794, 11796), False, 'from pyqtgraph.parametertree import Parameter, ParameterTree\n'), ((12239, 12275), 'pyqtgraph.dockarea.Dock', 'Dock', (['"""Acquisition"""'], {'size': '(300, 350)'}), "('Acquisition', size=(300, 350))\n", (12243, 12275), False, 'from pyqtgraph.dockarea import Dock\n'), ((12426, 12441), 'pyqtgraph.parametertree.ParameterTree', 'ParameterTree', ([], {}), '()\n', (12439, 12441), False, 'from pyqtgraph.parametertree import Parameter, ParameterTree\n'), ((15903, 15941), 'qtpy.QtWidgets.QApplication.processEvents', 
'QtWidgets.QApplication.processEvents', ([], {}), '()\n', (15939, 15941), False, 'from qtpy import QtGui, QtWidgets\n'), ((15980, 16018), 'qtpy.QtWidgets.QApplication.processEvents', 'QtWidgets.QApplication.processEvents', ([], {}), '()\n', (16016, 16018), False, 'from qtpy import QtGui, QtWidgets\n'), ((16059, 16097), 'qtpy.QtWidgets.QApplication.processEvents', 'QtWidgets.QApplication.processEvents', ([], {}), '()\n', (16095, 16097), False, 'from qtpy import QtGui, QtWidgets\n'), ((26987, 27010), 'qtpy.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', ([], {}), '()\n', (27008, 27010), False, 'from qtpy import QtGui, QtWidgets\n'), ((29171, 29183), 'pymodaq.daq_utils.daq_utils.Axis', 'utils.Axis', ([], {}), '()\n', (29181, 29183), True, 'from pymodaq.daq_utils import daq_utils as utils\n'), ((32667, 32683), 'units_converter.main.UnitsConverter', 'UnitsConverter', ([], {}), '()\n', (32681, 32683), False, 'from units_converter.main import UnitsConverter\n'), ((32709, 32749), 'pyqtgraph.dockarea.Dock', 'Dock', (['"""Units Converter"""'], {'size': '(300, 350)'}), "('Units Converter', size=(300, 350))\n", (32713, 32749), False, 'from pyqtgraph.dockarea import Dock\n'), ((32945, 32970), 'pymodaq.daq_utils.h5modules.browse_data', 'browse_data', ([], {'ret_all': '(True)'}), '(ret_all=True)\n', (32956, 32970), False, 'from pymodaq.daq_utils.h5modules import H5Browser, H5Saver, browse_data, H5BrowserUtil\n'), ((33845, 33858), 'qtpy.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (33856, 33858), False, 'from qtpy import QtGui, QtWidgets\n'), ((34027, 34076), 'qtpy.QtWidgets.QAction', 'QtWidgets.QAction', (['iconquit', '"""Quit program"""', 'None'], {}), "(iconquit, 'Quit program', None)\n", (34044, 34076), False, 'from qtpy import QtGui, QtWidgets\n'), ((34209, 34222), 'qtpy.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (34220, 34222), False, 'from qtpy import QtGui, QtWidgets\n'), ((34361, 34456), 'qtpy.QtWidgets.QAction', 'QtWidgets.QAction', (['iconload', '"""Load target 
file (.h5, .png, .jpg) or data from camera"""', 'None'], {}), "(iconload,\n 'Load target file (.h5, .png, .jpg) or data from camera', None)\n", (34378, 34456), False, 'from qtpy import QtGui, QtWidgets\n'), ((34580, 34593), 'qtpy.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (34591, 34593), False, 'from qtpy import QtGui, QtWidgets\n'), ((34761, 34815), 'qtpy.QtWidgets.QAction', 'QtWidgets.QAction', (['iconsave', '"""Save current data"""', 'None'], {}), "(iconsave, 'Save current data', None)\n", (34778, 34815), False, 'from qtpy import QtGui, QtWidgets\n'), ((34941, 34954), 'qtpy.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (34952, 34954), False, 'from qtpy import QtGui, QtWidgets\n'), ((35093, 35133), 'qtpy.QtWidgets.QAction', 'QtWidgets.QAction', (['iconrun', '"""Grab"""', 'None'], {}), "(iconrun, 'Grab', None)\n", (35110, 35133), False, 'from qtpy import QtGui, QtWidgets\n'), ((35310, 35323), 'qtpy.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (35321, 35323), False, 'from qtpy import QtGui, QtWidgets\n'), ((35463, 35504), 'qtpy.QtWidgets.QAction', 'QtWidgets.QAction', (['iconsnap', '"""Snap"""', 'None'], {}), "(iconsnap, 'Snap', None)\n", (35480, 35504), False, 'from qtpy import QtGui, QtWidgets\n'), ((39135, 39158), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (39156, 39158), False, 'import datetime\n'), ((4299, 4345), 'qtpy.QtCore.QLocale', 'QLocale', (['QLocale.English', 'QLocale.UnitedStates'], {}), '(QLocale.English, QLocale.UnitedStates)\n', (4306, 4345), False, 'from qtpy.QtCore import QObject, Slot, Signal, QLocale, QDateTime, QRectF, QDate, QThread, Qt\n'), ((16174, 16206), 'pymodaq.daq_utils.daq_utils.ThreadCommand', 'ThreadCommand', (['"""get_exposure_ms"""'], {}), "('get_exposure_ms')\n", (16187, 16206), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((16290, 16330), 'pymodaq.daq_utils.daq_utils.ThreadCommand', 'ThreadCommand', 
(['"""set_exposure_ms"""', '[data]'], {}), "('set_exposure_ms', [data])\n", (16303, 16330), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((33022, 33037), 'pymodaq.daq_utils.h5modules.H5BrowserUtil', 'H5BrowserUtil', ([], {}), '()\n', (33035, 33037), False, 'from pymodaq.daq_utils.h5modules import H5Browser, H5Saver, browse_data, H5BrowserUtil\n'), ((33654, 33692), 'qtpy.QtWidgets.QApplication.processEvents', 'QtWidgets.QApplication.processEvents', ([], {}), '()\n', (33690, 33692), False, 'from qtpy import QtGui, QtWidgets\n'), ((33791, 33823), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""Acquisition:"""'], {}), "('Acquisition:')\n", (33807, 33823), False, 'from qtpy import QtGui, QtWidgets\n'), ((33886, 33934), 'qtpy.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/icons/Icon_Library/close2.png"""'], {}), "(':/icons/Icon_Library/close2.png')\n", (33899, 33934), False, 'from qtpy import QtGui, QtWidgets\n'), ((34250, 34296), 'qtpy.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/icons/Icon_Library/Open.png"""'], {}), "(':/icons/Icon_Library/Open.png')\n", (34263, 34296), False, 'from qtpy import QtGui, QtWidgets\n'), ((34621, 34669), 'qtpy.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/icons/Icon_Library/SaveAs.png"""'], {}), "(':/icons/Icon_Library/SaveAs.png')\n", (34634, 34669), False, 'from qtpy import QtGui, QtWidgets\n'), ((34981, 35027), 'qtpy.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/icons/Icon_Library/run2.png"""'], {}), "(':/icons/Icon_Library/run2.png')\n", (34994, 35027), False, 'from qtpy import QtGui, QtWidgets\n'), ((35351, 35397), 'qtpy.QtGui.QPixmap', 'QtGui.QPixmap', (['""":/icons/Icon_Library/snap.png"""'], {}), "(':/icons/Icon_Library/snap.png')\n", (35364, 35397), False, 'from qtpy import QtGui, QtWidgets\n'), ((36018, 36085), 'pymodaq.daq_utils.gui_utils.select_file', 'select_file', ([], {'start_path': 'self.save_file_pathname', 'save': '(True)', 'ext': 'ext'}), 
'(start_path=self.save_file_pathname, save=True, ext=ext)\n', (36029, 36085), False, 'from pymodaq.daq_utils.gui_utils import DockArea, select_file\n'), ((18795, 18826), 'pymodaq.daq_utils.daq_utils.ThreadCommand', 'ThreadCommand', (['"""get_spectro_wl"""'], {}), "('get_spectro_wl')\n", (18808, 18826), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((18876, 18901), 'pymodaq.daq_utils.daq_utils.ThreadCommand', 'ThreadCommand', (['"""get_axis"""'], {}), "('get_axis')\n", (18889, 18901), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((19244, 19273), 'pymodaq.daq_utils.daq_utils.ThreadCommand', 'ThreadCommand', (['"""get_laser_wl"""'], {}), "('get_laser_wl')\n", (19257, 19273), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((31338, 31373), 'pymodaq.daq_utils.gui_utils.select_file', 'select_file', ([], {'save': '(False)', 'ext': '"""dock"""'}), "(save=False, ext='dock')\n", (31349, 31373), False, 'from pymodaq.daq_utils.gui_utils import DockArea, select_file\n'), ((32194, 32245), 'pymodaq.daq_utils.gui_utils.select_file', 'select_file', ([], {'start_path': 'None', 'save': '(True)', 'ext': '"""dock"""'}), "(start_path=None, save=True, ext='dock')\n", (32205, 32245), False, 'from pymodaq.daq_utils.gui_utils import DockArea, select_file\n'), ((18403, 18448), 'pymodaq.daq_utils.daq_utils.ThreadCommand', 'ThreadCommand', (['"""set_spectro_wl"""', '[spectro_wl]'], {}), "('set_spectro_wl', [spectro_wl])\n", (18416, 18448), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((27755, 27804), 'pymodaq.daq_utils.daq_utils.ThreadCommand', 'ThreadCommand', (['"""set_laser_wl"""', '[laser_wavelength]'], {}), "('set_laser_wl', [laser_wavelength])\n", 
(27768, 27804), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((29582, 29618), 'pymodaq.daq_utils.daq_utils.nm2eV', 'nm2eV', (["self.viewer_freq_axis['data']"], {}), "(self.viewer_freq_axis['data'])\n", (29587, 29618), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((31488, 31502), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (31499, 31502), False, 'import pickle\n'), ((32348, 32398), 'pickle.dump', 'pickle.dump', (['dockstate', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(dockstate, f, pickle.HIGHEST_PROTOCOL)\n', (32359, 32398), False, 'import pickle\n'), ((36179, 36208), 'pymodaq.daq_utils.h5modules.H5Saver', 'H5Saver', ([], {'save_type': '"""detector"""'}), "(save_type='detector')\n", (36186, 36208), False, 'from pymodaq.daq_utils.h5modules import H5Browser, H5Saver, browse_data, H5BrowserUtil\n'), ((36810, 36863), 'pymodaq.daq_utils.custom_parameter_tree.parameter_to_xml_string', 'custom_tree.parameter_to_xml_string', (['h5saver.settings'], {}), '(h5saver.settings)\n', (36845, 36863), True, 'import pymodaq.daq_utils.custom_parameter_tree as custom_tree\n'), ((38606, 38652), 'numpy.savetxt', 'np.savetxt', (['path', 'data_to_save'], {'delimiter': '"""\t"""'}), "(path, data_to_save, delimiter='\\t')\n", (38616, 38652), True, 'import numpy as np\n'), ((17617, 17634), 'pymodaq.daq_utils.daq_utils.nm2eV', 'nm2eV', (['spectro_wl'], {}), '(spectro_wl)\n', (17622, 17634), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((28901, 28956), 'numpy.any', 'np.any', (["(x_axis['data'] != self.viewer_freq_axis['data'])"], {}), "(x_axis['data'] != self.viewer_freq_axis['data'])\n", (28907, 28956), True, 'import numpy as np\n'), ((32544, 32572), 'logging.getLogger', 'logging.getLogger', (['"""pymodaq"""'], {}), "('pymodaq')\n", 
(32561, 32572), False, 'import logging\n'), ((36363, 36413), 'pymodaq.daq_utils.custom_parameter_tree.parameter_to_xml_string', 'custom_tree.parameter_to_xml_string', (['self.settings'], {}), '(self.settings)\n', (36398, 36413), True, 'import pymodaq.daq_utils.custom_parameter_tree as custom_tree\n'), ((36504, 36563), 'pymodaq.daq_utils.custom_parameter_tree.parameter_to_xml_string', 'custom_tree.parameter_to_xml_string', (['self.detector.settings'], {}), '(self.detector.settings)\n', (36539, 36563), True, 'import pymodaq.daq_utils.custom_parameter_tree as custom_tree\n'), ((37086, 37101), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (37097, 37101), False, 'from collections import OrderedDict\n'), ((36688, 36778), 'pymodaq.daq_utils.custom_parameter_tree.parameter_to_xml_string', 'custom_tree.parameter_to_xml_string', (['self.detector.ui.viewers[0].roi_manager.settings'], {}), '(self.detector.ui.viewers[0].roi_manager\n .settings)\n', (36723, 36778), True, 'import pymodaq.daq_utils.custom_parameter_tree as custom_tree\n'), ((37284, 37299), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (37295, 37299), False, 'from collections import OrderedDict\n'), ((13588, 13643), 'numpy.any', 'np.any', (["(x_axis['data'] != self.viewer_freq_axis['data'])"], {}), "(x_axis['data'] != self.viewer_freq_axis['data'])\n", (13594, 13643), True, 'import numpy as np\n'), ((20847, 20858), 'pymodaq.daq_utils.daq_utils.eV2nm', 'eV2nm', (['data'], {}), '(data)\n', (20852, 20858), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((21789, 21812), 'pymodaq.daq_utils.daq_utils.nm2eV', 'nm2eV', (['self._spectro_wl'], {}), '(self._spectro_wl)\n', (21794, 21812), False, 'from pymodaq.daq_utils.daq_utils import Enm2cmrel, Ecmrel2Enm, nm2eV, eV2nm, eV2radfs, l2w, getLineInfo, ThreadCommand\n'), ((23559, 23628), 'pymodaq.daq_utils.gui_utils.select_file', 'select_file', ([], 
{'start_path': 'self.save_file_pathname', 'save': '(True)', 'ext': '"""xml"""'}), "(start_path=self.save_file_pathname, save=True, ext='xml')\n", (23570, 23628), False, 'from pymodaq.daq_utils.gui_utils import DockArea, select_file\n'), ((23062, 23088), 'pyqtgraph.dockarea.Dock', 'Dock', (['"""Calibration module"""'], {}), "('Calibration module')\n", (23066, 23088), False, 'from pyqtgraph.dockarea import Dock\n'), ((23203, 23229), 'pymodaq_spectro.utils.calibration.Calibration', 'Calibration', (['self.dockarea'], {}), '(self.dockarea)\n', (23214, 23229), False, 'from pymodaq_spectro.utils.calibration import Calibration\n'), ((23874, 23944), 'pymodaq.daq_utils.gui_utils.select_file', 'select_file', ([], {'start_path': 'self.save_file_pathname', 'save': '(False)', 'ext': '"""xml"""'}), "(start_path=self.save_file_pathname, save=False, ext='xml')\n", (23885, 23944), False, 'from pymodaq.daq_utils.gui_utils import DockArea, select_file\n'), ((24019, 24062), 'pymodaq.daq_utils.custom_parameter_tree.XML_file_to_parameter', 'custom_tree.XML_file_to_parameter', (['filename'], {}), '(filename)\n', (24052, 24062), True, 'import pymodaq.daq_utils.custom_parameter_tree as custom_tree\n'), ((25622, 25686), 'numpy.linspace', 'np.linspace', (['(0)', '(self.raw_data[0].size - 1)', 'self.raw_data[0].size'], {}), '(0, self.raw_data[0].size - 1, self.raw_data[0].size)\n', (25633, 25686), True, 'import numpy as np\n'), ((24183, 24287), 'pyqtgraph.parametertree.Parameter.create', 'Parameter.create', ([], {'title': '"""Calibration coeffs:"""', 'name': '"""calib_coeffs"""', 'type': '"""group"""', 'children': 'children'}), "(title='Calibration coeffs:', name='calib_coeffs', type=\n 'group', children=children)\n", (24199, 24287), False, 'from pyqtgraph.parametertree import Parameter, ParameterTree\n'), ((25778, 25797), 'numpy.max', 'np.max', (['x_axis_pxls'], {}), '(x_axis_pxls)\n', (25784, 25797), True, 'import numpy as np\n')] |
import os
import unittest
import imageio
import numpy as np
class TestBase(unittest.TestCase):
    """Shared helpers for tests that build temporary image/CSV cell fixtures.

    Any file registered through save_temp/create_temp is deleted again in
    tearDown, so individual tests do not need their own cleanup.
    """

    def setUp(self):
        # Paths scheduled for removal after each test.
        self.to_clear = []

    def tearDown(self):
        for path in self.to_clear:
            os.remove(path)

    def save_temp(self, path, image):
        """Write *image* to *path* and schedule the file for cleanup."""
        self.to_clear.append(path)
        imageio.imsave(path, image)

    def create_temp(self, path):
        """Open *path* for writing, schedule it for cleanup, return the handle.

        The caller is responsible for closing the returned file object
        (typically via a ``with`` statement).
        """
        self.to_clear.append(path)
        return open(path, "w")

    def draw_cell(self, image, position, radius, value):
        """Paint a filled square of side 2*radius + 1 centred at ``position``.

        ``position`` is (x, y). The square is clipped at the top/left image
        borders explicitly; numpy slicing clips the bottom/right implicitly.
        """
        left = max(0, position[0] - radius)
        top = max(0, position[1] - radius)
        right = position[0] + radius
        bottom = position[1] + radius
        image[top: bottom + 1, left: right + 1] = value

    def save_in_platform_format(self, filename, cells):
        """Write *cells* to *filename* as the platform CSV (frame, id, colour, x, y)."""
        with self.create_temp(filename) as file:
            file.write("Frame_nr, Cell_nr, Cell_colour, Position_X, Position_Y\n")
            for cell in cells:
                file.write("{frame}, {cellid}, {color}, {posx}, {posy}\n".format(
                    frame=cell.frame_number, cellid=cell.cell_id, posx=cell.position[0], posy=cell.position[1],
                    color=cell.colour))

    def save_in_mask_format(self, filename, cells, radius):
        """Render *cells* into a 510x510 label mask (label = colour + 1) and save it."""
        image = np.zeros((510, 510), dtype=np.uint8)
        for cell in cells:
            self.draw_cell(image, cell.position, radius, cell.colour + 1)
        self.save_temp(filename, image)

    def assertEqualsCells(self, a, b, ignore_ids=False):
        """Assert two cells are equal; with ignore_ids only position/colour count."""
        if a is None or b is None:
            self.assertEqual(a, b)
        else:
            if not ignore_ids:
                # Fixed: assertEquals is a deprecated alias removed in Python 3.12.
                self.assertEqual(a, b)
            else:
                self.assertEqual((a.position, a.colour), (b.position, b.colour))

    def assertEqualsCellsPairs(self, a2, b2, ignore_ids=False):
        """Assert two (cell, cell) pairs are equal element-wise."""
        self.assertEqualsCells(a2[0], b2[0], ignore_ids)
        self.assertEqualsCells(a2[1], b2[1], ignore_ids)
| [
"numpy.zeros",
"imageio.imsave",
"os.remove"
] | [((316, 343), 'imageio.imsave', 'imageio.imsave', (['path', 'image'], {}), '(path, image)\n', (330, 343), False, 'import imageio\n'), ((1251, 1287), 'numpy.zeros', 'np.zeros', (['(510, 510)'], {'dtype': 'np.uint8'}), '((510, 510), dtype=np.uint8)\n', (1259, 1287), True, 'import numpy as np\n'), ((218, 233), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (227, 233), False, 'import os\n')] |
import pymbar
from fe import endpoint_correction
from collections import namedtuple
import pickle
import dataclasses
import time
import functools
import copy
import jax
import numpy as np
from md import minimizer
from typing import Tuple, List, Any
import os
from fe import standard_state
from fe.utils import sanitize_energies, extract_delta_Us_from_U_knk
from timemachine.lib import potentials, custom_ops
@dataclasses.dataclass
class SimulationResult:
xs: np.array
boxes: np.array
du_dps: np.array
lambda_us: np.array
def flatten(v):
return tuple(), (v.xs, v.boxes, v.du_dps, v.lambda_us)
def unflatten(aux_data, children):
xs, boxes, du_dps, lambda_us = aux_data
return SimulationResult(xs, boxes, du_dps, lambda_us)
jax.tree_util.register_pytree_node(SimulationResult, flatten, unflatten)
def run_model_simulations(model, sys_params):
assert len(sys_params) == len(model.unbound_potentials)
bound_potentials = []
for params, unbound_pot in zip(sys_params, model.unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
all_args = []
for lamb_idx, lamb in enumerate(model.lambda_schedule):
subsample_interval = 1000
all_args.append(
(
lamb,
model.box,
model.x0,
model.v0,
bound_potentials,
model.integrator,
model.barostat,
model.equil_steps,
model.prod_steps,
subsample_interval,
subsample_interval,
model.lambda_schedule,
)
)
if model.endpoint_correct:
assert isinstance(bound_potentials[-1], potentials.HarmonicBond)
all_args.append(
(
1.0,
model.box,
model.x0,
model.v0,
bound_potentials[:-1], # strip out the restraints
model.integrator,
model.barostat,
model.equil_steps,
model.prod_steps,
subsample_interval,
subsample_interval,
[], # no need to evaluate Us for the endpoint correction
)
)
results = []
if model.client is None:
for args in all_args:
results.append(simulate(*args))
else:
futures = []
for args in all_args:
futures.append(model.client.submit(simulate, *args))
for future in futures:
results.append(future.result())
return results
def simulate(
lamb,
box,
x0,
v0,
final_potentials,
integrator,
barostat,
equil_steps,
prod_steps,
x_interval,
u_interval,
lambda_windows,
):
"""
Run a simulation and collect relevant statistics for this simulation.
Parameters
----------
lamb: float
lambda value used for the equilibrium simulation
box: np.array
3x3 numpy array of the box, dtype should be np.float64
x0: np.array
Nx3 numpy array of the coordinates
v0: np.array
Nx3 numpy array of the velocities
final_potentials: list
list of unbound potentials
integrator: timemachine.Integrator
integrator to be used for dynamics
barostat: timemachine.Barostat
barostat to be used for equilibration
equil_steps: int
number of equilibration steps
prod_steps: int
number of production steps
x_interval: int
how often we store coordinates. If x_interval == 0 then
no frames are returned.
u_interval: int
how often we store energies. If u_interval == 0 then
no energies are returned
lambda_windows: list of float
lambda windows we evaluate energies at.
Returns
-------
SimulationResult
Results of the simulation.
"""
all_impls = []
# set up observables for du_dps here as well.
du_dp_obs = []
for bp in final_potentials:
impl = bp.bound_impl(np.float32)
all_impls.append(impl)
du_dp_obs.append(custom_ops.AvgPartialUPartialParam(impl, 25))
# fire minimize once again, needed for parameter interpolation
x0 = minimizer.fire_minimize(x0, all_impls, box, np.ones(100, dtype=np.float64) * lamb)
# sanity check that forces are well behaved
for bp in all_impls:
du_dx, du_dl, u = bp.execute(x0, box, lamb)
norm_forces = np.linalg.norm(du_dx, axis=1)
assert np.all(norm_forces < 25000), "Forces much greater than expected after minimization"
if integrator.seed == 0:
# this deepcopy is needed if we're running if client == None
integrator = copy.deepcopy(integrator)
integrator.seed = np.random.randint(np.iinfo(np.int32).max)
if barostat.seed == 0:
barostat = copy.deepcopy(barostat)
barostat.seed = np.random.randint(np.iinfo(np.int32).max)
intg_impl = integrator.impl()
# technically we need to only pass in the nonbonded impl
barostat_impl = barostat.impl(all_impls)
# context components: positions, velocities, box, integrator, energy fxns
ctxt = custom_ops.Context(x0, v0, box, intg_impl, all_impls, barostat_impl)
# equilibration
equil_schedule = np.ones(equil_steps) * lamb
ctxt.multiple_steps(equil_schedule)
# (ytz): intentionally hard-coded, I'd rather the end-user *not*
# muck with this unless they have a good reason to.
barostat_impl.set_interval(25)
for obs in du_dp_obs:
ctxt.add_observable(obs)
full_us, xs, boxes = ctxt.multiple_steps_U(lamb, prod_steps, np.array(lambda_windows), u_interval, x_interval)
# keep the structure of grads the same as that of final_potentials so we can properly
# form their vjps.
grads = []
for obs in du_dp_obs:
grads.append(obs.avg_du_dp())
result = SimulationResult(
xs=xs.astype("float32"),
boxes=boxes.astype("float32"),
du_dps=grads,
lambda_us=full_us,
)
return result
FreeEnergyModel = namedtuple(
"FreeEnergyModel",
[
"unbound_potentials",
"endpoint_correct",
"client",
"box",
"x0",
"v0",
"integrator",
"barostat",
"lambda_schedule",
"equil_steps",
"prod_steps",
"beta",
"prefix",
],
)
gradient = List[Any] # TODO: make this more descriptive of dG_grad structure
def _deltaG_from_results(model, results, sys_params) -> Tuple[Tuple[float, List], np.array]:
assert len(sys_params) == len(model.unbound_potentials)
bound_potentials = []
for params, unbound_pot in zip(sys_params, model.unbound_potentials):
bp = unbound_pot.bind(np.asarray(params))
bound_potentials.append(bp)
if model.endpoint_correct:
sim_results = results[:-1]
else:
sim_results = results
U_knk = []
N_k = []
for result in sim_results:
U_knk.append(result.lambda_us)
N_k.append(len(result.lambda_us)) # number of frames
U_knk = np.array(U_knk)
bar_dG = 0
bar_dG_err = 0
delta_Us = extract_delta_Us_from_U_knk(U_knk)
for lambda_idx in range(len(model.lambda_schedule) - 1):
fwd_delta_u = model.beta * delta_Us[lambda_idx][0]
rev_delta_u = model.beta * delta_Us[lambda_idx][1]
dG_exact, exact_bar_err = pymbar.BAR(fwd_delta_u, rev_delta_u)
bar_dG += dG_exact / model.beta
exact_bar_overlap = endpoint_correction.overlap_from_cdf(fwd_delta_u, rev_delta_u)
# probably off by a factor of two since we re-use samples.
bar_dG_err += (exact_bar_err / model.beta) ** 2
lamb_start = model.lambda_schedule[lambda_idx]
lamb_end = model.lambda_schedule[lambda_idx + 1]
print(
f"{model.prefix}_BAR: lambda {lamb_start:.3f} -> {lamb_end:.3f} dG: {dG_exact/model.beta:.3f} dG_err: {exact_bar_err/model.beta:.3f} overlap: {exact_bar_overlap:.3f}"
)
# for MBAR we need to sanitize the energies
clean_U_knks = [] # [K, F, K]
for lambda_idx, full_us in enumerate(U_knk):
clean_U_knks.append(sanitize_energies(full_us, lambda_idx))
print(
model.prefix,
" MBAR: amin",
np.amin(clean_U_knks),
"median",
np.median(clean_U_knks),
"max",
np.amax(clean_U_knks),
)
K = len(model.lambda_schedule)
clean_U_knks = np.array(clean_U_knks) # [K, F, K]
U_kn = np.reshape(clean_U_knks, (-1, K)).transpose() # [K, F*K]
u_kn = U_kn * model.beta
np.save(model.prefix + "_U_kn.npy", U_kn)
mbar = pymbar.MBAR(u_kn, N_k)
differences, error_estimates = mbar.getFreeEnergyDifferences()
f_k, error_k = differences[0], error_estimates[0]
mbar_dG = f_k[-1] / model.beta
mbar_dG_err = error_k[-1] / model.beta
bar_dG_err = np.sqrt(bar_dG_err)
dG = bar_dG # use the exact answer
dG_grad = []
# (ytz): results[-1].du_dps contain system parameter derivatives for the
# independent, gas phase simulation. They're usually ordered as:
# [Bonds, Angles, Torsions, Nonbonded]
#
# results[0].du_dps contain system parameter derivatives for the core
# restrained state. If we're doing the endpoint correction during
# decoupling stages, the derivatives are ordered as:
# [Bonds, Angles, Torsions, Nonbonded, RestraintBonds]
# Otherwise, in stages like conversion where the endpoint correction
# is turned off, the derivatives are ordered as :
# [Bonds, Angles, Torsions, Nonbonded]
# Note that this zip will always loop over only the
# [Bonds, Angles, Torsions, Nonbonded] terms, since it only
# enumerates over the smaller of the two lists.
for rhs, lhs in zip(results[-1].du_dps, results[0].du_dps):
dG_grad.append(rhs - lhs)
if model.endpoint_correct:
assert len(results[0].du_dps) - len(results[-1].du_dps) == 1
# (ytz): Fill in missing derivatives since zip() from above loops
# over the shorter array.
lhs = results[0].du_dps[-1]
rhs = 0 # zero as the energies do not depend the core restraints.
dG_grad.append(rhs - lhs)
core_restr = bound_potentials[-1]
# (ytz): tbd, automatically find optimal k_translation/k_rotation such that
# standard deviation and/or overlap is maximized
k_translation = 200.0
k_rotation = 100.0
start = time.time()
lhs_du, rhs_du, rotation_samples, translation_samples = endpoint_correction.estimate_delta_us(
k_translation=k_translation,
k_rotation=k_rotation,
core_idxs=core_restr.get_idxs(),
core_params=core_restr.params.reshape((-1, 2)),
beta=model.beta,
lhs_xs=results[-2].xs,
rhs_xs=results[-1].xs,
seed=2021,
)
dG_endpoint, endpoint_err = pymbar.BAR(model.beta * lhs_du, model.beta * np.array(rhs_du))
dG_endpoint = dG_endpoint / model.beta
endpoint_err = endpoint_err / model.beta
# compute standard state corrections for translation and rotation
dG_ssc_translation, dG_ssc_rotation = standard_state.release_orientational_restraints(
k_translation, k_rotation, model.beta
)
overlap = endpoint_correction.overlap_from_cdf(lhs_du, rhs_du)
lhs_mean = np.mean(lhs_du)
rhs_mean = np.mean(rhs_du)
print(
f"{model.prefix} bar (A) {bar_dG:.3f} bar_err {bar_dG_err:.3f} mbar (A) {mbar_dG:.3f} mbar_err {mbar_dG_err:.3f} dG_endpoint (E) {dG_endpoint:.3f} dG_endpoint_err {endpoint_err:.3f} dG_ssc_translation {dG_ssc_translation:.3f} dG_ssc_rotation {dG_ssc_rotation:.3f} overlap {overlap:.3f} lhs_mean {lhs_mean:.3f} rhs_mean {rhs_mean:.3f} lhs_n {len(lhs_du)} rhs_n {len(rhs_du)} | time: {time.time()-start:.3f}s"
)
dG += dG_endpoint + dG_ssc_translation + dG_ssc_rotation
bar_dG_err = np.sqrt(bar_dG_err ** 2 + endpoint_err ** 2)
else:
print(
f"{model.prefix} bar (A) {bar_dG:.3f} bar_err {bar_dG_err:.3f} mbar (A) {mbar_dG:.3f} mbar_err {mbar_dG_err:.3f} "
)
return (dG, bar_dG_err, results), dG_grad
@functools.partial(
jax.custom_vjp,
nondiff_argnums=(
0,
1,
),
)
def deltaG_from_results(model, results, sys_params) -> Tuple[float, List]:
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)[0]
def deltaG_from_results_fwd(model, results, sys_params) -> Tuple[Tuple[float, List], np.array]:
"""same signature as DeltaG_from_results, but returns the full tuple"""
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)
def deltaG_from_results_bwd(model, results, residual, grad) -> Tuple[np.array]:
"""Note: nondiff args must appear first here, even though one of them appears last in the original function's signature!"""
# residual are the partial dG / partial dparams for each term
# grad[0] is the adjoint of dG w.r.t. loss: partial L/partial dG
# grad[1] is the adjoint of dG_err w.r.t. loss: which we don't use
# grad[2] is the adjoint of simulation results w.r.t. loss: which we don't use
return ([grad[0] * r for r in residual],)
@functools.partial(jax.custom_vjp, nondiff_argnums=(0,))
def deltaG(model, sys_params) -> Tuple[float, List]:
results = run_model_simulations(model, sys_params)
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)[0]
def deltaG_fwd(model, sys_params) -> Tuple[Tuple[float, List], np.array]:
"""same signature as DeltaG_from_results, but returns the full tuple"""
results = run_model_simulations(model, sys_params)
return _deltaG_from_results(model=model, results=results, sys_params=sys_params)
def deltaG_bwd(model, residual, grad) -> Tuple[np.array]:
"""Note: nondiff args must appear first here, even though one of them appears last in the original function's signature!"""
# residual are the partial dG / partial dparams for each term
# grad[0] is the adjoint of dG w.r.t. loss: partial L/partial dG
# grad[1] is the adjoint of dG_err w.r.t. loss: which we don't use
# grad[2] is the adjoint of simulation results w.r.t. loss: which we don't use
return ([grad[0] * r for r in residual],)
deltaG_from_results.defvjp(deltaG_from_results_fwd, deltaG_from_results_bwd)
deltaG.defvjp(deltaG_fwd, deltaG_bwd)
| [
"numpy.sqrt",
"numpy.iinfo",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"fe.utils.sanitize_energies",
"pymbar.BAR",
"numpy.save",
"numpy.mean",
"numpy.reshape",
"fe.standard_state.release_orientational_restraints",
"numpy.asarray",
"timemachine.lib.custom_ops.AvgPartialUPartialPara... | [((761, 833), 'jax.tree_util.register_pytree_node', 'jax.tree_util.register_pytree_node', (['SimulationResult', 'flatten', 'unflatten'], {}), '(SimulationResult, flatten, unflatten)\n', (795, 833), False, 'import jax\n'), ((6167, 6369), 'collections.namedtuple', 'namedtuple', (['"""FreeEnergyModel"""', "['unbound_potentials', 'endpoint_correct', 'client', 'box', 'x0', 'v0',\n 'integrator', 'barostat', 'lambda_schedule', 'equil_steps',\n 'prod_steps', 'beta', 'prefix']"], {}), "('FreeEnergyModel', ['unbound_potentials', 'endpoint_correct',\n 'client', 'box', 'x0', 'v0', 'integrator', 'barostat',\n 'lambda_schedule', 'equil_steps', 'prod_steps', 'beta', 'prefix'])\n", (6177, 6369), False, 'from collections import namedtuple\n'), ((12363, 12420), 'functools.partial', 'functools.partial', (['jax.custom_vjp'], {'nondiff_argnums': '(0, 1)'}), '(jax.custom_vjp, nondiff_argnums=(0, 1))\n', (12380, 12420), False, 'import functools\n'), ((13425, 13480), 'functools.partial', 'functools.partial', (['jax.custom_vjp'], {'nondiff_argnums': '(0,)'}), '(jax.custom_vjp, nondiff_argnums=(0,))\n', (13442, 13480), False, 'import functools\n'), ((5260, 5328), 'timemachine.lib.custom_ops.Context', 'custom_ops.Context', (['x0', 'v0', 'box', 'intg_impl', 'all_impls', 'barostat_impl'], {}), '(x0, v0, box, intg_impl, all_impls, barostat_impl)\n', (5278, 5328), False, 'from timemachine.lib import potentials, custom_ops\n'), ((7187, 7202), 'numpy.array', 'np.array', (['U_knk'], {}), '(U_knk)\n', (7195, 7202), True, 'import numpy as np\n'), ((7254, 7288), 'fe.utils.extract_delta_Us_from_U_knk', 'extract_delta_Us_from_U_knk', (['U_knk'], {}), '(U_knk)\n', (7281, 7288), False, 'from fe.utils import sanitize_energies, extract_delta_Us_from_U_knk\n'), ((8562, 8584), 'numpy.array', 'np.array', (['clean_U_knks'], {}), '(clean_U_knks)\n', (8570, 8584), True, 'import numpy as np\n'), ((8701, 8742), 'numpy.save', 'np.save', (["(model.prefix + 
'_U_kn.npy')", 'U_kn'], {}), "(model.prefix + '_U_kn.npy', U_kn)\n", (8708, 8742), True, 'import numpy as np\n'), ((8755, 8777), 'pymbar.MBAR', 'pymbar.MBAR', (['u_kn', 'N_k'], {}), '(u_kn, N_k)\n', (8766, 8777), False, 'import pymbar\n'), ((8995, 9014), 'numpy.sqrt', 'np.sqrt', (['bar_dG_err'], {}), '(bar_dG_err)\n', (9002, 9014), True, 'import numpy as np\n'), ((4551, 4580), 'numpy.linalg.norm', 'np.linalg.norm', (['du_dx'], {'axis': '(1)'}), '(du_dx, axis=1)\n', (4565, 4580), True, 'import numpy as np\n'), ((4596, 4623), 'numpy.all', 'np.all', (['(norm_forces < 25000)'], {}), '(norm_forces < 25000)\n', (4602, 4623), True, 'import numpy as np\n'), ((4799, 4824), 'copy.deepcopy', 'copy.deepcopy', (['integrator'], {}), '(integrator)\n', (4812, 4824), False, 'import copy\n'), ((4940, 4963), 'copy.deepcopy', 'copy.deepcopy', (['barostat'], {}), '(barostat)\n', (4953, 4963), False, 'import copy\n'), ((5371, 5391), 'numpy.ones', 'np.ones', (['equil_steps'], {}), '(equil_steps)\n', (5378, 5391), True, 'import numpy as np\n'), ((5726, 5750), 'numpy.array', 'np.array', (['lambda_windows'], {}), '(lambda_windows)\n', (5734, 5750), True, 'import numpy as np\n'), ((7505, 7541), 'pymbar.BAR', 'pymbar.BAR', (['fwd_delta_u', 'rev_delta_u'], {}), '(fwd_delta_u, rev_delta_u)\n', (7515, 7541), False, 'import pymbar\n'), ((7610, 7672), 'fe.endpoint_correction.overlap_from_cdf', 'endpoint_correction.overlap_from_cdf', (['fwd_delta_u', 'rev_delta_u'], {}), '(fwd_delta_u, rev_delta_u)\n', (7646, 7672), False, 'from fe import endpoint_correction\n'), ((8381, 8402), 'numpy.amin', 'np.amin', (['clean_U_knks'], {}), '(clean_U_knks)\n', (8388, 8402), True, 'import numpy as np\n'), ((8430, 8453), 'numpy.median', 'np.median', (['clean_U_knks'], {}), '(clean_U_knks)\n', (8439, 8453), True, 'import numpy as np\n'), ((8478, 8499), 'numpy.amax', 'np.amax', (['clean_U_knks'], {}), '(clean_U_knks)\n', (8485, 8499), True, 'import numpy as np\n'), ((10582, 10593), 'time.time', 'time.time', ([], {}), 
'()\n', (10591, 10593), False, 'import time\n'), ((11325, 11415), 'fe.standard_state.release_orientational_restraints', 'standard_state.release_orientational_restraints', (['k_translation', 'k_rotation', 'model.beta'], {}), '(k_translation, k_rotation,\n model.beta)\n', (11372, 11415), False, 'from fe import standard_state\n'), ((11452, 11504), 'fe.endpoint_correction.overlap_from_cdf', 'endpoint_correction.overlap_from_cdf', (['lhs_du', 'rhs_du'], {}), '(lhs_du, rhs_du)\n', (11488, 11504), False, 'from fe import endpoint_correction\n'), ((11524, 11539), 'numpy.mean', 'np.mean', (['lhs_du'], {}), '(lhs_du)\n', (11531, 11539), True, 'import numpy as np\n'), ((11559, 11574), 'numpy.mean', 'np.mean', (['rhs_du'], {}), '(rhs_du)\n', (11566, 11574), True, 'import numpy as np\n'), ((12106, 12150), 'numpy.sqrt', 'np.sqrt', (['(bar_dG_err ** 2 + endpoint_err ** 2)'], {}), '(bar_dG_err ** 2 + endpoint_err ** 2)\n', (12113, 12150), True, 'import numpy as np\n'), ((1073, 1091), 'numpy.asarray', 'np.asarray', (['params'], {}), '(params)\n', (1083, 1091), True, 'import numpy as np\n'), ((4197, 4241), 'timemachine.lib.custom_ops.AvgPartialUPartialParam', 'custom_ops.AvgPartialUPartialParam', (['impl', '(25)'], {}), '(impl, 25)\n', (4231, 4241), False, 'from timemachine.lib import potentials, custom_ops\n'), ((4364, 4394), 'numpy.ones', 'np.ones', (['(100)'], {'dtype': 'np.float64'}), '(100, dtype=np.float64)\n', (4371, 4394), True, 'import numpy as np\n'), ((6850, 6868), 'numpy.asarray', 'np.asarray', (['params'], {}), '(params)\n', (6860, 6868), True, 'import numpy as np\n'), ((8276, 8314), 'fe.utils.sanitize_energies', 'sanitize_energies', (['full_us', 'lambda_idx'], {}), '(full_us, lambda_idx)\n', (8293, 8314), False, 'from fe.utils import sanitize_energies, extract_delta_Us_from_U_knk\n'), ((8609, 8642), 'numpy.reshape', 'np.reshape', (['clean_U_knks', '(-1, K)'], {}), '(clean_U_knks, (-1, K))\n', (8619, 8642), True, 'import numpy as np\n'), ((4869, 4887), 'numpy.iinfo', 
'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (4877, 4887), True, 'import numpy as np\n'), ((5006, 5024), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (5014, 5024), True, 'import numpy as np\n'), ((11091, 11107), 'numpy.array', 'np.array', (['rhs_du'], {}), '(rhs_du)\n', (11099, 11107), True, 'import numpy as np\n'), ((11985, 11996), 'time.time', 'time.time', ([], {}), '()\n', (11994, 11996), False, 'import time\n')] |
#from argparse import ArgumentParser #Discomment to pass the variables
import visual as v
import numpy as np
# This version is for visual 6.11
SF = 1e-6
G = C.G * SF ** 3
DT = 3600 * 24
RATE = 50000
scene = v.display(title="Solar System variations", x=0, y=0, z=0,
width=600, height=600, range=1000e3,
background=v.color.black, center=v.vector(0, 0, 0),
forward=(0, -0.3, -1))
class Body(v.sphere):
def __init__(self, mass, pos, vel, i, *arg, **kwargs):
super(Body, self).__init__(*arg, **kwargs)
self.vel = v.vector(vel)
self.pos = v.vector(pos)
self.mass = mass
self.id = i
def c_pos(self, obj, dt=DT):
xm = self.pos + 0.5 * dt * self.vel
self.vel = self.vel + dt * self.c_acel(xm, obj)
self.pos = xm + 0.5 * dt * self.vel
def c_acel(self, xm, obj):
acel = v.vector(0, 0, 0)
for i in obj:
if i.id != self.id:
acel += (i.mass * (v.norm(i.pos - xm) * G) /
(v.mag(i.pos - xm) ** 2))
return acel
def generate(file):
date = np.loadtxt(file, dtype='float')
obj = []
for i in date:
obj.append(Body(i[0], i[1:4], i[4:], len(obj), color=(1, 0, 0),
radius=30, make_trail=True, trail_type="points",
interval=50, retain=1000))
obj[len(obj) - 1].trail_object.size = 1
return obj
parser = ArgumentParser(prog='N-Body', description='This program make the \
simulation of N-Body')
parser.add_argument('-f', '--INPUT', help='File of intput')
parser.add_argument('-sf', '--SCALE_FACTOR', help='Factor of scale for the \
simulation', type=float)
parser.add_argument('-dt', '--INCRASE', help='Incrase of the time', type=float)
parser.add_argument('-r', '--RATE', help='Velocity of simulation', type=float)
args = parser.parse_args()
if args.SCALE_FACTOR:
SF = args.SCALE_FACTOR
if args.INCRASE:
DT = args.INCRASE
if args.INPUT:
obj = generate(args.INPUT)
if args.RATE:
RATE = args.RATE
else:
obj = generate('SSone.ap')
while True:
for i in obj:
i.c_pos(obj, dt=DT)
v.rate(RATE)
| [
"visual.vector",
"visual.norm",
"visual.rate",
"numpy.loadtxt",
"visual.mag"
] | [((1143, 1174), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'dtype': '"""float"""'}), "(file, dtype='float')\n", (1153, 1174), True, 'import numpy as np\n'), ((2228, 2240), 'visual.rate', 'v.rate', (['RATE'], {}), '(RATE)\n', (2234, 2240), True, 'import visual as v\n'), ((376, 393), 'visual.vector', 'v.vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (384, 393), True, 'import visual as v\n'), ((589, 602), 'visual.vector', 'v.vector', (['vel'], {}), '(vel)\n', (597, 602), True, 'import visual as v\n'), ((622, 635), 'visual.vector', 'v.vector', (['pos'], {}), '(pos)\n', (630, 635), True, 'import visual as v\n'), ((906, 923), 'visual.vector', 'v.vector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (914, 923), True, 'import visual as v\n'), ((1065, 1082), 'visual.mag', 'v.mag', (['(i.pos - xm)'], {}), '(i.pos - xm)\n', (1070, 1082), True, 'import visual as v\n'), ((1013, 1031), 'visual.norm', 'v.norm', (['(i.pos - xm)'], {}), '(i.pos - xm)\n', (1019, 1031), True, 'import visual as v\n')] |
import numpy as np
import matplotlib.pyplot as plt
plt.figure(figsize=(20.0, 6.0))
def f(x):
return 1 - np.sqrt(1 - x ** 2)
EXPECTED_AREA = 1.0 - np.pi / 4
def vegas(iterations=3, samples_per_iteration=333, num_bins=20, K=1000, alpha=1.0, make_plots=False):
bin_edges = np.linspace(start=0, stop=1, endpoint=True, num=num_bins+1)
bin_widths = bin_edges[1:] - bin_edges[:-1]
weighted_function_value_sum = 0.0
for j in range(iterations):
random_numbers = np.random.rand(samples_per_iteration)
random_bins = np.random.randint(low=0, high=num_bins, size=samples_per_iteration)
random_bins_low = bin_edges[random_bins]
random_bins_high = bin_edges[random_bins + 1]
random_bin_widths = random_bins_high - random_bins_low
random_numbers_transformed = random_bins_low + random_numbers * random_bin_widths
function_values = f(random_numbers_transformed)
weighted_function_values = function_values * random_bin_widths * num_bins
if make_plots:
plt.subplot(1, iterations, j+1)
plt.xlim(0, 1)
plt.ylim(0, 1)
plot_x = np.linspace(start=0.001, stop=1.0, num=1000, endpoint=True)
plt.vlines(
x=random_numbers_transformed[:100], ymin=0, ymax=weighted_function_values[:100], color="black",
label="$samples$"
)
plt.plot(plot_x, f(plot_x), label="$f(x)$")
plt.bar(
x=bin_edges[:-1], height=EXPECTED_AREA/(num_bins * bin_widths), width=bin_widths, align="edge",
color=(1.0, 0.0, 0.0, 0.5), label="$g(x)$"
)
plt.xlabel("$x$")
if j == 0:
plt.ylabel("$y$")
plt.legend(loc="upper left")
weighted_function_value_sum += np.sum(weighted_function_values)
bin_weights = np.zeros(num_bins)
for i in range(num_bins):
bin_weights[i] = np.sum(function_values[random_bins == i])
bin_weights *= bin_widths
#bin_splits = 1 + K * bin_weights / np.sum(bin_weights)
bin_splits = 1 + K * ((bin_weights / np.sum(bin_weights) - 1) / np.log(bin_weights / np.sum(bin_weights))) ** alpha
bin_splits = bin_splits.astype(int)
refined_bin_edges = np.zeros(1 + np.sum(bin_splits))
refined_bin_weights = np.zeros(refined_bin_edges.shape[0] - 1)
index = 0
for i in range(num_bins):
new_bin_edges = np.linspace(start=bin_edges[i], stop=bin_edges[i+1], num=bin_splits[i], endpoint=False)
refined_bin_edges[index:index+bin_splits[i]] = new_bin_edges
refined_bin_weights[index:index+bin_splits[i]] = bin_weights[i] / bin_splits[i]
index += bin_splits[i]
refined_bin_edges[-1] = 1.0
average_bin_weight = np.mean(bin_weights)
new_bin_edges = np.zeros_like(bin_edges)
current_sum = 0
current_refined_index = 0
for i in range(num_bins-1):
while current_sum < average_bin_weight:
current_sum += refined_bin_weights[current_refined_index]
current_refined_index += 1
current_sum -= average_bin_weight
new_bin_edges[i + 1] = refined_bin_edges[current_refined_index]
new_bin_edges[-1] = 1
bin_edges = new_bin_edges
bin_widths = bin_edges[1:] - bin_edges[:-1]
if make_plots:
plt.savefig("pi_vegas.png")
integral_estimate = weighted_function_value_sum / (iterations * samples_per_iteration)
return 4 * (1.0 - integral_estimate)
if __name__ == "__main__":
print(f"Estimate: {vegas(make_plots=True)} s")
#plt.show()
| [
"numpy.mean",
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplot",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.random.randint",
"numpy.zeros",
"... | [((51, 82), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20.0, 6.0)'}), '(figsize=(20.0, 6.0))\n', (61, 82), True, 'import matplotlib.pyplot as plt\n'), ((284, 345), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': '(1)', 'endpoint': '(True)', 'num': '(num_bins + 1)'}), '(start=0, stop=1, endpoint=True, num=num_bins + 1)\n', (295, 345), True, 'import numpy as np\n'), ((110, 129), 'numpy.sqrt', 'np.sqrt', (['(1 - x ** 2)'], {}), '(1 - x ** 2)\n', (117, 129), True, 'import numpy as np\n'), ((489, 526), 'numpy.random.rand', 'np.random.rand', (['samples_per_iteration'], {}), '(samples_per_iteration)\n', (503, 526), True, 'import numpy as np\n'), ((549, 616), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'num_bins', 'size': 'samples_per_iteration'}), '(low=0, high=num_bins, size=samples_per_iteration)\n', (566, 616), True, 'import numpy as np\n'), ((1832, 1864), 'numpy.sum', 'np.sum', (['weighted_function_values'], {}), '(weighted_function_values)\n', (1838, 1864), True, 'import numpy as np\n'), ((1888, 1906), 'numpy.zeros', 'np.zeros', (['num_bins'], {}), '(num_bins)\n', (1896, 1906), True, 'import numpy as np\n'), ((2369, 2409), 'numpy.zeros', 'np.zeros', (['(refined_bin_edges.shape[0] - 1)'], {}), '(refined_bin_edges.shape[0] - 1)\n', (2377, 2409), True, 'import numpy as np\n'), ((2844, 2864), 'numpy.mean', 'np.mean', (['bin_weights'], {}), '(bin_weights)\n', (2851, 2864), True, 'import numpy as np\n'), ((2889, 2913), 'numpy.zeros_like', 'np.zeros_like', (['bin_edges'], {}), '(bin_edges)\n', (2902, 2913), True, 'import numpy as np\n'), ((3443, 3470), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pi_vegas.png"""'], {}), "('pi_vegas.png')\n", (3454, 3470), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1080), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'iterations', '(j + 1)'], {}), '(1, iterations, j + 1)\n', (1058, 1080), True, 'import matplotlib.pyplot as plt\n'), ((1091, 1105), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (1099, 1105), True, 'import matplotlib.pyplot as plt\n'), ((1118, 1132), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1126, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1154, 1213), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.001)', 'stop': '(1.0)', 'num': '(1000)', 'endpoint': '(True)'}), '(start=0.001, stop=1.0, num=1000, endpoint=True)\n', (1165, 1213), True, 'import numpy as np\n'), ((1226, 1356), 'matplotlib.pyplot.vlines', 'plt.vlines', ([], {'x': 'random_numbers_transformed[:100]', 'ymin': '(0)', 'ymax': 'weighted_function_values[:100]', 'color': '"""black"""', 'label': '"""$samples$"""'}), "(x=random_numbers_transformed[:100], ymin=0, ymax=\n weighted_function_values[:100], color='black', label='$samples$')\n", (1236, 1356), True, 'import matplotlib.pyplot as plt\n'), ((1466, 1619), 'matplotlib.pyplot.bar', 'plt.bar', ([], {'x': 'bin_edges[:-1]', 'height': '(EXPECTED_AREA / (num_bins * bin_widths))', 'width': 'bin_widths', 'align': '"""edge"""', 'color': '(1.0, 0.0, 0.0, 0.5)', 'label': '"""$g(x)$"""'}), "(x=bin_edges[:-1], height=EXPECTED_AREA / (num_bins * bin_widths),\n width=bin_widths, align='edge', color=(1.0, 0.0, 0.0, 0.5), label='$g(x)$')\n", (1473, 1619), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1689), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (1682, 1689), True, 'import matplotlib.pyplot as plt\n'), ((1970, 2011), 'numpy.sum', 'np.sum', (['function_values[random_bins == i]'], {}), '(function_values[random_bins == i])\n', (1976, 2011), True, 'import numpy as np\n'), ((2490, 2583), 'numpy.linspace', 'np.linspace', ([], {'start': 'bin_edges[i]', 'stop': 'bin_edges[i + 1]', 'num': 'bin_splits[i]', 'endpoint': '(False)'}), '(start=bin_edges[i], stop=bin_edges[i + 1], num=bin_splits[i],\n endpoint=False)\n', (2501, 2583), True, 'import numpy as np\n'), ((1729, 1746), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""$y$"""'], {}), "('$y$')\n", (1739, 1746), True, 'import matplotlib.pyplot as plt\n'), ((1763, 1791), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (1773, 1791), True, 'import matplotlib.pyplot as plt\n'), ((2319, 2337), 'numpy.sum', 'np.sum', (['bin_splits'], {}), '(bin_splits)\n', (2325, 2337), True, 'import numpy as np\n'), ((2155, 2174), 'numpy.sum', 'np.sum', (['bin_weights'], {}), '(bin_weights)\n', (2161, 2174), True, 'import numpy as np\n'), ((2203, 2222), 'numpy.sum', 'np.sum', (['bin_weights'], {}), '(bin_weights)\n', (2209, 2222), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import sys
import math
import os.path as osp
import torch
from torch_geometric.data import Data
from torch_geometric.data import Dataset
from torch_geometric.data import InMemoryDataset
from sklearn.preprocessing import StandardScaler
from torch_geometric.data import DataLoader
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
from torch_geometric.nn import GCNConv
from torch_geometric.nn import DataParallel
from torch import nn
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
from ovito.io import import_file
from ovito.io import export_file
import datetime
# Record wall-clock start time so total runtime can be reported later.
starttime = datetime.datetime.now()
# Load the atomistic dump file(s) to run prediction on; the '*' glob lets
# OVITO pick up a whole sequence of frames matching the pattern.
file_path = './predict/z_indent/average_50_*.dump'
node = import_file(file_path)
def voted(data, model1, model2, model3, *extra_models):
    """Majority-vote ensemble prediction over three (or more) models.

    Each model is called on ``data`` and its per-node argmax class label is
    taken; the per-node majority across all models is returned.

    Parameters
    ----------
    data : object
        Input forwarded unchanged to every model (e.g. a PyG ``Data`` batch).
    model1, model2, model3 : callable
        Models returning an (N, C) score tensor for ``data``.
    *extra_models : callable, optional
        Additional models to include in the vote (backward-compatible
        generalization of the original fixed three-model ensemble).

    Returns
    -------
    pandas.DataFrame
        Shape (1, N), float dtype (matching the original implementation),
        holding the winning class label per node.
    """
    columns = []
    for model in (model1, model2, model3) + extra_models:
        # argmax over the class dimension -> hard integer label per node
        labels = model(data).max(1)[1].cpu().numpy()
        columns.append(np.reshape(labels, (-1, 1)))
    stacked = np.column_stack(columns)  # (N, num_models) integer labels
    result = np.zeros((stacked.shape[0], 1))
    for j in range(stacked.shape[0]):
        # bincount + argmax = majority vote; ties resolve to the smallest label
        result[j] = np.argmax(np.bincount(stacked[j]))
    return pd.DataFrame(np.reshape(result, (1, -1)))
def compute_myproperty(frame, data, result):
variant = result.to_numpy()
variant = variant.reshape(-1)
variant = variant.tolist()
return variant
class PredictDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(PredictDataset, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
return []
@property
def processed_file_names(self):
return ['/data2/GCN_dataset/dataset_root/predict_set.pt']
def download(self):
pass
def process(self):
data_list = []
for i in range(len(predict_content)) :
content = predict_content[i]
graph = predict_graph[i]
node_feature = content[:]
node_feature = torch.FloatTensor(node_feature).squeeze(1)
source_nodes = graph[:,0]
target_nodes = graph[:,1]
edge_index = torch.tensor([source_nodes, target_nodes], dtype=torch.long)
x = node_feature
num_nodes = x.shape[0]
data = Data(x=x, edge_index=edge_index, num_nodes = num_nodes)
data_list.append(data)
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
predict_set = PredictDataset(root='/data2/GCN_dataset/dataset_root/')
class GSGNet(torch.nn.Module):
def __init__(self):
super(GSGNet, self).__init__()
self.conv1 = SAGEConv(predict_set.num_node_features, 40)
self.conv2 = SAGEConv(40, 24)
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = self.conv1(x, edge_index)
x = F.relu(x, inplace = True)
x = F.dropout(x, p = 0.5,training=self.training)
x = self.conv2(x, edge_index)
#return x
return F.log_softmax(x, dim=1)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model1 = GSGNet()
model1_checkpoint = torch.load('./Trained_Model/train_model_gsg_1.pth.tar')
model1.load_state_dict(model1_checkpoint['state_dict'])
model1.to(device)
model1.eval()
model2 = GSGNet()
model2_checkpoint = torch.load('./Trained_Model/train_model_gsg_2.pth.tar')
model2.load_state_dict(model2_checkpoint['state_dict'])
model2.to(device)
model2.eval()
model3 = GSGNet()
model3_checkpoint = torch.load('./Trained_Model/train_model_gsg_3.pth.tar')
model3.load_state_dict(model3_checkpoint['state_dict'])
model3.to(device)
model3.eval()
predict_loader = DataLoader(predict_set, batch_size = 1,shuffle=False)
i = 0
for data in predict_loader :
data = data.to(device)
pipe = node.compute(i)
predict = voted(data, model1, model2, model3)
result = compute_myproperty(i, data, predict)
pipe.particles_.create_property('Variants', data = result)
export_file(pipe, './predict/z_indent/predict_%s.dump'%i, 'lammps/dump',columns = ['Particle Identifier','Particle Type','Position.X','Position.Y','Position.Z','Variants'],frame = i)
i = i + 1
print('Done')
endtime = datetime.datetime.now()
print (endtime - starttime)
| [
"numpy.column_stack",
"torch.cuda.is_available",
"numpy.reshape",
"ovito.io.export_file",
"pandas.DataFrame",
"torch_geometric.nn.SAGEConv",
"torch.nn.functional.dropout",
"torch.save",
"torch.nn.functional.relu",
"torch.nn.functional.log_softmax",
"numpy.bincount",
"torch_geometric.data.Data"... | [((723, 746), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (744, 746), False, 'import datetime\n'), ((816, 838), 'ovito.io.import_file', 'import_file', (['file_path'], {}), '(file_path)\n', (827, 838), False, 'from ovito.io import import_file\n'), ((3282, 3337), 'torch.load', 'torch.load', (['"""./Trained_Model/train_model_gsg_1.pth.tar"""'], {}), "('./Trained_Model/train_model_gsg_1.pth.tar')\n", (3292, 3337), False, 'import torch\n'), ((3465, 3520), 'torch.load', 'torch.load', (['"""./Trained_Model/train_model_gsg_2.pth.tar"""'], {}), "('./Trained_Model/train_model_gsg_2.pth.tar')\n", (3475, 3520), False, 'import torch\n'), ((3648, 3703), 'torch.load', 'torch.load', (['"""./Trained_Model/train_model_gsg_3.pth.tar"""'], {}), "('./Trained_Model/train_model_gsg_3.pth.tar')\n", (3658, 3703), False, 'import torch\n'), ((3810, 3862), 'torch_geometric.data.DataLoader', 'DataLoader', (['predict_set'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(predict_set, batch_size=1, shuffle=False)\n', (3820, 3862), False, 'from torch_geometric.data import DataLoader\n'), ((4320, 4343), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4341, 4343), False, 'import datetime\n'), ((964, 993), 'numpy.reshape', 'np.reshape', (['predict1', '(-1, 1)'], {}), '(predict1, (-1, 1))\n', (974, 993), True, 'import numpy as np\n'), ((1077, 1106), 'numpy.reshape', 'np.reshape', (['predict2', '(-1, 1)'], {}), '(predict2, (-1, 1))\n', (1087, 1106), True, 'import numpy as np\n'), ((1190, 1219), 'numpy.reshape', 'np.reshape', (['predict3', '(-1, 1)'], {}), '(predict3, (-1, 1))\n', (1200, 1219), True, 'import numpy as np\n'), ((1231, 1278), 'numpy.column_stack', 'np.column_stack', (['(predict1, predict2, predict3)'], {}), '((predict1, predict2, predict3))\n', (1246, 1278), True, 'import numpy as np\n'), ((1289, 1320), 'numpy.zeros', 'np.zeros', (['(predict.shape[0], 1)'], {}), '((predict.shape[0], 1))\n', (1297, 1320), True, 
'import numpy as np\n'), ((1415, 1442), 'numpy.reshape', 'np.reshape', (['result', '(1, -1)'], {}), '(result, (1, -1))\n', (1425, 1442), True, 'import numpy as np\n'), ((1451, 1471), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (1463, 1471), True, 'import pandas as pd\n'), ((4102, 4297), 'ovito.io.export_file', 'export_file', (['pipe', "('./predict/z_indent/predict_%s.dump' % i)", '"""lammps/dump"""'], {'columns': "['Particle Identifier', 'Particle Type', 'Position.X', 'Position.Y',\n 'Position.Z', 'Variants']", 'frame': 'i'}), "(pipe, './predict/z_indent/predict_%s.dump' % i, 'lammps/dump',\n columns=['Particle Identifier', 'Particle Type', 'Position.X',\n 'Position.Y', 'Position.Z', 'Variants'], frame=i)\n", (4113, 4297), False, 'from ovito.io import export_file\n'), ((1838, 1873), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), '(self.processed_paths[0])\n', (1848, 1873), False, 'import torch\n'), ((2608, 2659), 'torch.save', 'torch.save', (['(data, slices)', 'self.processed_paths[0]'], {}), '((data, slices), self.processed_paths[0])\n', (2618, 2659), False, 'import torch\n'), ((2832, 2875), 'torch_geometric.nn.SAGEConv', 'SAGEConv', (['predict_set.num_node_features', '(40)'], {}), '(predict_set.num_node_features, 40)\n', (2840, 2875), False, 'from torch_geometric.nn import SAGEConv\n'), ((2891, 2907), 'torch_geometric.nn.SAGEConv', 'SAGEConv', (['(40)', '(24)'], {}), '(40, 24)\n', (2899, 2907), False, 'from torch_geometric.nn import SAGEConv\n'), ((3016, 3039), 'torch.nn.functional.relu', 'F.relu', (['x'], {'inplace': '(True)'}), '(x, inplace=True)\n', (3022, 3039), True, 'import torch.nn.functional as F\n'), ((3048, 3091), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (3057, 3091), True, 'import torch.nn.functional as F\n'), ((3146, 3169), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', 
(3159, 3169), True, 'import torch.nn.functional as F\n'), ((3205, 3230), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3228, 3230), False, 'import torch\n'), ((1380, 1403), 'numpy.bincount', 'np.bincount', (['predict[j]'], {}), '(predict[j])\n', (1391, 1403), True, 'import numpy as np\n'), ((2366, 2426), 'torch.tensor', 'torch.tensor', (['[source_nodes, target_nodes]'], {'dtype': 'torch.long'}), '([source_nodes, target_nodes], dtype=torch.long)\n', (2378, 2426), False, 'import torch\n'), ((2483, 2536), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index', 'num_nodes': 'num_nodes'}), '(x=x, edge_index=edge_index, num_nodes=num_nodes)\n', (2487, 2536), False, 'from torch_geometric.data import Data\n'), ((2249, 2280), 'torch.FloatTensor', 'torch.FloatTensor', (['node_feature'], {}), '(node_feature)\n', (2266, 2280), False, 'import torch\n')] |
import logging
import numpy as np
from tramp.models import glm_generative
from tramp.experiments import save_experiments, BayesOptimalScenario
from tramp.algos import EarlyStopping
def run_perceptron(N, alpha, p_pos):
model = glm_generative(
N=N, alpha=alpha,
ensemble_type="gaussian", prior_type="binary", output_type="sgn",
prior_p_pos=p_pos
)
scenario = BayesOptimalScenario(model, x_ids=["x"])
early = EarlyStopping()
records = scenario.run_all(max_iter=200, callback=early)
return records
if __name__=="__main__":
csv_file = __file__.replace(".py", ".csv")
logging.basicConfig(level=logging.INFO)
save_experiments(
run_perceptron, csv_file,
N=1000, p_pos=[0.25, 0.50, 0.75], alpha=np.linspace(0, 2, 101)[1:]
) | [
"logging.basicConfig",
"tramp.experiments.BayesOptimalScenario",
"tramp.models.glm_generative",
"numpy.linspace",
"tramp.algos.EarlyStopping"
] | [((231, 353), 'tramp.models.glm_generative', 'glm_generative', ([], {'N': 'N', 'alpha': 'alpha', 'ensemble_type': '"""gaussian"""', 'prior_type': '"""binary"""', 'output_type': '"""sgn"""', 'prior_p_pos': 'p_pos'}), "(N=N, alpha=alpha, ensemble_type='gaussian', prior_type=\n 'binary', output_type='sgn', prior_p_pos=p_pos)\n", (245, 353), False, 'from tramp.models import glm_generative\n'), ((396, 436), 'tramp.experiments.BayesOptimalScenario', 'BayesOptimalScenario', (['model'], {'x_ids': "['x']"}), "(model, x_ids=['x'])\n", (416, 436), False, 'from tramp.experiments import save_experiments, BayesOptimalScenario\n'), ((449, 464), 'tramp.algos.EarlyStopping', 'EarlyStopping', ([], {}), '()\n', (462, 464), False, 'from tramp.algos import EarlyStopping\n'), ((622, 661), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (641, 661), False, 'import logging\n'), ((767, 789), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(101)'], {}), '(0, 2, 101)\n', (778, 789), True, 'import numpy as np\n')] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import logging
import os
import time
import warnings
from collections import defaultdict
from multiprocessing.pool import ThreadPool
import boto3
import numpy as np
from lookoutvision.manifest import Manifest
from lookoutvision.metrics import Metrics
class LookoutForVision:
"""LookoutForVision class to build, train and deploy.
This class helps to build, train and deploy a Amazon Lookout for Vision
project. It implements the three most common methods for model deployment:
# - .fit()
# - .deploy()
# - .predict()
Attributes:
project_name The name of the Amazon Lookout for Vision project.
lv The Amazon Lookout for Vision boto3 client.
model_version The (initial) model version.
"""
def __init__(self, project_name, model_version="1"):
"""Build, train and deploy Amazon Lookout for Vision models.
Technical documentation on how Amazon Lookout for Vision works can be
found at: https://aws.amazon.com/lookout-for-vision/
Args:
project_name (str): Name of the Amazon Lookout for Vision project to interact with.
model_version (str): The (initial) model version.
"""
# super(LookoutForVision, self).__init__()
self.project_name = project_name
self.lv = boto3.client("lookoutvision")
self.s3 = boto3.client("s3")
self.model_version = model_version
self.describe_project()
@classmethod
def _get_param_names(self):
"""Internal get parameter names helper.
It will retrieve all the parameters used within your class.
Args:
None
Returns:
list: all class parameters
"""
init = getattr(self.__init__, "deprecated_original", self.__init__)
init_signature = inspect.signature(init)
parameters = [p for p in init_signature.parameters.values() if p.name != "self" and p.kind != p.VAR_KEYWORD]
return sorted([p.name for p in parameters])
def describe_project(self):
"""Describe a project.
Args:
None
Returns:
json: The project details
"""
project = {}
# First try to describe the project given by the name:
try:
project = self.lv.describe_project(ProjectName=self.project_name)["ProjectDescription"]["ProjectArn"]
print("Project already exists with arn: " + project)
except Exception as e:
if "ResourceNotFoundException" in str(e):
print(
f"Project {self.project_name} does not exist yet...use the create_project() method to set up your first project"
)
else:
raise Exception
return project
def create_project(self):
"""Create a project.
Args:
None
Returns:
json: The project details
"""
project = {}
# First try to create a new project:
try:
project = self.lv.create_project(ProjectName=self.project_name)["ProjectMetadata"]["ProjectArn"]
print(f"Creating the project: {self.project_name}")
except Exception as e:
if "ConflictException" in str(e):
project = self.lv.describe_project(ProjectName=self.project_name)["ProjectDescription"]["ProjectArn"]
else:
raise Exception
return project
def get_params(self, deep=True):
"""Get class parameters.
Args:
deep (bool): Make a deep copy of parameters for output.
Returns:
json: an object with the internal parameters and their values.
"""
output = dict()
for key in self._get_param_names():
value = getattr(self, key)
if deep and hasattr(value, "get_params"):
deep_items = value.get_params().items()
output.update((key + "__" + i, val) for i, val in deep_items)
output[key] = value
return output
def set_params(self, **params):
"""Set class parameters.
Args:
**params (dict): New parameters in key=value format.
Returns:
self: the class itself
"""
if not params:
return self
valid = self.get_params(deep=True)
nested = defaultdict(dict)
for key, value in params.items():
key, delim, sub = key.partition("__")
if key not in valid:
raise ValueError(
"Invalid parameter %s for class %s. "
"Check the list of available parameters "
"with `cls.get_params().keys()`." % (key, self)
)
if delim:
nested[key][sub] = value
else:
setattr(self, key, value)
valid[key] = value
for key, sub_params in nested.items():
valid[key].set_params(**sub_params)
return self
def update_datasets(self, dataset_type, wait=True):
"""Create a dataset.
Args:
dataset_type (dict): A setting from where to get input data from
Format of this input is of type:
"train": {
"bucket": "my_s3_bucket",
"key": "training.manifest",
"version": "1"
},
"test": {
"bucket": "my_s3_bucket",
"key": "validation.manifest",
"version": "1"
}
wait (bool): Either to wait in the console uppon succes or escape function
Returns:
json: an object with metadata on success
"""
# For each dataset possible...
p = self.lv.describe_project(ProjectName=self.project_name)
for item in p["ProjectDescription"]["Datasets"]:
dtype = item["DatasetType"]
try:
self.lv.delete_dataset(ProjectName=self.project_name, DatasetType=dtype)
except Exception as e:
print("Error in dataset deletion with exception: {}".format(e))
print("Please check CloudWatch logs for troubleshooting!")
return self.create_datasets(dataset_type=dataset_type, wait=wait)
def create_datasets(self, dataset_type, wait=True):
"""Create a dataset.
Args:
dataset_type (dict): A setting from where to get input data from
Format of this input is of type:
"train": {
"bucket": "my_s3_bucket",
"key": "training.manifest",
"version": "1"
},
"test": {
"bucket": "my_s3_bucket",
"key": "validation.manifest",
"version": "1"
}
wait (bool): Either to wait in the console uppon succes or escape function
Returns:
json: an object with metadata on success
"""
datasets = {}
# For each dataset used...
for key in dataset_type:
# ...create a dataset
d_type = "train" if (key == "training" or key == "train") else "test"
try:
dataset = self.lv.create_dataset(
ProjectName=self.project_name,
DatasetType=d_type,
DatasetSource={
"GroundTruthManifest": {
"S3Object": {"Bucket": dataset_type[key]["bucket"], "Key": dataset_type[key]["key"]}
}
},
)["DatasetMetadata"]
# Log output
datasets[key] = dataset
except Exception as e:
if "ConflictException" in str(e):
print("Dataset already existed in the project")
print("If the dataset already exists try updating it with: update_datasets")
else:
print("Error in create_datasets with exception: {}".format(e))
raise Exception
return datasets
# Notify user when creation is done:
print("Creating dataset(s):", end=" ")
if wait:
stati = ["CREATE_IN_PROGRESS"]
while (np.array(stati) == "CREATE_IN_PROGRESS").any():
stati = []
for key in dataset_type:
d_type = "train" if (key == "training" or key == "train") else "test"
stat = self.lv.describe_dataset(ProjectName=self.project_name, DatasetType=d_type)[
"DatasetDescription"
]
stati.append(stat["Status"])
datasets[key]["Status"] = stat["Status"]
datasets[key]["StatusMessage"] = stat["StatusMessage"]
if stat["Status"] == "CREATE_FAILED":
warnings.warn(
"Failed to create dataset {} with status message: {}.".format(key, stat["StatusMessage"])
)
print("-", end="")
time.sleep(5)
print("!")
return datasets
def fit(self, output_bucket, model_prefix=None, train_and_test=True, wait=True):
"""Train the model.
Create a model from the datasets
At first check whether the minimum no of images are available to train the model.
There should be min 20 normal and 10 anomalous images in training/train dataset.
Args:
output_bucket (str): The output S3 bucket to be used for model logging.
model_prefix (str): Optional to add a model prefix name for logging.
train_and_test (bool): Whether to us train or train and test set
wait (bool): Either to wait in the console uppon succes or escape function
Returns:
json: an object with metadata on success
"""
ready_to_go = True
test_dataset = {"Status": "No test dataset used!"}
train_dataset = self.lv.describe_dataset(ProjectName=self.project_name, DatasetType="train")[
"DatasetDescription"
]
normal_no_images_train = train_dataset["ImageStats"]["Normal"]
anomaly_no_images_train = train_dataset["ImageStats"]["Anomaly"]
try:
if train_and_test:
test_dataset = self.lv.describe_dataset(ProjectName=self.project_name, DatasetType="test")[
"DatasetDescription"
]
normal_no_images_test = test_dataset["ImageStats"]["Normal"]
anomaly_no_images_test = test_dataset["ImageStats"]["Anomaly"]
if normal_no_images_train < 10 and normal_no_images_test < 10 and anomaly_no_images_test < 10:
ready_to_go = False
else:
if normal_no_images_train < 20 and anomaly_no_images_train < 10:
ready_to_go = False
except Exception as e:
if "ResourceNotFoundException" in str(e):
print(
"There is no Test Dataset, hence model will be trained with Training Dataset only and can not be validated with Test Dataset"
)
if ready_to_go:
try:
model = self.lv.create_model(
ProjectName=self.project_name,
OutputConfig={
"S3Location": {
"Bucket": output_bucket,
"Prefix": model_prefix if model_prefix is not None else "",
}
},
)["ModelMetadata"]
if wait:
print("Model training started:", end=" ")
version = model["ModelVersion"]
status = model["Status"]
while status not in ["TRAINED", "TRAINING_FAILED"]:
update = self.lv.describe_model(ProjectName=self.project_name, ModelVersion=version)[
"ModelDescription"
]
status = update["Status"]
print("-", end="")
time.sleep(60)
print("!")
else:
print(
"""Model is being created. Training will take a while.\n
Please check your Management Console on progress.\n
You can continue with deployment once the model is trained.\n
"""
)
# Return success
return {
"status": "Success!",
"project": self.project_name,
"train_datasets": train_dataset,
"test_datasets": test_dataset,
"model": model,
}
except Exception as e:
if "ServiceQuotaExceededException" in str(e):
print(
"You've reached the limit (2) for concurrent model trainings. Try again after at least one model has finished training. You can also request a limit increase. "
)
else:
print(
"""Number of images is not sufficient, at least 20 normal and 10 anomaly\n
imgages are required for training images
"""
)
return {
"status": "Failure!",
"project": self.project_name,
"train_datasets": train_dataset,
"test_datasets": test_dataset,
"model": None,
}
def deploy(self, min_inf_units=1, model_version=None, wait=True):
"""Deploy your model.
Args:
min_inf_units (int): Minimal number of inference units.
model_version (str): The model version to deploy.
wait (bool): Either to wait in the console uppon succes or escape function
Returns:
json: an object with metadata on success
"""
# Check the current status of the model
current_status = self.lv.describe_model(
ProjectName=self.project_name, ModelVersion=self.model_version if model_version is None else model_version
)["ModelDescription"]["Status"]
# If model status is TRAINED , then only start the model.
# otherwise print the message with actual status that it can not be started
if current_status != "TRAINED":
print(
"current model with version {} is in the status {}, hence it can not be started/hosted. The model needs to be in TRAINED status to be started/hosted".format(
self.model_version if model_version is None else model_version, current_status
)
)
else:
# Start the model either using the internal model version
# or use the one supplied to this method:
status = self.lv.start_model(
ProjectName=self.project_name,
ModelVersion=self.model_version if model_version is None else model_version,
MinInferenceUnits=min_inf_units,
)["Status"]
# Wait until model is trained:
print("Model will be hosted now")
if wait:
while status not in ["HOSTED", "HOSTING_FAILED"]:
status = self.lv.describe_model(
ProjectName=self.project_name,
ModelVersion=self.model_version if model_version is None else model_version,
)["ModelDescription"]["Status"]
print("-", end="")
time.sleep(60)
print("!")
print("Your model is now hosted!")
# Return success:
return {
"status": status,
"model": self.lv.describe_model(
ProjectName=self.project_name,
ModelVersion=self.model_version if model_version is None else model_version,
)["ModelDescription"],
}
def predict(self, model_version=None, local_file="", bucket="", key="", content_type="image/jpeg"):
"""Predict using your Amazon Lookout for Vision model.
You can either predict from S3 object or local images.
Args:
model_version (str): The model version to deploy.
local_file (str): Path to local image.
bucket (str): S3 bucket name.
key (str): Object in S3 bucket.
content_type (str): Either "image/jpeg" or "image/png".
Returns:
json: an object with results of prediction
"""
# If no paths are set return warning:
if local_file == "" and bucket == "" and key == "":
print("Warning: either local_file OR bucket & key need to be present!")
return {"Source": {"Type": "warning"}, "IsAnomalous": None, "Confidence": -1.0}
# If paths for local file AND S3 bucket are set return another warning:
if local_file != "" and bucket != "" and key != "":
print("Warning: either local_file OR bucket & key need to be present!")
return {"Source": {"Type": "warning"}, "IsAnomalous": None, "Confidence": -1.0}
# If method is used properly then...
obj = None
if local_file != "":
# ...set obj to bytearray using local image...
try:
with open(local_file, "rb") as image:
f = image.read()
obj = bytearray(f)
except IsADirectoryError:
print("Warning: you specified a directory, instead of a single file path")
print("Maybe you would like to try '_batch_predict_local' or 'batch_predict_s3' method!")
return
elif bucket != "" and key != "":
# ...or a byte object by pulling from S3
obj = boto3.client("s3").get_object(Bucket=bucket, Key=key)["Body"].read()
else:
# If file not found:
print("Warning: No file found!")
return {"Source": {"Type": "warning"}, "IsAnomalous": None, "Confidence": -1.0}
# Predict using your model:
result = self.lv.detect_anomalies(
ProjectName=self.project_name,
ModelVersion=self.model_version if model_version is None else model_version,
Body=obj,
ContentType=content_type,
)["DetectAnomalyResult"]
return result
def _batch_predict_local(self, local_path, model_version=None, content_type="image/jpeg"):
"""Predict for all the images using your Amazon Lookout for Vision model
from S3 objects.
Args:
local_path (str): Path to local images.
model_version (str): The model version to deploy.
content_type (str): Either "image/jpeg" or "image/png".
Returns:
json: s3 objects with location which stores the results of prediction
"""
predictions = []
files = os.listdir(local_path)
for file in files:
filename = "{}/{}".format(local_path, file)
# ...set obj to bytearray using local image...
with open(filename, "rb") as image:
f = image.read()
obj = bytearray(f)
try:
# Predict using your model:
result = self.lv.detect_anomalies(
ProjectName=self.project_name,
ModelVersion=self.model_version if model_version is None else model_version,
Body=obj,
ContentType=content_type,
)["DetectAnomalyResult"]
predictions.append(result)
except Exception as e:
print("Warning: prediction failed for file: {}".format(filename))
print("with error message: {}".format(e))
return {"status": "Success!", "predicted_result": predictions}
def batch_predict(
self,
model_version=None,
local_path=None,
input_bucket="",
input_prefix=None,
output_bucket="",
output_prefix=None,
content_type="image/jpeg",
):
"""Predict for all the images using your Amazon Lookout for Vision model
from S3 objects.
Args:
model_version (str): The model version to deploy.
local_path (str): Path to local images.
input_bucket (str): S3 bucket name for input images.
input_prefix(str): S3 folder names ( if any) for input bucket location
output_bucket (str): S3 bucket name to store predicted results.
output_prefix(str): S3 folder names ( if any) for output bucket location to store predicted results
content_type (str): Either "image/jpeg" or "image/png".
Returns:
json: s3 objects with location which stores the results of prediction
"""
if local_path is not None:
return self._batch_predict_local(
local_path=local_path, model_version=model_version, content_type=content_type
)
# If no input bucket is set return warning:
if input_bucket == "":
print("Warning: S3 bucket need to be present for input images!")
return {"status": "Error!", "predicted_result": None}
# If no output bucket is set return warning:
if output_bucket == "":
print("Warning: S3 bucket need to be present to load prediction result!")
return {"status": "Error!", "predicted_result": None}
obj = None
if input_bucket != "":
success = {}
kwargs = {"Bucket": input_bucket}
if input_prefix is not None:
if isinstance(input_prefix, str):
kwargs["Prefix"] = input_prefix
else:
kwargs["Prefix"] = str(input_prefix)
paginator = self.s3.get_paginator("list_objects_v2")
pages = paginator.paginate(**kwargs)
# Try...
try:
for page in pages:
for obj in page["Contents"]:
key = obj["Key"]
if key[-1] == "/":
continue
body = self.s3.get_object(Bucket=input_bucket, Key=key)["Body"].read()
file_name = key.split("/")[-1]
# Predict using your model:
result = self.lv.detect_anomalies(
ProjectName=self.project_name,
ModelVersion=self.model_version if model_version is None else model_version,
Body=body,
ContentType=content_type,
)["DetectAnomalyResult"]
# Upload the manifest to S3
self.s3.put_object(
Bucket=output_bucket,
Key=output_prefix + "{}.json".format(file_name),
Body=json.dumps(result),
ServerSideEncryption="AES256",
)
success = {
"output_bucket": output_bucket,
"prdected_file_key": output_prefix + "{}.json".format(file_name),
}
print("Predicted output is uploaded to s3 :" + json.dumps(success))
except Exception as e:
logging.error(e)
print("Key object corresponding to error :" + key)
return {"status": "Success!", "predicted_result": "s3://{}/{}".format(output_bucket, output_prefix)}
else:
# If file not found:
print("Warning: No file found!")
return {"status": "Error!", "predicted_result": None}
def stop_model(self, model_version=None, wait=True):
"""Stop deployed model.
Args:
model_version (str): The model version to deploy.
Returns:
json: an object with results of prediction
"""
response = {}
try:
# Stop the model
ModelVersion = self.model_version if model_version is None else model_version
print("Stopping model version " + ModelVersion + " for project " + self.project_name)
response = self.lv.stop_model(ProjectName=self.project_name, ModelVersion=ModelVersion)
status = response["Status"]
print("Model will be stopped now")
if wait:
while status != "TRAINED":
status = self.lv.describe_model(ProjectName=self.project_name, ModelVersion=ModelVersion)[
"ModelDescription"
]["Status"]
print("-", end="")
time.sleep(5)
print("!")
print("Your model is now stopped!")
print("Status: " + response["Status"])
except Exception as e:
response["Error"] = e
print("Something went wrong: ", e)
return response
def train_one_fold(
self,
input_bucket: str,
output_bucket: str,
s3_path: str,
model_prefix: str,
i_split: int,
delete_kfold_projects: bool = True,
):
"""
Train one of k folds by creating for each fold a separate project with it's i-th fold dataset.
Arguments:
input_bucket (str): S3 bucket name for input images.
output_bucket (str): The output S3 bucket to be used for model evaluation results.
s3_path (str): S3 path containing the k different splits
model_prefix (str): Optional to add a model prefix name for the evaluation results.
i_split (int): number of the i-th split. This is a number with 0 <= i_split <= n_splits
delete_kfold_projects (bool): delete projects which were created for k-fold cross validation
Returns:
response (dict): the response of the model fit call
"""
s3_path = s3_path + "/" if not s3_path.endswith("/") else s3_path
mft = Manifest(
bucket=input_bucket, s3_path=f"{s3_path}{self.project_name}_{i_split}/", datasets=["training", "validation"]
)
mft_resp = mft.push_manifests()
l4v = LookoutForVision(project_name=f"{self.project_name}_{i_split}")
# If project does not exist: create it
l4v.create_project()
# based on the manifest files in S3 create your Lookout for Vision datasets:
l4v.create_datasets(mft_resp, wait=True)
try:
# train the model
response = l4v.fit(output_bucket=output_bucket, model_prefix=model_prefix, wait=True)
finally:
if delete_kfold_projects:
self.delete_lookoutvision_project(f"{self.project_name}_{i_split}")
return response
def train_k_fold(
self,
input_bucket: str,
output_bucket: str,
s3_path: str,
n_splits: int,
parallel_training: bool = True,
delete_kfold_projects: bool = True,
):
"""
Train k folds by creating for each fold a separate project and returning the evaluation results
Arguments:
input_bucket (str): S3 bucket name for input images.
output_bucket (str): The output S3 bucket to be used for model evaluation results.
s3_path (str): S3 path containing the k different splits
n_splits (int): number of splits within the k-fold cross validation. n_splits = k in that regard.
parallel_training (bool): boolean to do parallel training (True) or in sequence (False)
delete_kfold_projects (bool): delete projects which were created for k-fold cross validation
Returns:
kfold_summary (pd.DataFrame): the summary of the evaluation results of all the k models
"""
model_prefix = "k_fold_"
if not parallel_training:
responses = []
for i_split in range(n_splits):
response = self.train_one_fold(
input_bucket=input_bucket,
output_bucket=output_bucket,
s3_path=s3_path,
model_prefix=model_prefix,
i_split=i_split,
delete_kfold_projects=delete_kfold_projects,
)
responses.append(response)
else:
parallel_loop_input = []
for i_split in range(n_splits):
parallel_loop_input.append(
(input_bucket, output_bucket, s3_path, model_prefix, i_split, delete_kfold_projects)
)
try:
pool = ThreadPool(processes=2)
pool.starmap(self.train_one_fold, parallel_loop_input)
finally:
pool.close()
pool.join()
met = Metrics(self.project_name)
kfold_summary = met.k_fold_model_summary(output_bucket, model_prefix, n_splits)
return kfold_summary
def delete_lookoutvision_project(self, project_name: str):
"""
Deletes the whole project including the datasets and model.
Arguments:
project_name (str): project name to delete
Returns:
response (dict): response contains a field "Success" telling whether the deletion was successful or not
if not an Error is provided in an additional field "Error"
"""
response = {}
try:
print(f"Deleting project {project_name}.")
for dataset_type in ["train", "test"]:
if (
len(
self.lv.list_dataset_entries(ProjectName=project_name, DatasetType=dataset_type)[
"DatasetEntries"
]
)
> 0
):
self.lv.delete_dataset(ProjectName=project_name, DatasetType=dataset_type)
if len(self.lv.list_models(ProjectName=project_name)["Models"]) > 0:
self.lv.delete_model(ProjectName=project_name, ModelVersion="1")
while len(self.lv.list_models(ProjectName=project_name)["Models"]) > 0:
print("-", end="")
time.sleep(5)
self.lv.delete_project(ProjectName=project_name)
print("!")
response["Success"] = True
except Exception as e:
response["Error"] = e
response["success"] = False
print("Something went wrong: ", e)
return response
| [
"os.listdir",
"boto3.client",
"json.dumps",
"inspect.signature",
"lookoutvision.metrics.Metrics",
"time.sleep",
"multiprocessing.pool.ThreadPool",
"numpy.array",
"lookoutvision.manifest.Manifest",
"collections.defaultdict",
"logging.error"
] | [((2035, 2064), 'boto3.client', 'boto3.client', (['"""lookoutvision"""'], {}), "('lookoutvision')\n", (2047, 2064), False, 'import boto3\n'), ((2084, 2102), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (2096, 2102), False, 'import boto3\n'), ((2564, 2587), 'inspect.signature', 'inspect.signature', (['init'], {}), '(init)\n', (2581, 2587), False, 'import inspect\n'), ((5229, 5246), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (5240, 5246), False, 'from collections import defaultdict\n'), ((20582, 20604), 'os.listdir', 'os.listdir', (['local_path'], {}), '(local_path)\n', (20592, 20604), False, 'import os\n'), ((28092, 28219), 'lookoutvision.manifest.Manifest', 'Manifest', ([], {'bucket': 'input_bucket', 's3_path': 'f"""{s3_path}{self.project_name}_{i_split}/"""', 'datasets': "['training', 'validation']"}), "(bucket=input_bucket, s3_path=\n f'{s3_path}{self.project_name}_{i_split}/', datasets=['training',\n 'validation'])\n", (28100, 28219), False, 'from lookoutvision.manifest import Manifest\n'), ((30998, 31024), 'lookoutvision.metrics.Metrics', 'Metrics', (['self.project_name'], {}), '(self.project_name)\n', (31005, 31024), False, 'from lookoutvision.metrics import Metrics\n'), ((10241, 10254), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (10251, 10254), False, 'import time\n'), ((30804, 30827), 'multiprocessing.pool.ThreadPool', 'ThreadPool', ([], {'processes': '(2)'}), '(processes=2)\n', (30814, 30827), False, 'from multiprocessing.pool import ThreadPool\n'), ((32450, 32463), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (32460, 32463), False, 'import time\n'), ((17079, 17093), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (17089, 17093), False, 'import time\n'), ((25322, 25338), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (25335, 25338), False, 'import logging\n'), ((26714, 26727), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (26724, 26727), False, 'import time\n'), ((9372, 
9387), 'numpy.array', 'np.array', (['stati'], {}), '(stati)\n', (9380, 9387), True, 'import numpy as np\n'), ((13427, 13441), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (13437, 13441), False, 'import time\n'), ((24847, 24865), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (24857, 24865), False, 'import json\n'), ((25246, 25265), 'json.dumps', 'json.dumps', (['success'], {}), '(success)\n', (25256, 25265), False, 'import json\n'), ((19422, 19440), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (19434, 19440), False, 'import boto3\n')] |
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
import numpy as np
import pandas as pd
def get_splits(dataset_size):
    """Return the number of CV splits implied by a 20% test fraction.

    Note: for any non-zero ``dataset_size`` the ratio is 1 / 0.2, i.e. 5.
    """
    ratio = dataset_size / (0.2 * dataset_size)
    return int(np.round(ratio))
def append_values(y, split_id):
    """Wrap *y* as a one-row DataFrame indexed ``split_<split_id>``.

    Columns are renamed ``y_0 .. y_{n-1}`` so rows from different folds can be
    concatenated even when fold sizes differ.
    """
    frame = pd.DataFrame({f"split_{split_id}": y}).T
    frame.columns = [f"y_{i}" for i in range(frame.shape[1])]
    return frame
# Snakemake-driven script: run repeated stratified k-fold cross-validation with
# a random forest and persist per-fold predictions, probabilities, feature
# importances and the ids of the test sequences.
# NOTE(review): relies on the global ``snakemake`` object injected at runtime.
df = pd.read_csv(snakemake.input[0], index_col=0)
# All columns except the last are features; column "y" holds the labels.
X, y = df.iloc[:, :-1].values, df["y"].values
brf = \
    RandomForestClassifier(n_estimators=100, random_state=0)
cv = \
    RepeatedStratifiedKFold(n_splits=get_splits(df.shape[0]),
                            n_repeats=10, random_state=42)
# One accumulator DataFrame per artefact; each CV fold appends one row.
df_y_true, df_y_pred, df_y_prob, df_imp, df_seqs = \
    pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()
for i, (train_index, test_index) in enumerate(cv.split(X, y)):
    X_train, y_train = X[train_index], y[train_index]
    X_test, y_test = X[test_index], y[test_index]
    # Record which sequences (row labels) landed in this fold's test set.
    df_seqs = pd.concat([df_seqs, append_values(df.index[test_index], i)])
    df_y_true = pd.concat([df_y_true, append_values(y_test, i)])
    brf.fit(X_train, y_train)
    # Feature importances of the model refitted on this fold's training data.
    df_imp_tmp = pd.DataFrame({f"fold_{i}": brf.feature_importances_})
    df_imp = pd.concat([df_imp, df_imp_tmp.transpose()])
    y_pred_class = brf.predict(X_test)
    df_y_pred = pd.concat([df_y_pred, append_values(y_pred_class, i)])
    # Keep only the probability of the positive class (column 1).
    y_pred_proba = brf.predict_proba(X_test)
    df_y_prob = pd.concat([df_y_prob, append_values(y_pred_proba[:, 1], i)])
# Outputs (in order): true labels, predicted classes, positive-class
# probabilities, feature importances, test-sequence ids.
df_y_true.to_csv(snakemake.output[0])
df_y_pred.to_csv(snakemake.output[1])
df_y_prob.to_csv(snakemake.output[2])
df_imp.to_csv(snakemake.output[3])
df_seqs.to_csv(snakemake.output[4])
| [
"pandas.DataFrame",
"numpy.round",
"sklearn.ensemble.RandomForestClassifier",
"pandas.read_csv"
] | [((417, 461), 'pandas.read_csv', 'pd.read_csv', (['snakemake.input[0]'], {'index_col': '(0)'}), '(snakemake.input[0], index_col=0)\n', (428, 461), True, 'import pandas as pd\n'), ((521, 577), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'random_state': '(0)'}), '(n_estimators=100, random_state=0)\n', (543, 577), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((765, 779), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (777, 779), True, 'import pandas as pd\n'), ((781, 795), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (793, 795), True, 'import pandas as pd\n'), ((797, 811), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (809, 811), True, 'import pandas as pd\n'), ((813, 827), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (825, 827), True, 'import pandas as pd\n'), ((829, 843), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (841, 843), True, 'import pandas as pd\n'), ((1203, 1256), 'pandas.DataFrame', 'pd.DataFrame', (["{f'fold_{i}': brf.feature_importances_}"], {}), "({f'fold_{i}': brf.feature_importances_})\n", (1215, 1256), True, 'import pandas as pd\n'), ((199, 244), 'numpy.round', 'np.round', (['(dataset_size / (0.2 * dataset_size))'], {}), '(dataset_size / (0.2 * dataset_size))\n', (207, 244), True, 'import numpy as np\n'), ((289, 327), 'pandas.DataFrame', 'pd.DataFrame', (["{f'split_{split_id}': y}"], {}), "({f'split_{split_id}': y})\n", (301, 327), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed May 23 16:54:52 2018
@author: tommy_mizuki
"""
def my_func(x,y):
    """Compute ``[cos(x) + 10*y, sin(x) - 20*y]``, print it, and return it.

    Arguments:
        x: angle passed to cos/sin (radians).
        y: linear term scaled by +10 and -20 respectively.
    Returns:
        list: the two computed elements.

    Previously the list was only printed and the function returned ``None``;
    returning it lets callers use the result (printing is preserved, so the
    change is backward compatible).
    """
    import numpy as np
    element0 = np.cos(x) + 10*y
    element1 = np.sin(x) - 20*y
    A = [element0, element1]
    print(A)
    return A
| [
"numpy.sin",
"numpy.cos"
] | [((148, 157), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (154, 157), True, 'import numpy as np\n'), ((180, 189), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (186, 189), True, 'import numpy as np\n')] |
#-*- encoding:utf8 -*-
import os
import time
import pickle
import torch as t
import numpy as np
from torch.utils import data
import nibabel as nib
class dataSet_V1(data.Dataset):
    """Dataset yielding per-residue embeddings, labels and GO terms.

    Optionally also yields a per-protein PPI-network embedding when
    ``PPI_embedding_file`` is given.
    """

    def __init__(self,sequences_file=None,label_file=None,gos_gile=None,embeddings_file=None, PPI_embedding_file=None):
        super(dataSet_V1, self).__init__()
        with open(sequences_file, "rb") as handle:
            self.all_sequences = pickle.load(handle)
        with open(label_file, "rb") as handle:
            self.all_labels = pickle.load(handle)
        with open(gos_gile, "rb") as handle:
            self.all_gos = pickle.load(handle)
        # Embedding lookup table: row i is the vector for token id i.
        self.embeddings = np.load(embeddings_file)
        self.lookup_matrix = self.embeddings
        if PPI_embedding_file:
            with open(PPI_embedding_file, "rb") as handle:
                self.all_net_embeddings = pickle.load(handle, encoding="bytes")
        else:
            self.all_net_embeddings = None

    def __getitem__(self,index):
        # Map each token id to its embedding row; sequences are truncated to
        # the first 1000 tokens.
        token_ids = self.all_sequences[index][:1000]
        feavalue_value = np.stack([self.lookup_matrix[tid] for tid in token_ids])
        label = np.array(self.all_labels[index], dtype=np.float32)
        gos = self.all_gos[index]
        if self.all_net_embeddings:
            embedding = np.array(self.all_net_embeddings[index], dtype=np.float32)
            return feavalue_value, embedding, label, gos
        return feavalue_value, label, gos

    def __len__(self):
        return len(self.all_sequences)
class dataSet_V2(data.Dataset):
    """Like ``dataSet_V1`` but additionally yields a 35020-dim binary
    InterPro feature vector (only when PPI embeddings are available)."""

    def __init__(self,sequences_file=None,label_file=None,gos_gile=None,embeddings_file=None, PPI_embedding_file=None,interpro_file=None):
        super(dataSet_V2, self).__init__()
        with open(sequences_file, "rb") as handle:
            self.all_sequences = pickle.load(handle)
        with open(label_file, "rb") as handle:
            self.all_labels = pickle.load(handle)
        with open(gos_gile, "rb") as handle:
            self.all_gos = pickle.load(handle)
        # Per-sample lists of InterPro indices to set to 1.
        with open(interpro_file, "rb") as handle:
            self.inter_pro = pickle.load(handle)
        self.embeddings = np.load(embeddings_file)
        self.lookup_matrix = self.embeddings
        if PPI_embedding_file:
            with open(PPI_embedding_file, "rb") as handle:
                self.all_net_embeddings = pickle.load(handle, encoding="bytes")
        else:
            self.all_net_embeddings = None

    def __getitem__(self,index):
        token_ids = self.all_sequences[index][:1000]
        feavalue_value = np.stack([self.lookup_matrix[tid] for tid in token_ids])
        label = np.array(self.all_labels[index], dtype=np.float32)
        gos = self.all_gos[index]
        # Multi-hot InterPro vector (fixed width 35020).
        interpro_info = np.zeros(35020, dtype=np.float32)
        interpro_info[self.inter_pro[index]] = 1
        if self.all_net_embeddings:
            embedding = np.array(self.all_net_embeddings[index], dtype=np.float32)
            return feavalue_value, embedding, interpro_info, label, gos
        # NOTE(review): without PPI embeddings the InterPro vector is computed
        # but not returned — mirrors the original behavior.
        return feavalue_value, label, gos

    def __len__(self):
        return len(self.all_sequences)
class dataSet_V3(data.Dataset):
    """Dataset variant without sequence embeddings: yields a dummy sequence
    feature, labels, GO terms and (optionally) PPI/InterPro features."""

    def __init__(self,label_file=None,gos_gile=None, PPI_embedding_file=None,interpro_file=None):
        super(dataSet_V3, self).__init__()
        with open(label_file, "rb") as handle:
            self.all_labels = pickle.load(handle)
        with open(gos_gile, "rb") as handle:
            self.all_gos = pickle.load(handle)
        with open(interpro_file, "rb") as handle:
            self.inter_pro = pickle.load(handle)
        if PPI_embedding_file:
            with open(PPI_embedding_file, "rb") as handle:
                self.all_net_embeddings = pickle.load(handle, encoding="bytes")
        else:
            self.all_net_embeddings = None

    def __getitem__(self,index):
        # Placeholder sequence feature (no embeddings in this variant).
        feavalue_value = np.stack([[0], [0]])
        label = np.array(self.all_labels[index], dtype=np.float32)
        gos = self.all_gos[index]
        # Multi-hot InterPro vector (fixed width 35020).
        interpro_info = np.zeros(35020, dtype=np.float32)
        interpro_info[self.inter_pro[index]] = 1
        if self.all_net_embeddings:
            embedding = np.array(self.all_net_embeddings[index], dtype=np.float32)
            return feavalue_value, embedding, interpro_info, label, gos
        return feavalue_value, label, gos

    def __len__(self):
        return len(self.all_labels)
| [
"pickle.load",
"numpy.stack",
"numpy.array",
"numpy.zeros",
"numpy.load"
] | [((668, 692), 'numpy.load', 'np.load', (['embeddings_file'], {}), '(embeddings_file)\n', (675, 692), True, 'import numpy as np\n'), ((1189, 1213), 'numpy.stack', 'np.stack', (['feavalue_value'], {}), '(feavalue_value)\n', (1197, 1213), True, 'import numpy as np\n'), ((1279, 1312), 'numpy.array', 'np.array', (['label'], {'dtype': 'np.float32'}), '(label, dtype=np.float32)\n', (1287, 1312), True, 'import numpy as np\n'), ((2310, 2334), 'numpy.load', 'np.load', (['embeddings_file'], {}), '(embeddings_file)\n', (2317, 2334), True, 'import numpy as np\n'), ((2822, 2846), 'numpy.stack', 'np.stack', (['feavalue_value'], {}), '(feavalue_value)\n', (2830, 2846), True, 'import numpy as np\n'), ((2903, 2936), 'numpy.array', 'np.array', (['label'], {'dtype': 'np.float32'}), '(label, dtype=np.float32)\n', (2911, 2936), True, 'import numpy as np\n'), ((3041, 3074), 'numpy.zeros', 'np.zeros', (['(35020)'], {'dtype': 'np.float32'}), '(35020, dtype=np.float32)\n', (3049, 3074), True, 'import numpy as np\n'), ((4285, 4309), 'numpy.stack', 'np.stack', (['feavalue_value'], {}), '(feavalue_value)\n', (4293, 4309), True, 'import numpy as np\n'), ((4366, 4399), 'numpy.array', 'np.array', (['label'], {'dtype': 'np.float32'}), '(label, dtype=np.float32)\n', (4374, 4399), True, 'import numpy as np\n'), ((4506, 4539), 'numpy.zeros', 'np.zeros', (['(35020)'], {'dtype': 'np.float32'}), '(35020, dtype=np.float32)\n', (4514, 4539), True, 'import numpy as np\n'), ((426, 445), 'pickle.load', 'pickle.load', (['fp_seq'], {}), '(fp_seq)\n', (437, 445), False, 'import pickle\n'), ((526, 547), 'pickle.load', 'pickle.load', (['fp_label'], {}), '(fp_label)\n', (537, 547), False, 'import pickle\n'), ((621, 640), 'pickle.load', 'pickle.load', (['fp_gos'], {}), '(fp_gos)\n', (632, 640), False, 'import pickle\n'), ((1408, 1466), 'numpy.array', 'np.array', (['self.all_net_embeddings[index]'], {'dtype': 'np.float32'}), '(self.all_net_embeddings[index], dtype=np.float32)\n', (1416, 1466), True, 'import 
numpy as np\n'), ((1969, 1988), 'pickle.load', 'pickle.load', (['fp_seq'], {}), '(fp_seq)\n', (1980, 1988), False, 'import pickle\n'), ((2069, 2090), 'pickle.load', 'pickle.load', (['fp_label'], {}), '(fp_label)\n', (2080, 2090), False, 'import pickle\n'), ((2164, 2183), 'pickle.load', 'pickle.load', (['fp_gos'], {}), '(fp_gos)\n', (2175, 2183), False, 'import pickle\n'), ((2263, 2282), 'pickle.load', 'pickle.load', (['fp_pro'], {}), '(fp_pro)\n', (2274, 2282), False, 'import pickle\n'), ((3178, 3236), 'numpy.array', 'np.array', (['self.all_net_embeddings[index]'], {'dtype': 'np.float32'}), '(self.all_net_embeddings[index], dtype=np.float32)\n', (3186, 3236), True, 'import numpy as np\n'), ((3739, 3760), 'pickle.load', 'pickle.load', (['fp_label'], {}), '(fp_label)\n', (3750, 3760), False, 'import pickle\n'), ((3834, 3853), 'pickle.load', 'pickle.load', (['fp_gos'], {}), '(fp_gos)\n', (3845, 3853), False, 'import pickle\n'), ((3933, 3952), 'pickle.load', 'pickle.load', (['fp_pro'], {}), '(fp_pro)\n', (3944, 3952), False, 'import pickle\n'), ((4640, 4698), 'numpy.array', 'np.array', (['self.all_net_embeddings[index]'], {'dtype': 'np.float32'}), '(self.all_net_embeddings[index], dtype=np.float32)\n', (4648, 4698), True, 'import numpy as np\n'), ((870, 907), 'pickle.load', 'pickle.load', (['fp_emb'], {'encoding': '"""bytes"""'}), "(fp_emb, encoding='bytes')\n", (881, 907), False, 'import pickle\n'), ((2512, 2549), 'pickle.load', 'pickle.load', (['fp_emb'], {'encoding': '"""bytes"""'}), "(fp_emb, encoding='bytes')\n", (2523, 2549), False, 'import pickle\n'), ((4086, 4123), 'pickle.load', 'pickle.load', (['fp_emb'], {'encoding': '"""bytes"""'}), "(fp_emb, encoding='bytes')\n", (4097, 4123), False, 'import pickle\n')] |
import numpy as np
import cv2
import random
import tqdm
import utils
import os
# Sub-directory (video) names of the DAVIS 2017 training split; selected when
# ``train_or_val`` is 'trian'/'training' in get_img_paths().
DAVIS17_TRAINING_VIDEOS = [
    'bear', 'bmx-bumps', 'boat', 'boxing-fisheye', 'breakdance-flare', 'bus',
    'car-turn', 'cat-girl', 'classic-car', 'color-run', 'crossing',
    'dance-jump', 'dancing', 'disc-jockey', 'dog-agility', 'dog-gooses',
    'dogs-scale', 'drift-turn', 'drone', 'elephant', 'flamingo', 'hike',
    'hockey', 'horsejump-low', 'kid-football', 'kite-walk', 'koala',
    'lady-running', 'lindy-hop', 'longboard', 'lucia', 'mallard-fly',
    'mallard-water', 'miami-surf', 'motocross-bumps', 'motorbike', 'night-race',
    'paragliding', 'planes-water', 'rallye', 'rhino', 'rollerblade',
    'schoolgirls', 'scooter-board', 'scooter-gray', 'sheep', 'skate-park',
    'snowboard', 'soccerball', 'stroller', 'stunt', 'surf', 'swing', 'tennis',
    'tractor-sand', 'train', 'tuk-tuk', 'upside-down', 'varanus-cage', 'walking'
]
# Sub-directory (video) names of the DAVIS 2017 validation split; selected when
# ``train_or_val`` is 'val'/'validation'.
DAVIS17_VALIDATION_VIDEOS = [
    'bike-packing', 'blackswan', 'bmx-trees', 'breakdance', 'camel',
    'car-roundabout', 'car-shadow', 'cows', 'dance-twirl', 'dog', 'dogs-jump',
    'drift-chicane', 'drift-straight', 'goat', 'gold-fish', 'horsejump-high',
    'india', 'judo', 'kite-surf', 'lab-coat', 'libby', 'loading', 'mbike-trick',
    'motocross-jump', 'paragliding-launch', 'parkour', 'pigs', 'scooter-black',
    'shooting', 'soapbox'
]
def get_img_paths(difficulty, date_path, train_or_val=None):
    """Return the per-video image directories for a given difficulty level.

    Args:
        difficulty: key into ``utils.DIFFICULTY_NUM_VIDEOS`` giving how many
            video directories to keep (None means keep all).
        date_path: root directory containing one sub-directory per video.
        train_or_val: None (all sub-directories, sorted), 'trian'/'training'
            for the DAVIS17 training split, or 'val'/'validation' for the
            validation split.
    Returns:
        list[str]: absolute paths of the selected video directories.
    """
    num_frames = utils.DIFFICULTY_NUM_VIDEOS[difficulty]
    if train_or_val is None:
        subdirs = sorted(os.listdir(date_path))
    elif train_or_val in ['trian', 'training']:
        subdirs = DAVIS17_TRAINING_VIDEOS
    elif train_or_val in ['val', 'validation']:
        subdirs = DAVIS17_VALIDATION_VIDEOS
    else:
        raise Exception("train_or_val %s not defined." % train_or_val)
    image_paths = [os.path.join(date_path, name) for name in subdirs]
    if num_frames is None:
        return image_paths
    if num_frames > len(image_paths) or num_frames < 0:
        raise ValueError(f'`num_bakground_paths` is {num_frames} but should not be larger than the '
                         f'number of available background paths ({len(image_paths)}) and at least 0.')
    return image_paths[:num_frames]
class ImageSource(object):
    """Abstract base for background/foreground image providers.

    Subclasses override ``get_image`` to composite or return an image and
    ``reset`` to re-randomise their internal state.
    """

    def get_image(self):
        # No-op in the base class; overridden by subclasses.
        pass

    def reset(self):
        # No-op in the base class; overridden by subclasses.
        pass
class RandomColorSource(ImageSource):
    """Replaces channel-2-dominant pixels of an observation with a single
    random solid colour."""

    def __init__(self, shape):
        self.shape = shape
        self.bg = np.zeros((self.shape[0], self.shape[1], 3))
        self.reset()

    def reset(self):
        # Draw a fresh colour and paint the whole background with it.
        self._color = np.random.randint(0, 256, size=(3,))
        self.bg[:, :] = self._color

    def get_image(self, obs):
        self.bg = cv2.resize(self.bg, (obs.shape[1], obs.shape[0]))
        # Background heuristic: pixels where channel 2 exceeds both others.
        dominant = np.logical_and(obs[:, :, 2] > obs[:, :, 1],
                                  obs[:, :, 2] > obs[:, :, 0])
        obs[dominant] = self.bg[dominant]
        return obs
class NoiseSource(ImageSource):
    """Fills channel-2-dominant pixels of an observation with uniform noise."""

    def __init__(self, shape, strength=255):
        self.strength = strength
        self.shape = shape

    def get_image(self, obs):
        # Fresh noise background, scaled to [0, strength) and cast to uint8.
        noise = np.random.rand(obs.shape[0], obs.shape[1], 3) * self.strength
        self.bg = noise.astype(np.uint8)
        dominant = np.logical_and(obs[:, :, 2] > obs[:, :, 1],
                                  obs[:, :, 2] > obs[:, :, 0])
        obs[dominant] = self.bg[dominant]
        return obs
class RandomDotsSource(ImageSource):
    """Animated background/foreground of randomly coloured dots performing a
    random walk; positions are stored as fractions of the image size."""

    def __init__(self, shape, difficulty, ground=None):
        self.shape = shape
        num_dots = utils.DIFFICULTY_NUM_VIDEOS[difficulty]
        # Fall back to 16 dots when the difficulty maps to None/0.
        self.num_dots = num_dots if num_dots else 16
        # Re-randomise the dots after this many frames.
        self.num_frames = 1000
        self.ground = ground
        # Dot centres are clamped to this fractional range of the image.
        self.lim_low = 0.1
        self.lim_high = 0.9
        self.reset()
    def reset(self):
        """Re-draw random colours, positions, sizes and zero velocities."""
        self.idx = 0
        self.colors = []
        self.positions = []
        self.sizes = []
        self.move = []
        for i in range(self.num_dots):
            self.colors.append(np.random.rand(3))
            self.positions.append(np.random.uniform(self.lim_low, self.lim_high, 2))
            self.sizes.append(np.random.uniform(0.5, 1))
            self.move.append([0, 0])
    def limit_pos(self, i):
        """Clamp dot *i* inside [lim_low, lim_high] and stop it at the wall."""
        if self.positions[i][0] < self.lim_low:
            self.positions[i][0] = self.lim_low
            self.move[i][0] = 0
        elif self.positions[i][0] > self.lim_high:
            self.positions[i][0] = self.lim_high
            self.move[i][0] = 0
        if self.positions[i][1] < self.lim_low:
            self.positions[i][1] = self.lim_low
            self.move[i][1] = 0
        elif self.positions[i][1] > self.lim_high:
            self.positions[i][1] = self.lim_high
            self.move[i][1] = 0
    def build_bg(self, w, h):
        """Render all dots onto a fresh (h, w, 3) background and advance the
        random walk (velocity jitter, occasional damping, colour drift)."""
        self.bg = np.zeros((h, w, 3))
        for i in range(self.num_dots):
            color, position, size = self.colors[i], self.positions[i], self.sizes[i]
            position = (int(position[0] * w), int(position[1] * h))
            cv2.circle(self.bg, position, int(size * w / 20), color, -1)
            # Jitter the velocity; ~20% of the time damp it by 5x.
            self.move[i] = np.random.normal(self.move[i], 0.01, 2)
            self.move[i] = self.move[i] if np.random.rand() < 0.8 else self.move[i] / 5
            self.positions[i] += self.move[i]
            self.limit_pos(i)
            self.colors[i] += np.random.normal(1 / 255, 0.005, 3)
        self.bg *= 255
        self.bg = self.bg.astype(np.uint8)
    def get_image(self, obs):
        """Composite the dots onto *obs* (in place) and return it."""
        if self.idx == self.num_frames:
            self.reset()
        h, w, _ = obs.shape
        self.build_bg(w, h)
        if self.ground == 'forground':
            # NOTE(review): np.logical_or takes only two inputs — the third
            # positional argument is treated as the ``out`` buffer, so the
            # channel-2 condition is overwritten, not OR-ed. Likely a bug.
            mask = np.logical_or(self.bg[:, :, 0] > 0, self.bg[:, :, 1] > 0, self.bg[:, :, 2] > 0)
            obs[mask] = self.bg[mask]
        else:
            # Only replace background pixels (channel-2-dominant) hit by dots.
            mask1 = np.logical_or(self.bg[:, :, 0] > 0, self.bg[:, :, 1] > 0, self.bg[:, :, 2] > 0)
            mask2 = np.logical_and((obs[:, :, 2] > obs[:, :, 1]), (obs[:, :, 2] > obs[:, :, 0]))
            mask = np.logical_and(mask1, mask2)
            obs[mask] = self.bg[mask]
        self.idx += 1
        return obs
class RandomImageSource(ImageSource):
    """Serves a randomly chosen still image from a set of DAVIS directories,
    pre-loaded and resized into a single array at construction time."""

    def __init__(self, shape, difficulty, date_path, train_or_val=None):
        self.shape = shape
        self.image_paths = get_img_paths(difficulty, date_path, train_or_val)
        # Flatten all per-video directories into one sorted file list.
        self.image_files = []
        for image_path in self.image_paths:
            for image_file in sorted(os.listdir(image_path)):
                self.image_files.append(os.path.join(image_path, image_file))
        self.total_frames = len(self.image_files)
        self.count = 0
        self.build_bg_arr()
        self.reset()

    def build_bg_arr(self):
        # Pre-load every image, resized to the requested shape.
        self.bg_arr = np.zeros((self.total_frames, self.shape[0], self.shape[1], 3))
        for frame_idx, fname in enumerate(self.image_files):
            img = cv2.imread(fname, cv2.IMREAD_COLOR)
            self.bg_arr[frame_idx] = cv2.resize(img, (self.shape[1], self.shape[0]))

    def reset(self):
        # Pick a new random image index.
        self.idx = np.random.randint(0, self.total_frames)

    def get_image(self, shape=None):
        self.bg = self.bg_arr[self.idx]
        if shape:
            self.bg = cv2.resize(self.bg, shape)
        self.count += 1
        return self.bg
class RandomVideoSource(ImageSource):
    """Plays a randomly chosen DAVIS video clip as background (or foreground)
    of the observation stream, restarting with a new clip at each clip end."""

    def __init__(self, shape, difficulty, date_path, train_or_val=None, ground=None):
        self.ground = ground
        self.shape = shape
        self.image_paths = get_img_paths(difficulty, date_path, train_or_val)
        self.num_path = len(self.image_paths)
        self.reset()
    def build_bg_arr(self):
        """Load all frames of the currently selected clip into memory."""
        self.image_path = self.image_paths[self._loc]
        # NOTE(review): os.listdir order is arbitrary — frames may play out of
        # order; consider sorted() if temporal order matters.
        self.image_files = os.listdir(self.image_path)
        self.bg_arr = []
        for fname in self.image_files:
            fpath = os.path.join(self.image_path, fname)
            img = cv2.imread(fpath, cv2.IMREAD_COLOR)
            if self.ground == 'forground':
                # Zero out everything outside the annotation mask so only the
                # annotated object is composited as foreground.
                mpath = fpath.replace("JPEGImages", "Annotations").replace("jpg", "png")
                mask = cv2.imread(mpath, cv2.IMREAD_GRAYSCALE)
                mask = np.logical_not(mask)
                img[mask] = 0
            self.bg_arr.append(img)
    def reset(self):
        """Pick a new random clip and reload its frames."""
        self.idx = 0
        self._loc = np.random.randint(0, self.num_path)
        self.build_bg_arr()
    def get_image(self, obs):
        """Composite the current clip frame onto *obs* (in place), advance."""
        if self.idx == len(self.image_files):
            self.reset()
        self.bg = self.bg_arr[self.idx]
        self.bg = cv2.resize(self.bg, (obs.shape[1], obs.shape[0]))
        if self.ground == 'forground':
            # NOTE(review): np.logical_or takes only two inputs — the third
            # positional argument is treated as the ``out`` buffer, so the
            # channel-2 condition is overwritten, not OR-ed. Likely a bug.
            mask = np.logical_or(self.bg[:, :, 0] > 0, self.bg[:, :, 1] > 0, self.bg[:, :, 2] > 0)
            obs[mask] = self.bg[mask]
        else:
            # Replace only channel-2-dominant (background heuristic) pixels.
            mask = np.logical_and((obs[:, :, 2] > obs[:, :, 1]), (obs[:, :, 2] > obs[:, :, 0]))
            obs[mask] = self.bg[mask]
        self.idx += 1
        return obs
| [
"numpy.random.normal",
"os.listdir",
"numpy.random.rand",
"numpy.logical_and",
"numpy.logical_not",
"os.path.join",
"numpy.logical_or",
"numpy.zeros",
"numpy.random.randint",
"numpy.random.uniform",
"cv2.resize",
"cv2.imread"
] | [((1872, 1903), 'os.path.join', 'os.path.join', (['date_path', 'subdir'], {}), '(date_path, subdir)\n', (1884, 1903), False, 'import os\n'), ((2527, 2570), 'numpy.zeros', 'np.zeros', (['(self.shape[0], self.shape[1], 3)'], {}), '((self.shape[0], self.shape[1], 3))\n', (2535, 2570), True, 'import numpy as np\n'), ((2636, 2672), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(3,)'}), '(0, 256, size=(3,))\n', (2653, 2672), True, 'import numpy as np\n'), ((2758, 2807), 'cv2.resize', 'cv2.resize', (['self.bg', '(obs.shape[1], obs.shape[0])'], {}), '(self.bg, (obs.shape[1], obs.shape[0]))\n', (2768, 2807), False, 'import cv2\n'), ((2823, 2895), 'numpy.logical_and', 'np.logical_and', (['(obs[:, :, 2] > obs[:, :, 1])', '(obs[:, :, 2] > obs[:, :, 0])'], {}), '(obs[:, :, 2] > obs[:, :, 1], obs[:, :, 2] > obs[:, :, 0])\n', (2837, 2895), True, 'import numpy as np\n'), ((3261, 3333), 'numpy.logical_and', 'np.logical_and', (['(obs[:, :, 2] > obs[:, :, 1])', '(obs[:, :, 2] > obs[:, :, 0])'], {}), '(obs[:, :, 2] > obs[:, :, 1], obs[:, :, 2] > obs[:, :, 0])\n', (3275, 3333), True, 'import numpy as np\n'), ((4770, 4789), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {}), '((h, w, 3))\n', (4778, 4789), True, 'import numpy as np\n'), ((6713, 6775), 'numpy.zeros', 'np.zeros', (['(self.total_frames, self.shape[0], self.shape[1], 3)'], {}), '((self.total_frames, self.shape[0], self.shape[1], 3))\n', (6721, 6775), True, 'import numpy as np\n'), ((7031, 7070), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.total_frames'], {}), '(0, self.total_frames)\n', (7048, 7070), True, 'import numpy as np\n'), ((7700, 7727), 'os.listdir', 'os.listdir', (['self.image_path'], {}), '(self.image_path)\n', (7710, 7727), False, 'import os\n'), ((8271, 8306), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.num_path'], {}), '(0, self.num_path)\n', (8288, 8306), True, 'import numpy as np\n'), ((8495, 8544), 'cv2.resize', 'cv2.resize', (['self.bg', 
'(obs.shape[1], obs.shape[0])'], {}), '(self.bg, (obs.shape[1], obs.shape[0]))\n', (8505, 8544), False, 'import cv2\n'), ((1552, 1573), 'os.listdir', 'os.listdir', (['date_path'], {}), '(date_path)\n', (1562, 1573), False, 'import os\n'), ((3141, 3186), 'numpy.random.rand', 'np.random.rand', (['obs.shape[0]', 'obs.shape[1]', '(3)'], {}), '(obs.shape[0], obs.shape[1], 3)\n', (3155, 3186), True, 'import numpy as np\n'), ((5082, 5121), 'numpy.random.normal', 'np.random.normal', (['self.move[i]', '(0.01)', '(2)'], {}), '(self.move[i], 0.01, 2)\n', (5098, 5121), True, 'import numpy as np\n'), ((5316, 5351), 'numpy.random.normal', 'np.random.normal', (['(1 / 255)', '(0.005)', '(3)'], {}), '(1 / 255, 0.005, 3)\n', (5332, 5351), True, 'import numpy as np\n'), ((5629, 5708), 'numpy.logical_or', 'np.logical_or', (['(self.bg[:, :, 0] > 0)', '(self.bg[:, :, 1] > 0)', '(self.bg[:, :, 2] > 0)'], {}), '(self.bg[:, :, 0] > 0, self.bg[:, :, 1] > 0, self.bg[:, :, 2] > 0)\n', (5642, 5708), True, 'import numpy as np\n'), ((5781, 5860), 'numpy.logical_or', 'np.logical_or', (['(self.bg[:, :, 0] > 0)', '(self.bg[:, :, 1] > 0)', '(self.bg[:, :, 2] > 0)'], {}), '(self.bg[:, :, 0] > 0, self.bg[:, :, 1] > 0, self.bg[:, :, 2] > 0)\n', (5794, 5860), True, 'import numpy as np\n'), ((5881, 5953), 'numpy.logical_and', 'np.logical_and', (['(obs[:, :, 2] > obs[:, :, 1])', '(obs[:, :, 2] > obs[:, :, 0])'], {}), '(obs[:, :, 2] > obs[:, :, 1], obs[:, :, 2] > obs[:, :, 0])\n', (5895, 5953), True, 'import numpy as np\n'), ((5977, 6005), 'numpy.logical_and', 'np.logical_and', (['mask1', 'mask2'], {}), '(mask1, mask2)\n', (5991, 6005), True, 'import numpy as np\n'), ((6877, 6912), 'cv2.imread', 'cv2.imread', (['fname', 'cv2.IMREAD_COLOR'], {}), '(fname, cv2.IMREAD_COLOR)\n', (6887, 6912), False, 'import cv2\n'), ((6942, 6989), 'cv2.resize', 'cv2.resize', (['img', '(self.shape[1], self.shape[0])'], {}), '(img, (self.shape[1], self.shape[0]))\n', (6952, 6989), False, 'import cv2\n'), ((7167, 7193), 
'cv2.resize', 'cv2.resize', (['self.bg', 'shape'], {}), '(self.bg, shape)\n', (7177, 7193), False, 'import cv2\n'), ((7812, 7848), 'os.path.join', 'os.path.join', (['self.image_path', 'fname'], {}), '(self.image_path, fname)\n', (7824, 7848), False, 'import os\n'), ((7867, 7902), 'cv2.imread', 'cv2.imread', (['fpath', 'cv2.IMREAD_COLOR'], {}), '(fpath, cv2.IMREAD_COLOR)\n', (7877, 7902), False, 'import cv2\n'), ((8603, 8682), 'numpy.logical_or', 'np.logical_or', (['(self.bg[:, :, 0] > 0)', '(self.bg[:, :, 1] > 0)', '(self.bg[:, :, 2] > 0)'], {}), '(self.bg[:, :, 0] > 0, self.bg[:, :, 1] > 0, self.bg[:, :, 2] > 0)\n', (8616, 8682), True, 'import numpy as np\n'), ((8754, 8826), 'numpy.logical_and', 'np.logical_and', (['(obs[:, :, 2] > obs[:, :, 1])', '(obs[:, :, 2] > obs[:, :, 0])'], {}), '(obs[:, :, 2] > obs[:, :, 1], obs[:, :, 2] > obs[:, :, 0])\n', (8768, 8826), True, 'import numpy as np\n'), ((3974, 3991), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (3988, 3991), True, 'import numpy as np\n'), ((4027, 4076), 'numpy.random.uniform', 'np.random.uniform', (['self.lim_low', 'self.lim_high', '(2)'], {}), '(self.lim_low, self.lim_high, 2)\n', (4044, 4076), True, 'import numpy as np\n'), ((4108, 4133), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1)'], {}), '(0.5, 1)\n', (4125, 4133), True, 'import numpy as np\n'), ((6410, 6432), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (6420, 6432), False, 'import os\n'), ((8058, 8097), 'cv2.imread', 'cv2.imread', (['mpath', 'cv2.IMREAD_GRAYSCALE'], {}), '(mpath, cv2.IMREAD_GRAYSCALE)\n', (8068, 8097), False, 'import cv2\n'), ((8121, 8141), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (8135, 8141), True, 'import numpy as np\n'), ((5165, 5181), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5179, 5181), True, 'import numpy as np\n'), ((6471, 6507), 'os.path.join', 'os.path.join', (['image_path', 'image_file'], {}), '(image_path, image_file)\n', 
(6483, 6507), False, 'import os\n')] |
from contextlib import suppress
from datetime import datetime
import requests
import numpy as np
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import get_sun, AltAz
from panoptes.utils.utils import get_quantity_value
# Endpoint serving the current AAT met-station reading (one date line followed
# by one tab-separated data line).
AAT_URL = 'http://aat-ops.anu.edu.au/met/metdata.dat'
# Column names matching, in order, the tab-separated fields returned by the
# AAT met feed; zipped against the raw values in get_aat_weather().
AAT_COLUMNS = ['time',
               'outside_temp',
               'inside_temp',
               'mirror_temp',
               'outside_dewpoint',
               'outside_humidity',
               'pressure',
               'wind_speed_avg',
               'wind_gust_max',
               'wind_direction_avg',
               'dome_state',
               'is_raining',
               'inside_dewpoint',
               'sky_ambient_diff_C',
               'sky_ambient_diff_error',
               'daytime_brightness',
               'rain_detections_past_10minutes',
               'wetness_detections_past_10minutes',
               'rain_since_9am',
               'sqm_brightness',
               ]
def get_solar_altaz(time, location):
    """ Return the altaz of the Sun at a given time and location.
    Args:
        time (astropy.time.Time): The time of the observation.
        location (astropy.coordinates.EarthLocation): The location of the observation.
    Returns:
        astropy.coordinates.AltAz: The alt/az of the Sun.
    """
    observer_frame = AltAz(obstime=time, location=location)
    return get_sun(time).transform_to(observer_frame)
def get_solar_separation(coord, time, location):
    """ Get the angular separation between a coordinate and the Sun at a given time & location.
    Args:
        coord (astropy.coordinates.SkyCoord): The coordinate.
        time (astropy.time.Time): The time of the observation.
        location (astropy.coordinates.EarthLocation): The location of the observation.
    Returns:
        astropy.Quantity: The angular separation.
    """
    observer_frame = AltAz(obstime=time, location=location)
    # Bring the target and the Sun into the same horizontal frame.
    target_altaz = coord.transform_to(observer_frame)
    sun_altaz = get_solar_altaz(time, location)
    return target_altaz.separation(sun_altaz)
def check_solar_separation_safety(observation, location, min_separation, time=None,
                                  overhead_time=120, time_check_interval=60):
    """ Check if the solar separation satisfies safety condition over observation time.
    Args:
        observation (Observation): The observation object.
        location (astropy.coordinates.EarthLocation): The location of the observation.
        min_separation (astropy.Quantity): The minimum safe separation.
        time (astropy.time.Time, optional): Start time of the observation.
            Defaults to the current time.
        overhead_time (float, optional): Observation overhead time in seconds. Default 120s.
        time_check_interval (float, optional): Check safety at this interval in seconds.
            Default 60s.
    Returns:
        bool: True if the separation stays above ``min_separation`` at every
            sampled time.
    """
    coord = observation.field.coord
    # Normalise all quantities to plain values in known units.
    min_separation = get_quantity_value(min_separation, u.deg) * u.deg
    exp_duration = get_quantity_value(observation.minimum_duration, u.second)
    overhead_time = get_quantity_value(overhead_time, u.second)
    time_check_interval = get_quantity_value(time_check_interval, u.second)
    if time is None:
        # NOTE(review): datetime.now() is naive local time — confirm whether
        # UTC (Time.now()) is intended here.
        time = Time(datetime.now())
    # Sample the whole observation (exposure + overhead) at regular intervals.
    obstime = exp_duration + overhead_time
    offsets = np.arange(0, obstime + time_check_interval, time_check_interval) * u.second
    times = offsets + time
    separations = get_solar_separation(coord, times, location)
    return all(sep > min_separation for sep in separations)
def get_aat_weather(aat_url=AAT_URL, response_columns=AAT_COLUMNS):
    """Fetch met weather data from AAO weather station.
    Args:
        aat_url (string, optional): URL to query for weather reading. Defaults to AAT_URL.
        response_columns (list, optional): List of column names that map onto the values contained
            in a succesful reponse. Defaults to AAT_COLUMNS.
    """
    response = requests.get(aat_url)
    # raise an exception if response was not successful
    response.raise_for_status()
    # Payload is three newline-separated fields: date, tab-separated readings, trailer
    date, raw_data, _ = response.content.decode().split('\n')
    data = dict(zip(response_columns, raw_data.split('\t')))
    data['date'] = date
    # Try and parse values to float where possible
    for key in data:
        with suppress(ValueError):
            data[key] = float(data[key])
    # Explicitly parse is_raining to bool
    # At the time of writing this is the only boolean quantity coming from AAT
    data["is_raining"] = bool(data["is_raining"])
    return data
| [
"astropy.coordinates.get_sun",
"requests.get",
"datetime.datetime.now",
"contextlib.suppress",
"panoptes.utils.utils.get_quantity_value",
"astropy.coordinates.AltAz",
"numpy.arange"
] | [((1377, 1415), 'astropy.coordinates.AltAz', 'AltAz', ([], {'obstime': 'time', 'location': 'location'}), '(obstime=time, location=location)\n', (1382, 1415), False, 'from astropy.coordinates import get_sun, AltAz\n'), ((1939, 1977), 'astropy.coordinates.AltAz', 'AltAz', ([], {'obstime': 'time', 'location': 'location'}), '(obstime=time, location=location)\n', (1944, 1977), False, 'from astropy.coordinates import get_sun, AltAz\n'), ((2996, 3054), 'panoptes.utils.utils.get_quantity_value', 'get_quantity_value', (['observation.minimum_duration', 'u.second'], {}), '(observation.minimum_duration, u.second)\n', (3014, 3054), False, 'from panoptes.utils.utils import get_quantity_value\n'), ((3075, 3118), 'panoptes.utils.utils.get_quantity_value', 'get_quantity_value', (['overhead_time', 'u.second'], {}), '(overhead_time, u.second)\n', (3093, 3118), False, 'from panoptes.utils.utils import get_quantity_value\n'), ((3145, 3194), 'panoptes.utils.utils.get_quantity_value', 'get_quantity_value', (['time_check_interval', 'u.second'], {}), '(time_check_interval, u.second)\n', (3163, 3194), False, 'from panoptes.utils.utils import get_quantity_value\n'), ((4033, 4054), 'requests.get', 'requests.get', (['aat_url'], {}), '(aat_url)\n', (4045, 4054), False, 'import requests\n'), ((2926, 2967), 'panoptes.utils.utils.get_quantity_value', 'get_quantity_value', (['min_separation', 'u.deg'], {}), '(min_separation, u.deg)\n', (2944, 2967), False, 'from panoptes.utils.utils import get_quantity_value\n'), ((1432, 1445), 'astropy.coordinates.get_sun', 'get_sun', (['time'], {}), '(time)\n', (1439, 1445), False, 'from astropy.coordinates import get_sun, AltAz\n'), ((3237, 3251), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3249, 3251), False, 'from datetime import datetime\n'), ((3374, 3438), 'numpy.arange', 'np.arange', (['(0)', '(obstime + time_check_interval)', 'time_check_interval'], {}), '(0, obstime + time_check_interval, time_check_interval)\n', (3383, 3438), True, 
'import numpy as np\n'), ((4398, 4418), 'contextlib.suppress', 'suppress', (['ValueError'], {}), '(ValueError)\n', (4406, 4418), False, 'from contextlib import suppress\n')] |
# ~~~
# This file is part of the paper:
#
# "A NON-CONFORMING DUAL APPROACH FOR ADAPTIVE TRUST-REGION REDUCED BASIS
# APPROXIMATION OF PDE-CONSTRAINED OPTIMIZATION"
#
# https://github.com/TiKeil/NCD-corrected-TR-RB-approach-for-pde-opt
#
# Copyright 2019-2020 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# <NAME> (2019 - 2020)
# <NAME> (2019 - 2020)
# ~~~
import numpy as np
import time
from copy import deepcopy
from pdeopt.tools import truncated_conj_grad as TruncCG
def projection_onto_range(parameter_space, mu):
    """Project the parameter dict ``mu`` componentwise onto the box constraints
    given by ``parameter_space.ranges`` (clipping each entry to [low, high]).
    The dict is modified in place and also returned."""
    ranges = parameter_space.ranges
    for key, item in parameter_space.parameter_type.items():
        low, high = ranges[key]
        if sum(item) < 2:  # scalar-like case: shapes () and (1,)
            if mu[key] < low:
                mu[key] = low if item == () else [low]
            if mu[key] > high:
                mu[key] = high if item == () else [high]
        else:  # vector case: clamp every component in place
            for j in range(item[0]):
                mu[key][j] = min(max(mu[key][j], low), high)
    return mu
def active_and_inactive_sets(parameter_space, mu, epsilon):
    """Classify each scalar parameter component as active (within ``epsilon``
    of a box bound) or inactive (strictly inside the box).

    Returns a pair of flat numpy arrays ``(Act, Inact)`` of 0/1 indicators,
    one entry per component, with ``Inact = 1 - Act``.
    """
    ranges = parameter_space.ranges
    flags = []

    def _activity(value, low, high):
        # near either the lower or the upper bound counts as active
        return 1.0 if (value - low <= epsilon or high - value <= epsilon) else 0.0

    for key, item in parameter_space.parameter_type.items():
        low, high = ranges[key]
        if sum(item) < 2:
            flags.append(_activity(mu[key], low, high))
        else:
            flags.extend(_activity(mu[key][j], low, high) for j in range(item[0]))
    Act = np.array(flags)
    return Act, np.ones(Act.shape) - Act
def armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, direction):
    """Backtracking line search along ``direction`` starting from ``mu_i``.

    Accepts the first trial step that satisfies the Armijo decrease condition
    and (for reduced models) keeps the relative error estimate within the
    trust-region radius. Falls back to the current iterate if the maximum
    number of backtracking steps is exhausted.

    Returns:
        (mu_ip1, mu_ip1_dict, Jip1, rel_est) where ``rel_est = |est / Jip1|``
        is used by the caller as the trust-region boundary criterion.
    """
    step0 = TR_parameters['initial_step_armijo']
    alpha = TR_parameters['armijo_alpha']
    j = 0
    accepted = False
    while not accepted and j < TR_parameters['max_iterations_armijo']:
        mu_ip1 = mu_i + (step0 ** j) * direction
        mu_ip1_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_ip1))
        mu_ip1_dict = projection_onto_range(parameter_space, mu_ip1_dict)
        mu_ip1 = opt_model.parse_parameter_inverse(mu_ip1_dict)
        Jip1 = opt_model.output_functional_hat(mu_ip1_dict)
        if TR_parameters['full_order_model']:
            est = 0.0
        else:
            # a-posteriori error estimate of the reduced functional value
            u_cp = opt_model.solve(mu_ip1_dict)
            p_cp = opt_model.solve_dual(mu_ip1_dict)
            est = opt_model.estimate_output_functional_hat(u_cp, p_cp, mu_ip1_dict)
        sufficient_decrease = Jip1 <= Ji - (alpha / (step0 ** j)) * (np.linalg.norm(mu_ip1 - mu_i) ** 2)
        if sufficient_decrease and abs(est / Jip1) <= TR_parameters['radius']:
            accepted = True
        j = j + 1
    if not accepted:  # exhausted the backtracking budget: keep the old iterate
        print("Maximum iteration for Armijo rule reached")
        mu_ip1 = mu_i
        mu_ip1_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_ip1))
        Jip1 = Ji
        est = TR_parameters['radius'] * Ji  # so that the Qian-Grepl method stops as well
    return mu_ip1, mu_ip1_dict, Jip1, abs(est / Jip1)
def compute_new_hessian_approximation(new_mu, old_mu, new_gradient, old_gradient, old_B):
    """BFGS update of the inverse-Hessian approximation ``old_B``.

    If the curvature condition ``y.s > 0`` fails, the approximation is reset
    to the identity, which makes the next search direction the negative
    gradient.
    """
    y = new_gradient - old_gradient
    s = new_mu - old_mu
    curvature = y.dot(s)
    if curvature <= 0.0:
        print("Curvature condition: {}".format(curvature))
        print("Reset direction to - gradient")
        return np.eye(old_gradient.size)
    By = old_B.dot(y)
    yBy = y.dot(By)
    # Standard BFGS inverse update: satisfies the secant equation B_new y = s
    rank_one = ((curvature + yBy) / (curvature * curvature)) * np.outer(s, s)
    mixed = (np.outer(By, s) + np.outer(s, By)) / curvature
    return old_B + rank_one - mixed
def compute_modified_hessian_action_matrix_version(H, Active, Inactive, eta):
    """Apply the active-set-modified Hessian to ``eta``: identity on the
    active components, ``H`` restricted to the inactive components."""
    active_part = np.multiply(Active, eta)
    inactive_part = np.multiply(Inactive, eta)
    return active_part + np.multiply(Inactive, H.dot(inactive_part))
def solve_optimization_subproblem_BFGS(opt_model, parameter_space, mu_k_dict, TR_parameters, timing=False):
    """Projected-BFGS solver for the (box-constrained) trust-region sub-problem.

    Minimizes the output functional of ``opt_model`` starting from
    ``mu_k_dict`` using a BFGS iteration with active-set modification and
    Armijo backtracking. For reduced models (``full_order_model`` False) the
    iteration additionally stops when the relative error estimate reaches the
    trust-region boundary ``beta * radius``.

    Returns:
        (mu_ip1_dict, Jcp, i, Jip1, FOCs); with ``timing=True`` instead
        (mu_ip1_dict, Jcp, i, Jip1, times, mus, Js, FOCs). ``Jcp`` is the
        functional value at the approximate Cauchy point (first iterate) for
        reduced models and None for full-order models.
    """
    if not TR_parameters['full_order_model']:
        print('___ starting subproblem')
        if 'beta' not in TR_parameters:
            print('Setting beta to the default 0.95')
            TR_parameters['beta'] = 0.95
    else:
        print("Starting parameter {}".format(mu_k_dict))
    tic_ = time.time()
    times = []
    mus = []
    Js = []
    FOCs = []
    mu_diff = 1e6
    J_diff = 1e6
    # Initial functional value, gradient and first-order data at mu_k
    Ji = opt_model.output_functional_hat(mu_k_dict)
    gradient = opt_model.output_functional_hat_gradient(mu_k_dict)
    normgrad = np.linalg.norm(gradient)
    mu_i = opt_model.parse_parameter_inverse(mu_k_dict)
    mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
    # Projected gradient step used to size the active-set tolerance epsilon_i
    mu_i_1 = mu_i - gradient
    mu_i_1_dict = projection_onto_range(opt_model.parameter_space, opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
    mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
    epsilon_i = TR_parameters['epsilon_i']
    if not isinstance(epsilon_i,float):
        epsilon_i = np.linalg.norm(mu_i_1 - mu_i)#/(np.linalg.norm(mu_i)+1e-8)
    # Inverse-Hessian approximation starts as the identity
    B = np.eye(mu_i.size)
    Active_i, Inactive_i = active_and_inactive_sets(opt_model.parameter_space, mu_i_dict, epsilon_i)
    i = 0
    while i < TR_parameters['max_iterations_subproblem']:
        if i>0:
            # Termination checks use quantities from the previous iteration
            if not TR_parameters['full_order_model']:
                if boundary_TR_criterium >= TR_parameters['beta']*TR_parameters['radius']:
                    print('boundary criterium of the TR satisfied, so stopping the sub-problem solver')
                    return mu_ip1_dict, Jcp, i, Jip1, FOCs
                if normgrad < TR_parameters['sub_tolerance'] or J_diff < TR_parameters['safety_tolerance'] or mu_diff< TR_parameters['safety_tolerance']:
                    print("Subproblem converged: FOC = {}, mu_diff = {}, J_diff = {} ".format(normgrad,mu_diff,J_diff))
                    break
            else:
                if normgrad < TR_parameters['sub_tolerance']:
                    print("Converged: FOC = {}".format(normgrad))
                    break
        # Choose the search direction
        if i == 0 and not TR_parameters['full_order_model']:
            print("Computing the approximate Cauchy point and then start the BFGS method")
            direction = -gradient
        else:
            if Inactive_i.sum() == 0.0:
                if TR_parameters["full_order_model"]:
                    print("All indexes are active, I am using -gradient as direction")
                direction = -gradient
            else:
                direction = compute_modified_hessian_action_matrix_version(B, Active_i, Inactive_i, -gradient)
                if np.dot(direction,gradient) > 0:
                    print('Not a descendent direction ... taking -gradient as direction')
                    direction = -gradient
        # Line search; the reduced model additionally returns the TR boundary criterion
        if TR_parameters["full_order_model"]:
            mu_ip1, mu_ip1_dict, Jip1, _ = armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, direction)
        else:
            mu_ip1, mu_ip1_dict, Jip1, boundary_TR_criterium = armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, direction)
        if i == 0:
            # Store the approximate Cauchy point value (reduced models only)
            if not TR_parameters['full_order_model']:
                Jcp = Jip1
            else:
                Jcp = None
        mu_diff = np.linalg.norm(mu_i - mu_ip1) / np.linalg.norm(mu_i)
        J_diff = abs(Ji - Jip1) / abs(Ji)
        old_mu = deepcopy(mu_i)
        mu_i_dict = mu_ip1_dict
        Ji = Jip1
        old_gradient = deepcopy(gradient)
        gradient = opt_model.output_functional_hat_gradient(mu_i_dict)
        # First-order criticality: distance to the projected gradient point
        mu_box = opt_model.parse_parameter(opt_model.pre_parse_parameter(opt_model.parse_parameter_inverse(mu_i_dict)-gradient))
        first_order_criticity = opt_model.parse_parameter_inverse(mu_i_dict)-opt_model.parse_parameter_inverse(projection_onto_range(parameter_space, mu_box))
        normgrad = np.linalg.norm(first_order_criticity)
        mu_i = opt_model.parse_parameter_inverse(mu_i_dict)
        mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
        mu_i_1 = mu_i - gradient
        mu_i_1_dict = projection_onto_range(opt_model.parameter_space,opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
        mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
        if not isinstance(epsilon_i,float):
            epsilon_i = np.linalg.norm(mu_i_1 - mu_i)
        # Refresh active sets and the BFGS inverse-Hessian approximation
        Active_i, Inactive_i = active_and_inactive_sets(opt_model.parameter_space, mu_i_dict, epsilon_i)
        B = compute_new_hessian_approximation(mu_i, old_mu, gradient, old_gradient, B)
        if TR_parameters["full_order_model"]:
            print("Step {}, functional {} , FOC condition {}".format(mu_ip1, Ji, np.linalg.norm(first_order_criticity)))
        times.append(time.time() -tic_)
        mus.append(mu_ip1)
        Js.append(Ji)
        FOCs.append(normgrad)
        i = i + 1
    print("relative differences mu {} and J {}".format(mu_diff, J_diff))
    if timing:
        return mu_ip1_dict, Jcp, i, Jip1, times, mus, Js, FOCs
    else:
        return mu_ip1_dict, Jcp, i, Jip1, FOCs
def modified_hessian_action(mu, Active, Inactive, opt_model, eta):
    """Matrix-free active-set-modified Hessian action (identity on active
    components, model Hessian on inactive ones). Used only by the projected
    Newton method."""
    eta_active = np.multiply(Active, eta)
    eta_inactive = np.multiply(Inactive, eta)
    hessian_eta = opt_model.output_functional_hessian_operator(mu, eta_inactive, False)
    return eta_active + np.multiply(Inactive, hessian_eta)
def solve_optimization_NewtonMethod(opt_model, parameter_space, mu_k_dict, TR_parameters, timing=False):
    """Projected Newton method with truncated CG for the Newton system.

    This method is used to compute an accurate approximation of the optimal
    parameter mu_bar with the FOM (eventually also in the global Greedy). It
    is not used in the TR algorithm in this paper.

    Returns:
        (mu_ip1_dict, Jcp, i, Jip1, FOCs, 0); with ``timing=True`` instead
        (mu_ip1_dict, Jcp, i, Jip1, times, mus, Js, FOCs). ``Jcp`` is always
        None here.
    """
    print("Starting parameter {}".format(mu_k_dict))
    if 'global_RB' not in TR_parameters:
        TR_parameters['global_RB']=False
    tic_toc = time.time()
    times = []
    mus = []
    Js = []
    FOCs = []
    Jcp = None
    mu_diff = 1e6
    J_diff = 1e6
    # Initial functional value, gradient and active-set tolerance
    Ji = opt_model.output_functional_hat(mu_k_dict)
    gradient = opt_model.output_functional_hat_gradient(mu_k_dict)
    normgrad = np.linalg.norm(gradient)
    mu_i = opt_model.parse_parameter_inverse(mu_k_dict)
    mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
    mu_i_1 = mu_i - gradient
    mu_i_1_dict = projection_onto_range(opt_model.parameter_space, opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
    mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
    epsilon_i = TR_parameters['epsilon_i']
    if not isinstance(epsilon_i,float):
        epsilon_i = np.linalg.norm(mu_i_1 - mu_i)
    i = 0
    while i < TR_parameters['max_iterations']:
        if i>0:
            if TR_parameters['full_order_model'] or TR_parameters['global_RB']:
                if normgrad < TR_parameters['sub_tolerance']:
                    print("Converged: FOC = {}".format(normgrad))
                    break
        Active_i, Inactive_i = active_and_inactive_sets(opt_model.parameter_space, mu_i_dict, epsilon_i)
        if Inactive_i.sum() == 0.0:
            # All components active: fall back to the projected gradient
            deltamu = gradient
            if TR_parameters["full_order_model"] or TR_parameters['global_RB']:
                print("I am using projected gradient instead of Newton")
        else:
            # Solve the (modified) Newton system H deltamu = gradient inexactly
            print("Using truncated CG for the linear system")
            deltamu, itcg,rescg, infocg = TruncCG(A_func=lambda v: modified_hessian_action(mu=mu_i_dict, Active= Active_i, Inactive= Inactive_i, opt_model=opt_model, eta=v), b= gradient, tol = 1.e-10)
            if infocg > 0:
                print("Choosing the gradient as direction")
                deltamu = gradient
            if np.dot(-deltamu,gradient) >= -1.e-14:
                print('Not a descendent direction ... taking gradient as direction')
                deltamu = gradient
        mu_ip1, mu_ip1_dict, Jip1, _, = armijo_rule(opt_model, parameter_space, TR_parameters, mu_i, Ji, -deltamu)
        mu_diff = np.linalg.norm(mu_i - mu_ip1) / np.linalg.norm(mu_i)
        J_diff = abs(Ji - Jip1) / abs(Ji)
        mu_i_dict = mu_ip1_dict
        Ji = Jip1
        gradient = opt_model.output_functional_hat_gradient(mu_i_dict)
        # First-order criticality: distance to the projected gradient point
        mu_box = opt_model.parse_parameter(opt_model.pre_parse_parameter(opt_model.parse_parameter_inverse(mu_i_dict)-gradient))
        first_order_criticity = opt_model.parse_parameter_inverse(mu_i_dict)-opt_model.parse_parameter_inverse(projection_onto_range(parameter_space, mu_box))
        normgrad = np.linalg.norm(first_order_criticity)
        mu_i = opt_model.parse_parameter_inverse(mu_i_dict)
        mu_i_dict = opt_model.primal_model.parse_parameter(opt_model.pre_parse_parameter(mu_i))
        mu_i_1 = mu_i - gradient
        mu_i_1_dict = projection_onto_range(opt_model.parameter_space,opt_model.parse_parameter(opt_model.pre_parse_parameter(mu_i_1)))
        mu_i_1 = opt_model.parse_parameter_inverse(mu_i_1_dict)
        if not isinstance(epsilon_i,float):
            epsilon_i = np.linalg.norm(mu_i_1 - mu_i)
        print("Step {}, functional {} , FOC condition {}".format(mu_ip1, Ji, np.linalg.norm(first_order_criticity)))
        times.append(time.time() -tic_toc)
        mus.append(mu_ip1)
        Js.append(Ji)
        FOCs.append(normgrad)
        i = i + 1
    print("relative differences mu {} and J {}".format(mu_diff, J_diff))
    if timing:
        return mu_ip1_dict, Jcp, i, Jip1, times, mus, Js, FOCs
    else:
        return mu_ip1_dict, Jcp, i, Jip1, FOCs, 0
def enrichment_step(mu, reductor, opt_fom=None):
    """Extend the RB bases of a deep-copied reductor at ``mu``.

    Returns (opt_rom, new_reductor, u, p): the re-reduced ROM, the enriched
    reductor copy, and the primal/dual snapshots. ``opt_fom`` is accepted for
    interface compatibility but not used here.
    """
    new_reductor = deepcopy(reductor)
    u, p = new_reductor.extend_bases(mu)
    return new_reductor.reduce(), new_reductor, u, p
def TR_algorithm(opt_rom, reductor, TR_parameters=None, extension_params=None, opt_fom=None, return_opt_rom=False):
    """Adaptive trust-region reduced-basis optimization loop.

    Repeatedly solves the TR sub-problem on the current ROM, checks the
    sufficient/necessary decrease conditions via a-posteriori error
    estimates, enriches the RB spaces at accepted points and adapts the TR
    radius. Setting ``TR_parameters['Qian-Grepl']`` switches to the variant
    that also uses a gradient error estimate in the stopping criterion.

    Returns:
        mu_list by default; if ``extension_params['timings']`` is truthy,
        (mu_list, times, Js, FOCs) and — with ``return_opt_rom`` — also the
        final ROM.
    """
    if TR_parameters is None:
        mu_k = opt_rom.parameter_space.sample_randomly(1)[0]
        TR_parameters = {'radius': 0.1, 'sub_tolerance': 1e-8, 'max_iterations': 30, 'max_iterations_subproblem':400,
                         'starting_parameter': mu_k, 'max_iterations_armijo': 50, 'initial_step_armijo': 0.5,
                         'armijo_alpha': 1e-4, 'full_order_model': False,
                         'epsilon_i': 1e-8, 'Qian-Grepl': False, 'safety_tolerance': 1e-16, 'beta': 0.95}
    else:
        # Fill in any missing parameters with their defaults
        if 'radius' not in TR_parameters:
            TR_parameters['radius'] = 0.1
        if 'sub_tolerance' not in TR_parameters:
            TR_parameters['sub_tolerance'] = 1e-8
        if 'max_iterations' not in TR_parameters:
            TR_parameters['max_iterations'] = 30
        if 'max_iterations_subproblem' not in TR_parameters:
            TR_parameters['max_iterations_subproblem'] = 400
        if 'starting_parameter' not in TR_parameters:
            TR_parameters['starting_parameter'] = opt_rom.parameter_space.sample_randomly(1)[0]
        if 'max_iterations_armijo' not in TR_parameters:
            TR_parameters['max_iterations_armijo'] = 50
        if 'initial_step_armijo' not in TR_parameters:
            TR_parameters['initial_step_armijo'] = 0.5
        if 'armijo_alpha' not in TR_parameters:
            TR_parameters['armijo_alpha'] = 1.e-4
        if 'full_order_model' not in TR_parameters:
            TR_parameters['full_order_model'] = False
        if 'printing' not in TR_parameters:
            TR_parameters['printing'] = False
        if 'epsilon_i' not in TR_parameters:
            TR_parameters['epsilon_i'] = 1e-8
        if 'Qian-Grepl' not in TR_parameters:
            TR_parameters['Qian-Grepl'] = False
        if 'safety_tolerance' not in TR_parameters:
            TR_parameters['safety_tolerance'] = 1e-16
        if 'beta' not in TR_parameters:
            TR_parameters['beta'] = 0.95
    mu_k = TR_parameters['starting_parameter']
    if extension_params is None:
        extension_params={'Check_suff_and_nec_conditions': True, 'Enlarge_radius': True, 'opt_fom': None }
    elif TR_parameters['Qian-Grepl']:
        # Qian-Grepl variant always checks conditions and never enlarges the radius
        extension_params['Check_suff_and_nec_conditions'] = True
        extension_params['Enlarge_radius'] = False
        if 'opt_fom' not in extension_params:
            extension_params['opt_fom'] = None
    else:
        if 'Check_suff_and_nec_conditions' not in extension_params:
            extension_params['Check_suff_and_nec_conditions'] = True
        if 'Enlarge_radius' not in extension_params:
            extension_params['Enlarge_radius'] = True
        if 'opt_fom' not in extension_params:
            extension_params['opt_fom'] = None
    if opt_fom is None:
        opt_fom = extension_params['opt_fom']
    if 'FOC_tolerance' not in TR_parameters:
        TR_parameters['FOC_tolerance'] = TR_parameters['sub_tolerance']
    if TR_parameters['Qian-Grepl']:
        print('QIAN et al. 2017 Method')
    print('starting parameter {}'.format(mu_k))
    # timings
    tic = time.time()
    Js = []
    FOCs = []
    times = []
    parameter_space = opt_rom.parameter_space
    mu_list = []
    mu_list.append(mu_k)
    JFE_list = []
    normgrad = 1e6
    estimate_gradient = 1e6 # Used only for Qian et al. method
    model_has_been_enriched = False
    point_rejected = False
    J_k = opt_rom.output_functional_hat(mu_k)
    print("Starting value of the cost: {}".format(J_k))
    print("******************************* \n")
    k = 0
    while k < TR_parameters['max_iterations']:
        if point_rejected:
            point_rejected = False
            if TR_parameters['radius'] < 2.22*1e-16:
                print('\nTR-radius is below machine precision... stopping')
                break
        else:
            # Check the first-order stopping criterion (FOM gradient based,
            # or including the gradient error estimate for Qian-Grepl)
            if not TR_parameters['Qian-Grepl']:
                if (normgrad < TR_parameters['FOC_tolerance']):
                    print('\nStopping criteria fulfilled: FOM FOC condition {} '.format(normgrad))
                    break
            else:
                if (normgrad + estimate_gradient < TR_parameters['FOC_tolerance']):
                    print('\nStopping criteria fulfilled: normgrad {} + estimate_gradient {}'.format(normgrad,estimate_gradient))
                    break
        # Solve the TR sub-problem on the current ROM
        mu_kp1, Jcp, j, J_kp1, _ = solve_optimization_subproblem_BFGS(opt_rom, parameter_space, mu_k,
                                                                      TR_parameters)
        u_rom = opt_rom.solve(mu_kp1)
        p_rom = opt_rom.solve_dual(mu_kp1, U=u_rom)
        estimate_J = opt_rom.estimate_output_functional_hat(u_rom, p_rom, mu_kp1)
        if TR_parameters['Qian-Grepl']:
            estimate_gradient = opt_rom.estimate_output_functional_hat_gradient_norm(mu_kp1, u_rom, p_rom)
        if J_kp1 + estimate_J < Jcp:
            # Sufficient decrease certified: accept the point and enrich
            print('checked sufficient condition, starting the enrichment')
            opt_rom, reductor, u, p = enrichment_step(mu_kp1, reductor, opt_fom=extension_params['opt_fom'])
            JFE_list.append(reductor.fom.output_functional_hat(mu_kp1,u))
            model_has_been_enriched = True
            if extension_params['Enlarge_radius']:
                if len(JFE_list) > 2:
                    if (k-1!= 0) and (JFE_list[-2]-JFE_list[-1])/(J_k-J_kp1) > 0.75:
                        TR_parameters['radius'] *= 2
                        print('enlarging the TR radius to {}'.format(TR_parameters['radius']))
            print("k: {} - j {} - Cost Functional: {} - mu: {}".format(k, j, J_kp1, mu_kp1))
            mu_list.append(mu_kp1)
            times.append(time.time() -tic)
            Js.append(J_kp1)
            mu_k = mu_kp1
            J_k = opt_rom.output_functional_hat(mu_k)
        elif J_kp1 - estimate_J > Jcp:
            # Necessary decrease certified to fail: reject and shrink the radius
            print('necessary condition failed')
            TR_parameters['radius'] = TR_parameters['radius'] * 0.5
            print("Shrinking the TR radius to: {} because Jcp {} and J_kp1 {}".format(TR_parameters['radius'], Jcp,
                                                                                     J_kp1))
            point_rejected = True
        else:
            # Inconclusive: enrich first, then re-check the decrease condition
            print('enriching to check the sufficient decrease condition')
            new_rom, new_reductor, u, p = enrichment_step(mu_kp1, reductor, opt_fom=extension_params['opt_fom'])
            JFE_list.append(reductor.fom.output_functional_hat(mu_kp1, u))
            model_has_been_enriched = True
            J_kp1 = new_rom.output_functional_hat(mu_kp1)
            print("k: {} - j {} - Cost Functional: {} - mu: {}".format(k, j, J_kp1, mu_kp1))
            if J_kp1 > Jcp + 1e-8: # add a safety tolerance of 1e-8 for avoiding numerical stability effects
                TR_parameters['radius'] = TR_parameters['radius'] * 0.5
                print("Shrinking the TR radius to: {} because Jcp {} and J_kp1 {}".format(TR_parameters['radius']
                                                                                         ,Jcp,J_kp1))
                point_rejected = True
                JFE_list.pop(-1) #We need to remove the value from the list, because we reject the parameter
            else:
                opt_rom = new_rom
                reductor = new_reductor
                mu_list.append(mu_kp1)
                times.append(time.time() -tic)
                Js.append(J_kp1)
                mu_k = mu_kp1
                if extension_params['Enlarge_radius']:
                    if len(JFE_list) > 2:
                        if (k-1!= 0) and (JFE_list[-2]-JFE_list[-1])/(J_k-J_kp1) > 0.75:
                            TR_parameters['radius'] *= 2
                            print('enlarging the TR radius to {}'.format(TR_parameters['radius']))
                J_k = J_kp1
        if model_has_been_enriched and TR_parameters['Qian-Grepl']:
            # Qian et al. method does not use the fom gradient
            model_has_been_enriched = False
        if not point_rejected:
            # Update the first-order criticality measure for the next check
            if model_has_been_enriched:
                print('computing the fom gradient since the model was enriched')
                gradient = reductor.fom.output_functional_hat_gradient(mu_k, U=u, P=p)
                mu_box = opt_rom.parse_parameter(opt_rom.pre_parse_parameter(opt_rom.parse_parameter_inverse(mu_k)-gradient))
                first_order_criticity = opt_rom.parse_parameter_inverse(mu_k)-opt_rom.parse_parameter_inverse(projection_onto_range(parameter_space, mu_box))
                normgrad = np.linalg.norm(first_order_criticity)
                model_has_been_enriched = False
            else:
                estimate_gradient = opt_rom.estimate_output_functional_hat_gradient_norm(mu_k, non_assembled=True)
                gradient = opt_rom.output_functional_hat_gradient(mu_k)
                mu_box = opt_rom.parse_parameter(opt_rom.pre_parse_parameter(opt_rom.parse_parameter_inverse(mu_k)-gradient))
                first_order_criticity = opt_rom.parse_parameter_inverse(mu_k)-opt_rom.parse_parameter_inverse(projection_onto_range(parameter_space, mu_box))
                normgrad = np.linalg.norm(first_order_criticity)
            FOCs.append(normgrad)
            if TR_parameters['Qian-Grepl']:
                print('estimate_gradient {}'.format(estimate_gradient))
            print("First order critical condition: {}".format(normgrad))
        k= k + 1
        print("******************************* \n")
    if extension_params['Enlarge_radius']:
        Js = JFE_list # This is for speeding-up the post-processing computation of the error for the TR method
                      # This procedure does not give additional speed-up to our method,
                      # but improves only the time of the post-processing step,
                      # to have the plot of the error, which is not counted in the computational time of the method.
    if k >= TR_parameters['max_iterations']:
        print (" WARNING: Maximum number of iteration for the TR algorithm reached")
    if 'timings' in extension_params:
        if extension_params['timings']:
            if return_opt_rom:
                return mu_list, times, Js, FOCs, opt_rom
            else:
                return mu_list, times, Js, FOCs
        else:
            return mu_list, times, Js, FOCs
    return mu_list
| [
"numpy.multiply",
"numpy.eye",
"copy.deepcopy",
"numpy.ones",
"numpy.array",
"numpy.dot",
"numpy.outer",
"numpy.linalg.norm",
"time.time"
] | [((2148, 2161), 'numpy.array', 'np.array', (['Act'], {}), '(Act)\n', (2156, 2161), True, 'import numpy as np\n'), ((4486, 4510), 'numpy.multiply', 'np.multiply', (['Active', 'eta'], {}), '(Active, eta)\n', (4497, 4510), True, 'import numpy as np\n'), ((4522, 4548), 'numpy.multiply', 'np.multiply', (['Inactive', 'eta'], {}), '(Inactive, eta)\n', (4533, 4548), True, 'import numpy as np\n'), ((5099, 5110), 'time.time', 'time.time', ([], {}), '()\n', (5108, 5110), False, 'import time\n'), ((5336, 5360), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (5350, 5360), True, 'import numpy as np\n'), ((5902, 5919), 'numpy.eye', 'np.eye', (['mu_i.size'], {}), '(mu_i.size)\n', (5908, 5919), True, 'import numpy as np\n'), ((10096, 10120), 'numpy.multiply', 'np.multiply', (['Active', 'eta'], {}), '(Active, eta)\n', (10107, 10120), True, 'import numpy as np\n'), ((10131, 10157), 'numpy.multiply', 'np.multiply', (['Inactive', 'eta'], {}), '(Inactive, eta)\n', (10142, 10157), True, 'import numpy as np\n'), ((10824, 10835), 'time.time', 'time.time', ([], {}), '()\n', (10833, 10835), False, 'import time\n'), ((11076, 11100), 'numpy.linalg.norm', 'np.linalg.norm', (['gradient'], {}), '(gradient)\n', (11090, 11100), True, 'import numpy as np\n'), ((14591, 14609), 'copy.deepcopy', 'deepcopy', (['reductor'], {}), '(reductor)\n', (14599, 14609), False, 'from copy import deepcopy\n'), ((17972, 17983), 'time.time', 'time.time', ([], {}), '()\n', (17981, 17983), False, 'import time\n'), ((2174, 2192), 'numpy.ones', 'np.ones', (['Act.shape'], {}), '(Act.shape)\n', (2181, 2192), True, 'import numpy as np\n'), ((4041, 4059), 'numpy.outer', 'np.outer', (['Hkgk', 'pk'], {}), '(Hkgk, pk)\n', (4049, 4059), True, 'import numpy as np\n'), ((4078, 4096), 'numpy.outer', 'np.outer', (['pk', 'Hkgk'], {}), '(pk, Hkgk)\n', (4086, 4096), True, 'import numpy as np\n'), ((4113, 4129), 'numpy.outer', 'np.outer', (['pk', 'pk'], {}), '(pk, pk)\n', (4121, 4129), True, 'import numpy 
as np\n'), ((4353, 4378), 'numpy.eye', 'np.eye', (['old_gradient.size'], {}), '(old_gradient.size)\n', (4359, 4378), True, 'import numpy as np\n'), ((4615, 4650), 'numpy.multiply', 'np.multiply', (['Inactive', 'Hessian_prod'], {}), '(Inactive, Hessian_prod)\n', (4626, 4650), True, 'import numpy as np\n'), ((5835, 5864), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_i_1 - mu_i)'], {}), '(mu_i_1 - mu_i)\n', (5849, 5864), True, 'import numpy as np\n'), ((8240, 8254), 'copy.deepcopy', 'deepcopy', (['mu_i'], {}), '(mu_i)\n', (8248, 8254), False, 'from copy import deepcopy\n'), ((8329, 8347), 'copy.deepcopy', 'deepcopy', (['gradient'], {}), '(gradient)\n', (8337, 8347), False, 'from copy import deepcopy\n'), ((8726, 8763), 'numpy.linalg.norm', 'np.linalg.norm', (['first_order_criticity'], {}), '(first_order_criticity)\n', (8740, 8763), True, 'import numpy as np\n'), ((10280, 10314), 'numpy.multiply', 'np.multiply', (['Inactive', 'Action_on_I'], {}), '(Inactive, Action_on_I)\n', (10291, 10314), True, 'import numpy as np\n'), ((11575, 11604), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_i_1 - mu_i)'], {}), '(mu_i_1 - mu_i)\n', (11589, 11604), True, 'import numpy as np\n'), ((13488, 13525), 'numpy.linalg.norm', 'np.linalg.norm', (['first_order_criticity'], {}), '(first_order_criticity)\n', (13502, 13525), True, 'import numpy as np\n'), ((8128, 8157), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_i - mu_ip1)'], {}), '(mu_i - mu_ip1)\n', (8142, 8157), True, 'import numpy as np\n'), ((8160, 8180), 'numpy.linalg.norm', 'np.linalg.norm', (['mu_i'], {}), '(mu_i)\n', (8174, 8180), True, 'import numpy as np\n'), ((9222, 9251), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_i_1 - mu_i)'], {}), '(mu_i_1 - mu_i)\n', (9236, 9251), True, 'import numpy as np\n'), ((12964, 12993), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_i - mu_ip1)'], {}), '(mu_i - mu_ip1)\n', (12978, 12993), True, 'import numpy as np\n'), ((12996, 13016), 'numpy.linalg.norm', 'np.linalg.norm', (['mu_i'], {}), 
'(mu_i)\n', (13010, 13016), True, 'import numpy as np\n'), ((13993, 14022), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_i_1 - mu_i)'], {}), '(mu_i_1 - mu_i)\n', (14007, 14022), True, 'import numpy as np\n'), ((7474, 7501), 'numpy.dot', 'np.dot', (['direction', 'gradient'], {}), '(direction, gradient)\n', (7480, 7501), True, 'import numpy as np\n'), ((9641, 9652), 'time.time', 'time.time', ([], {}), '()\n', (9650, 9652), False, 'import time\n'), ((12663, 12689), 'numpy.dot', 'np.dot', (['(-deltamu)', 'gradient'], {}), '(-deltamu, gradient)\n', (12669, 12689), True, 'import numpy as np\n'), ((14110, 14147), 'numpy.linalg.norm', 'np.linalg.norm', (['first_order_criticity'], {}), '(first_order_criticity)\n', (14124, 14147), True, 'import numpy as np\n'), ((14180, 14191), 'time.time', 'time.time', ([], {}), '()\n', (14189, 14191), False, 'import time\n'), ((23472, 23509), 'numpy.linalg.norm', 'np.linalg.norm', (['first_order_criticity'], {}), '(first_order_criticity)\n', (23486, 23509), True, 'import numpy as np\n'), ((24074, 24111), 'numpy.linalg.norm', 'np.linalg.norm', (['first_order_criticity'], {}), '(first_order_criticity)\n', (24088, 24111), True, 'import numpy as np\n'), ((9572, 9609), 'numpy.linalg.norm', 'np.linalg.norm', (['first_order_criticity'], {}), '(first_order_criticity)\n', (9586, 9609), True, 'import numpy as np\n'), ((20581, 20592), 'time.time', 'time.time', ([], {}), '()\n', (20590, 20592), False, 'import time\n'), ((3162, 3191), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_ip1 - mu_i)'], {}), '(mu_ip1 - mu_i)\n', (3176, 3191), True, 'import numpy as np\n'), ((22293, 22304), 'time.time', 'time.time', ([], {}), '()\n', (22302, 22304), False, 'import time\n')] |
from __future__ import print_function
import gdal
import os
import numpy as np
from time import time
import osr
import argparse
import shutil
from scipy import ndimage as ndi
from skimage.filters import gabor_kernel
import multiprocessing as mp
from sklearn.cluster import AffinityPropagation
from sklearn import preprocessing
import pandas as pd
import string
# Chip/tile configuration: images are processed in full 500x500 blocks.
y_block_size = 500
x_block_size = 500
# n_bands: declared band count — not referenced in the code below; presumably
# documents the expected 4-band imagery (confirm against the input rasters).
n_bands = 4
null_pix_value = 0  # pixel value treated as nodata; chips containing it are skipped
# Prepare filter bank kernels
# Gabor bank: 4 orientations x 2 sigmas x 2 frequencies = 16 real-valued kernels.
kernels = []
for theta in range(4):
    theta = theta / 4. * np.pi  # rebinds the loop index to the orientation angle in radians
    for sigma in (1, 3):
        for frequency in (0.05, 0.25):
            kernel = np.real(gabor_kernel(frequency, theta=theta,
                                          sigma_x=sigma, sigma_y=sigma))
            kernels.append(kernel)
n_filters = len(kernels)  # 16 kernels; features store one mean and one variance each
def bgr2grey(bgr):
    """Collapse a (band, y, x) BGR array to a single greyscale plane using the
    ITU-R 601 luma weights (B*0.1140 + G*0.5870 + R*0.2989)."""
    weights = (0.1140, 0.5870, 0.2989)
    return sum(w * bgr[band, :, :] for band, w in enumerate(weights))
def compute_filter_feats(arr, bank):
    """Convolve ``arr`` with every kernel in ``bank`` and collect statistics.

    Fix: the output length is now derived from ``len(bank)`` instead of the
    module-level global ``n_filters``, so the function works correctly for any
    kernel bank (identical behavior when called with the module's own bank).

    Args:
        arr: 2-D greyscale image array.
        bank: sequence of 2-D convolution kernels.

    Returns:
        1-D float array of length 2*len(bank): the per-kernel response means
        first, then the variances (feats[k] pairs with feats[k + len(bank)]).
    """
    n_kernels = len(bank)
    feats = np.zeros(2 * n_kernels, dtype=np.double)
    for k, kernel in enumerate(bank):
        # 'wrap' matches the original boundary handling of the filter bank
        filtered = ndi.convolve(arr, kernel, mode='wrap')
        feats[k] = filtered.mean()
        feats[k + n_kernels] = filtered.var()
    return feats
def get_feats(io_arg):
    """Compute the 42-element feature row for one 500x500 chip.

    ``io_arg`` is a (raster_path, (row, col)) pair. The row holds the chip
    origin (x, y), the per-band means and standard deviations, and the Gabor
    filter statistics of the greyscale chip.
    """
    raster_path, (row, col) = io_arg
    dataset = gdal.Open(raster_path)
    chip_feats = np.zeros((1, 42))
    # Store the chip origin as (x, y)
    chip_feats[0][0] = col
    chip_feats[0][1] = row
    chip = dataset.ReadAsArray(col, row, 500, 500)
    band_count = chip.shape[0]
    for band in range(band_count):
        chip_feats[0][band + 2] = chip[band, :, :].mean()
        chip_feats[0][band + 2 + band_count] = chip[band, :, :].std()
    # Remaining 32 slots: mean/variance per Gabor kernel
    chip_feats[0][10:] = compute_filter_feats(bgr2grey(chip), kernels)
    return chip_feats
def get_chips(im_ras):
    """Scan the raster and return the (y, x) origins of every full-sized
    block that contains no nodata pixels."""
    dataset = gdal.Open(im_ras)
    n_rows = dataset.RasterYSize
    n_cols = dataset.RasterXSize
    origins = []
    for y in range(0, n_rows, y_block_size):
        # skip partial blocks at the bottom edge
        if y + y_block_size >= n_rows:
            continue
        for x in range(0, n_cols, x_block_size):
            # only consider full-width blocks
            if x + x_block_size < n_cols:
                block = dataset.ReadAsArray(x, y, x_block_size, y_block_size)
                if null_pix_value not in block:
                    origins.append((y, x))
    return origins
def mp_feats(arr, chips, processes):
    """Fan chip feature extraction out across a pool of worker processes.

    Pairs the raster path ``arr`` with every chip origin in ``chips`` and
    maps :func:`get_feats` over the pairs; returns the list of feature rows.
    """
    jobs = zip([arr] * len(chips), list(chips))
    pool = mp.Pool(processes=processes)
    results = list(pool.map(get_feats, jobs))
    pool.close()
    pool.join()
    return results
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ready')
parser.add_argument('--wd', dest='wd', required=True,
help='Working directory to store temp raster')
parser.add_argument('--list', dest='filelist', required=True,
help='Path to text file listing images to process')
parser.add_argument('--dst', dest='dst_folder', required=True,
help='Top level folder for output')
parser.add_argument('--fn', dest='out_fn', required=True,
help='Prefix for output statistics files')
parser.add_argument('--n_proc', dest='n_proc', required=True, type=int,
help='Number of worker processes to use')
parser.add_argument('--pref', dest='pref', required=False, type=float,
help='AP preference value to be used instead /'
'of default')
args = parser.parse_args()
wd = args.wd
filelist = args.filelist
dst_folder = args.dst_folder
out_fn = args.out_fn
n_proc = args.n_proc
pref = args.pref
if pref:
print("Preference set to {}".format(pref))
else:
print("Default preference will be used")
# Create output folder if it doesn't already exist
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
scene_list = [line.rstrip('\n\r', ) for line in open(filelist)]
for i in range(0, len(scene_list)):
scene = scene_list[i]
start_scene = time()
im_basename = os.path.splitext(os.path.basename(scene))[0]
im_folder = "ex_" + im_basename
out_path = os.path.join(dst_folder, im_folder)
if not os.path.exists(out_path):
os.mkdir(out_path)
os.chdir(out_path)
temp_dst = os.path.join(wd, os.path.basename(scene))
shutil.copy(scene, temp_dst)
src_ds = gdal.Open(temp_dst)
# Get raster parameters from sat image
x_cell_size = src_ds.GetGeoTransform()[1]
y_cell_size = src_ds.GetGeoTransform()[5]
skew = 0
x_min = src_ds.GetGeoTransform()[0]
y_max = src_ds.GetGeoTransform()[3]
wkt = src_ds.GetProjection()
srs = osr.SpatialReference()
srs.ImportFromWkt(wkt)
chips = get_chips(scene)
start_blocks = time()
print("Beginning scene {} of {}".format((i + 1), (len(scene_list))))
result = mp_feats(temp_dst, chips, n_proc)
minutes = ((time() - start_blocks) / 60)
print("Scene scan finished in {} minutes".format(minutes))
# Create numpy array to hold samples
result_array = np.zeros((len(result), 42))
for n in range(0, len(result)):
result_array[n][:] = result[n]
df = pd.DataFrame(data=result_array, columns=['x_origin', 'y_origin', 'blue_mean', 'green_mean',
'red_mean', 'nir_mean', 'blue_std', 'green_std',
'red_std', 'nir_std', 'f1_mean', 'f2_mean', 'f3_mean',
'f4_mean', 'f5_mean', 'f6_mean', 'f7_mean', 'f8_mean',
'f9_mean', 'f10_mean', 'f11_mean', 'f12_mean', 'f13_mean',
'f14_mean', 'f5_mean', 'f16_mean', 'f1_std', 'f2_std',
'f3_std', 'f4_std', 'f5_std', 'f6_std', 'f7_std',
'f8_std', 'f9_std', 'f10_std', 'f11_std', 'f12_std',
'f13_std', 'f14_std', 'f15_std', 'f16_std'])
df.to_csv("{}_ex_{}.csv".format(out_fn, im_basename), index=False,
header=True)
samples = result_array[:, 2:]
sample_idxs = result_array[:, :2]
# Initialize array to hold scaled samples
scaled_samples = np.zeros(samples.shape)
# Scale each feature type separately
scaled_samples[:, 0:4] = preprocessing.scale(samples[:, 0:4])
scaled_samples[:, 4:8] = preprocessing.scale(samples[:, 4:8])
scaled_samples[:, 8:24] = preprocessing.scale(samples[:, 8:24])
scaled_samples[:, 24:] = preprocessing.scale(samples[:, 24:])
if pref:
ap = AffinityPropagation(max_iter=10000, convergence_iter=100,
preference=pref,
affinity="euclidean").fit(scaled_samples)
else:
ap = AffinityPropagation(max_iter=10000, convergence_iter=100,
affinity="euclidean").fit(scaled_samples)
cluster_center_indices = ap.cluster_centers_indices_
chip_count = 1
for tile in cluster_center_indices:
subset = sample_idxs[tile]
path = os.path.join(out_path, im_basename + "_" + str(chip_count).zfill(4) + '.tif')
# Get all raster bands for subset
band = src_ds.GetRasterBand(1)
red = band.ReadAsArray(int(subset[0]), int(subset[1]), x_block_size, y_block_size)
band = src_ds.GetRasterBand(2)
green = band.ReadAsArray(int(subset[0]), int(subset[1]), win_xsize=500, win_ysize=500)
band = src_ds.GetRasterBand(3)
blue = band.ReadAsArray(int(subset[0]), int(subset[1]), win_xsize=500, win_ysize=500)
band = src_ds.GetRasterBand(4)
nir = band.ReadAsArray(int(subset[0]), int(subset[1]), win_xsize=500, win_ysize=500)
# Create geotransform information for destination raster
x_origin = x_min + (subset[0] * x_cell_size)
y_origin = y_max + (subset[1] * y_cell_size)
new_transform = (x_origin, x_cell_size, skew, y_origin, skew, y_cell_size)
# Create destination dataset
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(path, x_block_size,
y_block_size, 4,
gdal.GDT_UInt16)
# Write subsetted bands to destination dataset
dst_ds.GetRasterBand(1).WriteArray(red)
dst_ds.GetRasterBand(2).WriteArray(green)
dst_ds.GetRasterBand(3).WriteArray(blue)
dst_ds.GetRasterBand(4).WriteArray(nir)
# Set geotransform and projection info
dst_ds.SetGeoTransform(new_transform)
dst_ds.SetProjection(srs.ExportToWkt())
# Close output
dst_ds = None
chip_count += 1
src_ds = None
os.remove(temp_dst)
minutes = ((time() - start_scene) / 60)
print("Scene finished in {} minutes".format(minutes))
| [
"gdal.GetDriverByName",
"os.remove",
"os.path.exists",
"argparse.ArgumentParser",
"os.mkdir",
"pandas.DataFrame",
"osr.SpatialReference",
"sklearn.cluster.AffinityPropagation",
"scipy.ndimage.convolve",
"shutil.copy",
"time.time",
"sklearn.preprocessing.scale",
"gdal.Open",
"os.makedirs",
... | [((1002, 1042), 'numpy.zeros', 'np.zeros', (['(2 * n_filters)'], {'dtype': 'np.double'}), '(2 * n_filters, dtype=np.double)\n', (1010, 1042), True, 'import numpy as np\n'), ((1308, 1322), 'gdal.Open', 'gdal.Open', (['arr'], {}), '(arr)\n', (1317, 1322), False, 'import gdal\n'), ((1341, 1358), 'numpy.zeros', 'np.zeros', (['(1, 42)'], {}), '((1, 42))\n', (1349, 1358), True, 'import numpy as np\n'), ((1820, 1837), 'gdal.Open', 'gdal.Open', (['im_ras'], {}), '(im_ras)\n', (1829, 1837), False, 'import gdal\n'), ((2494, 2521), 'multiprocessing.Pool', 'mp.Pool', ([], {'processes': 'num_proc'}), '(processes=num_proc)\n', (2501, 2521), True, 'import multiprocessing as mp\n'), ((2696, 2740), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Ready"""'}), "(description='Ready')\n", (2719, 2740), False, 'import argparse\n'), ((1102, 1138), 'scipy.ndimage.convolve', 'ndi.convolve', (['arr', 'item'], {'mode': '"""wrap"""'}), "(arr, item, mode='wrap')\n", (1114, 1138), True, 'from scipy import ndimage as ndi\n'), ((4002, 4028), 'os.path.exists', 'os.path.exists', (['dst_folder'], {}), '(dst_folder)\n', (4016, 4028), False, 'import os\n'), ((4039, 4062), 'os.makedirs', 'os.makedirs', (['dst_folder'], {}), '(dst_folder)\n', (4050, 4062), False, 'import os\n'), ((4231, 4237), 'time.time', 'time', ([], {}), '()\n', (4235, 4237), False, 'from time import time\n'), ((4367, 4402), 'os.path.join', 'os.path.join', (['dst_folder', 'im_folder'], {}), '(dst_folder, im_folder)\n', (4379, 4402), False, 'import os\n'), ((4486, 4504), 'os.chdir', 'os.chdir', (['out_path'], {}), '(out_path)\n', (4494, 4504), False, 'import os\n'), ((4578, 4606), 'shutil.copy', 'shutil.copy', (['scene', 'temp_dst'], {}), '(scene, temp_dst)\n', (4589, 4606), False, 'import shutil\n'), ((4625, 4644), 'gdal.Open', 'gdal.Open', (['temp_dst'], {}), '(temp_dst)\n', (4634, 4644), False, 'import gdal\n'), ((4958, 4980), 'osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', 
(4978, 4980), False, 'import osr\n'), ((5075, 5081), 'time.time', 'time', ([], {}), '()\n', (5079, 5081), False, 'from time import time\n'), ((5537, 6077), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'result_array', 'columns': "['x_origin', 'y_origin', 'blue_mean', 'green_mean', 'red_mean', 'nir_mean',\n 'blue_std', 'green_std', 'red_std', 'nir_std', 'f1_mean', 'f2_mean',\n 'f3_mean', 'f4_mean', 'f5_mean', 'f6_mean', 'f7_mean', 'f8_mean',\n 'f9_mean', 'f10_mean', 'f11_mean', 'f12_mean', 'f13_mean', 'f14_mean',\n 'f5_mean', 'f16_mean', 'f1_std', 'f2_std', 'f3_std', 'f4_std', 'f5_std',\n 'f6_std', 'f7_std', 'f8_std', 'f9_std', 'f10_std', 'f11_std', 'f12_std',\n 'f13_std', 'f14_std', 'f15_std', 'f16_std']"}), "(data=result_array, columns=['x_origin', 'y_origin',\n 'blue_mean', 'green_mean', 'red_mean', 'nir_mean', 'blue_std',\n 'green_std', 'red_std', 'nir_std', 'f1_mean', 'f2_mean', 'f3_mean',\n 'f4_mean', 'f5_mean', 'f6_mean', 'f7_mean', 'f8_mean', 'f9_mean',\n 'f10_mean', 'f11_mean', 'f12_mean', 'f13_mean', 'f14_mean', 'f5_mean',\n 'f16_mean', 'f1_std', 'f2_std', 'f3_std', 'f4_std', 'f5_std', 'f6_std',\n 'f7_std', 'f8_std', 'f9_std', 'f10_std', 'f11_std', 'f12_std',\n 'f13_std', 'f14_std', 'f15_std', 'f16_std'])\n", (5549, 6077), True, 'import pandas as pd\n'), ((6765, 6788), 'numpy.zeros', 'np.zeros', (['samples.shape'], {}), '(samples.shape)\n', (6773, 6788), True, 'import numpy as np\n'), ((6871, 6907), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['samples[:, 0:4]'], {}), '(samples[:, 0:4])\n', (6890, 6907), False, 'from sklearn import preprocessing\n'), ((6942, 6978), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['samples[:, 4:8]'], {}), '(samples[:, 4:8])\n', (6961, 6978), False, 'from sklearn import preprocessing\n'), ((7014, 7051), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['samples[:, 8:24]'], {}), '(samples[:, 8:24])\n', (7033, 7051), False, 'from sklearn import preprocessing\n'), ((7086, 7122), 
'sklearn.preprocessing.scale', 'preprocessing.scale', (['samples[:, 24:]'], {}), '(samples[:, 24:])\n', (7105, 7122), False, 'from sklearn import preprocessing\n'), ((9507, 9526), 'os.remove', 'os.remove', (['temp_dst'], {}), '(temp_dst)\n', (9516, 9526), False, 'import os\n'), ((4419, 4443), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (4433, 4443), False, 'import os\n'), ((4458, 4476), 'os.mkdir', 'os.mkdir', (['out_path'], {}), '(out_path)\n', (4466, 4476), False, 'import os\n'), ((4544, 4567), 'os.path.basename', 'os.path.basename', (['scene'], {}), '(scene)\n', (4560, 4567), False, 'import os\n'), ((8760, 8789), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (8780, 8789), False, 'import gdal\n'), ((649, 715), 'skimage.filters.gabor_kernel', 'gabor_kernel', (['frequency'], {'theta': 'theta', 'sigma_x': 'sigma', 'sigma_y': 'sigma'}), '(frequency, theta=theta, sigma_x=sigma, sigma_y=sigma)\n', (661, 715), False, 'from skimage.filters import gabor_kernel\n'), ((4278, 4301), 'os.path.basename', 'os.path.basename', (['scene'], {}), '(scene)\n', (4294, 4301), False, 'import os\n'), ((5237, 5243), 'time.time', 'time', ([], {}), '()\n', (5241, 5243), False, 'from time import time\n'), ((9550, 9556), 'time.time', 'time', ([], {}), '()\n', (9554, 9556), False, 'from time import time\n'), ((7161, 7261), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'max_iter': '(10000)', 'convergence_iter': '(100)', 'preference': 'pref', 'affinity': '"""euclidean"""'}), "(max_iter=10000, convergence_iter=100, preference=pref,\n affinity='euclidean')\n", (7180, 7261), False, 'from sklearn.cluster import AffinityPropagation\n'), ((7387, 7466), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {'max_iter': '(10000)', 'convergence_iter': '(100)', 'affinity': '"""euclidean"""'}), "(max_iter=10000, convergence_iter=100, affinity='euclidean')\n", (7406, 7466), False, 'from sklearn.cluster 
import AffinityPropagation\n')] |
import io
import os
import time
from collections import Counter
from tempfile import NamedTemporaryFile
import cv2
import numpy as np
import pyautogui
from gtts import gTTS
from mpg123 import Mpg123, Out123
def get_screen_image():
    """Capture the current screen and return it as a BGR OpenCV array."""
    with NamedTemporaryFile() as tmp:
        # pyautogui writes the screenshot to the temp file and returns a
        # PIL image (RGB); convert to OpenCV's BGR channel order.
        screenshot = pyautogui.screenshot(imageFilename=tmp.name)
        bgr_image = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
    return bgr_image
def extract_qr_codes(image):
    """Detect all QR codes in ``image`` and tally each decoded payload.

    Returns a Counter mapping decoded payload strings to how many codes
    carried that payload.
    """
    detector = cv2.QRCodeDetector()
    detection = detector.detectAndDecodeMulti(image)
    decoded_payloads = detection[1]
    return Counter(decoded_payloads)
def audio_describe(codes):
    """Speak a summary of the recognised QR payloads.

    ``codes`` maps payload strings to counts; only the 'thumbs_up' and
    'smiling' payloads are announced.  Returns immediately (silently)
    when neither payload is present.
    """
    fragments = []
    for code, count in codes.items():
        if code == 'thumbs_up':
            fragments.append(f'{count} users are presenting thumbs-up, ')
        elif code == 'smiling':
            fragments.append(f'{count} users are smiling, ')
    text = ''.join(fragments)
    if text == '':
        return
    with io.BytesIO() as buffer:
        # Render the sentence to MP3 in memory, then stream the decoded
        # frames straight to the audio output.
        gTTS(text=text, lang='en', slow=False).write_to_fp(buffer)
        buffer.seek(0)
        decoder = Mpg123()
        decoder.feed(buffer.read())
        audio_out = Out123()
        for frame in decoder.iter_frames(audio_out.start):
            audio_out.play(frame)
if __name__ == '__main__':
    # Main loop: repeatedly screenshot the display, count the QR codes
    # present, and speak a summary — printing per-stage timings throughout.
    while True:
        # Sanity check using a pre-made image.
        # Comment out the get_screen_image() call to test this.
        # image = cv2.imread('multi.png')
        s = time.perf_counter()
        image = get_screen_image()
        print(f'Screenshot time: {time.perf_counter() - s:0.2f} secs')
        s = time.perf_counter()
        codes_and_counts = extract_qr_codes(image)
        print(f'QR extraction time: {time.perf_counter() - s:0.2f} secs')
        if len(codes_and_counts) > 0:
            s = time.perf_counter()
            audio_describe(codes_and_counts)
            print(f'Audio time: {time.perf_counter() - s:0.2f} secs')
        else:
            print('No QR codes detected')
| [
"pyautogui.screenshot",
"io.BytesIO",
"time.perf_counter",
"collections.Counter",
"numpy.array",
"mpg123.Mpg123",
"gtts.gTTS",
"tempfile.NamedTemporaryFile",
"mpg123.Out123",
"cv2.QRCodeDetector"
] | [((480, 500), 'cv2.QRCodeDetector', 'cv2.QRCodeDetector', ([], {}), '()\n', (498, 500), False, 'import cv2\n'), ((565, 580), 'collections.Counter', 'Counter', (['res[1]'], {}), '(res[1])\n', (572, 580), False, 'from collections import Counter\n'), ((243, 263), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {}), '()\n', (261, 263), False, 'from tempfile import NamedTemporaryFile\n'), ((290, 332), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {'imageFilename': 'f.name'}), '(imageFilename=f.name)\n', (310, 332), False, 'import pyautogui\n'), ((365, 384), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (373, 384), True, 'import numpy as np\n'), ((886, 898), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (896, 898), False, 'import io\n'), ((999, 1007), 'mpg123.Mpg123', 'Mpg123', ([], {}), '()\n', (1005, 1007), False, 'from mpg123 import Mpg123, Out123\n'), ((1049, 1057), 'mpg123.Out123', 'Out123', ([], {}), '()\n', (1055, 1057), False, 'from mpg123 import Mpg123, Out123\n'), ((1346, 1365), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1363, 1365), False, 'import time\n'), ((1485, 1504), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1502, 1504), False, 'import time\n'), ((1684, 1703), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1701, 1703), False, 'import time\n'), ((913, 951), 'gtts.gTTS', 'gTTS', ([], {'text': 'text', 'lang': '"""en"""', 'slow': '(False)'}), "(text=text, lang='en', slow=False)\n", (917, 951), False, 'from gtts import gTTS\n'), ((1435, 1454), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1452, 1454), False, 'import time\n'), ((1593, 1612), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1610, 1612), False, 'import time\n'), ((1782, 1801), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1799, 1801), False, 'import time\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Provides support utilities for manipulating cube units."""
import iris
import numpy as np
from cf_units import Unit
from iris.exceptions import CoordinateNotFoundError
from improver.units import DEFAULT_UNITS
def enforce_units_and_dtypes(cubes, coords=None, enforce=True):
    """
    Function to check the units and datatypes of cube diagnostics and
    coordinates against the manifest in improver.units, with option to
    enforce or fail for non-conforming data.

    Args:
        cubes (iris.cube.Cube or iris.cube.CubeList):
            Cube or list of cubes to be checked
        coords (list or None):
            List of coordinate names to check. If None, checks all
            coordinates present on the input cubes.
        enforce (bool):
            If True, this function returns a list of conformant cubes.
            If False, a ValueError is thrown if the cubes do not conform.

    Raises:
        ValueError: if "enforce=False" and the input cubes do not conform
        to the datatypes and units standard.

    Returns:
        new_cubes (iris.cube.CubeList):
            New cubelist with conformant datatypes and units
    """
    # convert input to CubeList
    if isinstance(cubes, iris.cube.Cube):
        cubes = [cubes]
    # create a list of copied cubes to modify
    new_cubes = [cube.copy() for cube in cubes]

    # construct a list of objects (cubes and coordinates) to be checked
    object_list = []
    for cube in new_cubes:
        object_list.append(cube)
        if coords is not None:
            for coord in coords:
                try:
                    object_list.append(cube.coord(coord))
                except CoordinateNotFoundError:
                    pass
        else:
            object_list.extend(cube.coords())

    error_string = ''
    for item in object_list:
        units, dtype = _get_required_units_and_dtype(item.name())

        if not enforce:
            # if not enforcing, record an error if non-compliant
            conforms = _check_units_and_dtype(item, units, dtype)
            if not conforms:
                msg = ('{} with units {} and datatype {} does not conform'
                       ' to expected standard (units {}, datatype {})\n')
                msg = msg.format(item.name(), item.units, item.dtype,
                                 units, dtype)
                error_string += msg
            continue

        # attempt to convert units and record any errors
        try:
            item.convert_units(units)
        except ValueError:
            # BUG FIX: the message template was previously appended without
            # being formatted, leaking '{}' placeholders into the error text.
            msg = '{} units cannot be converted to "{}"\n'
            error_string += msg.format(item.name(), units)

        # attempt to convert datatype and record any errors
        try:
            if isinstance(item, iris.cube.Cube):
                _convert_diagnostic_dtype(item, dtype)
            else:
                _convert_coordinate_dtype(item, dtype)
        except ValueError as cause:
            # BUG FIX: concatenating the exception object itself raised
            # TypeError (str + ValueError); use its string form instead.
            error_string += str(cause) + '\n'

    # if any errors were raised, re-raise with all messages here
    if error_string:
        msg = 'The following errors were raised during processing:\n'
        raise ValueError(msg + error_string)

    return iris.cube.CubeList(new_cubes)
def _find_dict_key(input_key):
"""
If input_key is not in the DEFAULT_UNITS dict, test for substrings of
input_key that are available. This allows, for example, use of
"temperature" and "probability" in the DEFAULT_UNITS dict to avoid multiple
duplicate entries.
Args:
input_key (str):
Key that didn't return an entry in DEFAULT_UNITS
Returns:
str: New key to identify required entry
Raises:
KeyError: If the function finds either zero or multiple matches
"""
if "probability" in input_key:
# this avoids duplicate results from key matching below
return "probability"
matching_keys = []
for key in DEFAULT_UNITS.keys():
if key in input_key:
matching_keys.append(key)
if len(matching_keys) != 1:
msg = ("Name '{}' is not uniquely defined in units.py; "
"matching keys: {}")
raise KeyError(msg.format(input_key, matching_keys))
return matching_keys[0]
def _get_required_units_and_dtype(key):
    """
    Read the DEFAULT_UNITS dict and return the required units and datatype
    for the given coordinate / diagnostic name.

    Args:
        key (str):
            String name of coordinate or diagnostic to be checked

    Returns:
        units, dtype (tuple):
            Tuple with string and type object identifying the required units
            and datatype

    Raises:
        KeyError:
            If the input_key (or suitable substring) is not present in
            DEFAULT_UNITS.
    """
    try:
        unit = DEFAULT_UNITS[key]["unit"]
    except KeyError:
        # No direct entry: fall back to unique-substring key matching.
        key = _find_dict_key(key)
        unit = DEFAULT_UNITS[key]["unit"]
    # dtype defaults to 32-bit float when the table has no explicit entry.
    dtype = DEFAULT_UNITS[key].get("dtype", np.float32)
    return unit, dtype
def _check_units_and_dtype(obj, units, dtype):
    """
    Check whether the units and datatype of the input object conform
    to the standard given.

    Args:
        obj (iris.cube.Cube or iris.coords.Coord):
            Cube or coordinate to be checked
        units (str):
            Required units
        dtype (type):
            Required datatype

    Returns:
        bool:
            True if object conforms; False if not
    """
    # Compare via cf_units.Unit so equivalent spellings ("m s-1" vs "m/s")
    # of the same unit are treated as equal.
    units_conform = Unit(obj.units) == Unit(units)
    return units_conform and obj.dtype == dtype
def _convert_coordinate_dtype(coord, dtype):
    """
    Cast a coordinate's points to the required datatype in place.

    Args:
        coord (iris.coords.Coord):
            Coordinate instance to be modified in place
        dtype (type):
            Required datatype

    Raises:
        ValueError: if the cast would lose significant precision.
    """
    # Guard clause: refuse the cast if it would be lossy.
    if not check_precision_loss(dtype, coord.points):
        msg = ('Data type of coordinate "{}" could not be'
               ' enforced without losing significant precision.')
        raise ValueError(msg.format(coord.name()))
    coord.points = coord.points.astype(dtype)
def _convert_diagnostic_dtype(cube, dtype):
    """
    Cast cube data to the required datatype in place.

    Args:
        cube (iris.cube.Cube):
            Cube to be modified in place
        dtype (type):
            Required datatype

    Raises:
        ValueError: if the cast would lose significant precision.
    """
    # Guard clause: refuse the cast if it would be lossy.
    if not check_precision_loss(dtype, cube.data):
        msg = ('Data type of diagnostic "{}" could not be'
               ' enforced without losing significant precision.')
        raise ValueError(msg.format(cube.name()))
    cube.data = cube.data.astype(dtype)
def check_precision_loss(dtype, data, precision=5):
    """
    Check that converting ``data`` to ``dtype`` would not lose significant
    information.  Float-to-integer and integer-to-integer conversions are
    covered; float-to-float narrowing (e.g. 64 to 32 bit) is deliberately
    treated as lossless here.

    Args:
        dtype (dtype):
            The data type to which the data is being converted.
        data (numpy.ndarray):
            The data that is to be checked for precision loss under data type
            conversion.
        precision (int):
            The number of decimal places beyond which differences are ignored.

    Returns:
        bool:
            True if the conversion is lossless to the given precision.
            False if the conversion is lossy to the given precision.
    """
    if not np.issubdtype(dtype, np.integer):
        # Target is floating point: accepted without further checks.
        return True
    if np.issubdtype(data.dtype, np.integer):
        # int -> int: the cast must round-trip every value exactly
        # (detects overflow / truncation).
        cast = dtype(data)
        reference = data
    else:
        # float -> int: every value must already be whole, ignoring
        # differences beyond `precision` decimal places.
        cast = np.round(data, precision)
        reference = np.modf(cast)[1]
    return (cast == reference).all()
| [
"iris.cube.CubeList",
"cf_units.Unit",
"improver.units.DEFAULT_UNITS.keys",
"numpy.issubdtype",
"numpy.modf",
"numpy.round"
] | [((4842, 4871), 'iris.cube.CubeList', 'iris.cube.CubeList', (['new_cubes'], {}), '(new_cubes)\n', (4860, 4871), False, 'import iris\n'), ((5578, 5598), 'improver.units.DEFAULT_UNITS.keys', 'DEFAULT_UNITS.keys', ([], {}), '()\n', (5596, 5598), False, 'from improver.units import DEFAULT_UNITS\n'), ((9808, 9845), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.integer'], {}), '(data.dtype, np.integer)\n', (9821, 9845), True, 'import numpy as np\n'), ((7220, 7235), 'cf_units.Unit', 'Unit', (['obj.units'], {}), '(obj.units)\n', (7224, 7235), False, 'from cf_units import Unit\n'), ((7239, 7250), 'cf_units.Unit', 'Unit', (['units'], {}), '(units)\n', (7243, 7250), False, 'from cf_units import Unit\n'), ((9747, 9779), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (9760, 9779), True, 'import numpy as np\n'), ((9927, 9952), 'numpy.round', 'np.round', (['data', 'precision'], {}), '(data, precision)\n', (9935, 9952), True, 'import numpy as np\n'), ((9975, 9990), 'numpy.modf', 'np.modf', (['values'], {}), '(values)\n', (9982, 9990), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import mapping as mp
from . import strategy
def get_relative_to_expiry_instrument_weights(dates, root_generics, expiries,
                                              offsets, all_monthly=False,
                                              holidays=None):
    """
    Generate instrument weights for each root generic where the position is
    rolled entirely in one day based on an offset from the earlier of the
    contracts First Notice Date and Last Trade Date.

    Parameters
    ----------
    dates: Iterable
        Iterable of pandas.Timestamps, dates to generate instrument weights
        for.
    root_generics: dict
        Dictionary with key as root generic and value as list of future
        generics, e.g. {"CL": ["CL1", "CL2"]}
    expiries: pd.DataFrame
        A pd.DataFrame with columns ["contract", "first_notice",
        "last_trade"] where "first_notice" and "last_trade" must be
        parseable to datetimes with format %Y-%m-%d and "contract" must be
        a string in the form YYYYNNC representing the contract name, e.g.
        "2007ESU".
    offsets: int or dict
        Number of business days to roll relative to earlier of the
        instruments First Notice and Last Trade date. If int is given use
        the same number for all futures, if dict is given keys must cover
        all root generics and contain an integer for each.
    all_monthly: boolean
        Whether to roll each contract individually based on the offset from
        the earlier of its First Notice and Last Trade date or to roll all
        contracts with the same month code based on the earliest date.
    holidays: list
        list of timezone aware pd.Timestamps used for holidays when
        calculating relative date roll logic.

    Returns
    -------
    A dictionary of DataFrames of instrument weights indexed by root
    generic, see mapper.mappings.roller()
    """
    close_by = _close_by_dates(expiries, all_monthly)
    # Series of contract close-by dates, keyed by root generic.
    contract_dates = {
        root: grp.loc[:, "close_by"]
        for root, grp in close_by.groupby("root_generic")
    }

    weights = {}
    for root, generics in root_generics.items():
        offset = offsets if isinstance(offsets, int) else offsets[root]
        columns = pd.MultiIndex.from_product([generics, ['front', 'back']])
        # Identity transition tiled across the generics: the whole position
        # moves from front to back contract on a single day.
        identity = np.tile(np.array([[1.0, 0.0], [0.0, 1.0]]), len(generics))
        transition = pd.DataFrame(identity, index=[offset, offset + 1],
                                  columns=columns)
        weights[root] = mp.mappings.roller(dates,
                                          contract_dates[root],
                                          mp.mappings.static_transition,
                                          transition=transition,
                                          holidays=holidays)
    return weights
def get_relative_to_expiry_rebalance_dates(start_date, end_date, expiries,
                                           offsets, all_monthly=False,
                                           holidays=None):
    """
    Rebalance days for trading strategy. These are defined as the offset
    given from the earlier of the instruments First Notice and Last Trade
    date. If all_monthly=True then roll all monthly contracts together
    based on earliest date for this set of contracts. The start_date, or
    if this is not a business day the following business day, is also
    included in the rebalance days.

    Parameters
    ----------
    start_date: pandas.Timestamp
        Date to generate rebalance dates starting from
    end_date: pandas.Timestamp
        Date to generate rebalance dates until
    expiries: pd.DataFrame
        A pd.DataFrame with columns ["contract", "first_notice",
        "last_trade"] where "first_notice" and "last_trade" must be
        parseable to datetimes with format %Y-%m-%d and "contract" must be
        a string in the form YYYYNNC representing the contract name, e.g.
        "2007ESU".
    offsets: int or dict
        Number of business days to roll relative to earlier of the
        instruments First Notice and Last Trade date. If int is given use
        the same number for all futures, if dict is given keys must cover
        all root generics and contain an integer for each.
    all_monthly: boolean
        Whether to roll each contract individually based on the offset from
        the earlier of its First Notice and Last Trade date or to roll all
        contracts with the same month code based on the earliest date.
    holidays: list
        list of timezone aware pd.Timestamps used for holidays when
        calculating relative date roll logic.

    Returns
    -------
    pandas.DatetimeIndex
    """
    if not holidays:
        holidays = []
    close_by = _close_by_dates(expiries, all_monthly)
    # BUG FIX: group by the scalar column name rather than a one-element
    # list — pandas >= 2.0 yields 1-tuple group keys for list groupers,
    # which would break the offsets[root] dict lookup below.  This also
    # matches the sibling get_relative_to_expiry_instrument_weights().
    gnrc_close_by = close_by.groupby("root_generic")
    rebal_dates = []
    for root, close_by_dates in gnrc_close_by:
        if not isinstance(offsets, int):
            offset = offsets[root]
        else:
            offset = offsets
        dates = (
            close_by_dates.loc[:, "close_by"].values.astype('datetime64[D]')
        )
        # Shift each close-by date back/forward by `offset` business days.
        dates = np.busday_offset(dates, offsets=offset, roll='preceding',
                                 holidays=holidays)
        rebal_dates.append(dates)
    rebal_dates = np.concatenate(rebal_dates)
    rebal_dates = pd.DatetimeIndex(rebal_dates).unique().sort_values()
    rebal_dates = rebal_dates[rebal_dates >= start_date]
    rebal_dates = rebal_dates[rebal_dates <= end_date]

    # The first business day on/after start_date is always a rebalance day.
    first_date = np.busday_offset(start_date.date(), 0,
                                 roll="following", holidays=holidays)
    rebal_dates = rebal_dates.union([first_date])
    return rebal_dates
def _close_by_dates(expiries, all_monthly):
    """Return a DataFrame indexed by contract with columns
    ["root_generic", "close_by"], sorted ascending by "close_by".

    "close_by" is the earlier of each contract's first notice and last
    trade date.  When ``all_monthly`` is True, every contract in a given
    (year, month) group shares the earliest "close_by" of that group.
    """
    # hacky, should refactor such that not using private method
    # _validate_expiries
    expiries = strategy.Exposures._validate_expiries(expiries)
    close_by = expiries.set_index("contract")
    # Roll/close on whichever comes first: first notice or last trade.
    close_by.loc[:, "close_by"] = (
        close_by[["first_notice", "last_trade"]].min(axis=1)
    )
    close_by = close_by.sort_values("close_by")
    if all_monthly:
        # Replace each contract's close_by with the earliest close_by of
        # any contract sharing its (year, month).
        # NOTE(review): assumes _validate_expiries provides "year" and
        # "month" columns — confirm against strategy.Exposures.
        close_by = (
            close_by.join(close_by.groupby(["year", "month"]).first(),
                          on=["year", "month"], rsuffix="_earliest_cntrct")
        )
        close_by = close_by[["root_generic", "close_by_earliest_cntrct"]]
        close_by.columns = ["root_generic", "close_by"]
    close_by = close_by.loc[:, ["root_generic", "close_by"]]
    return close_by
def get_fixed_frequency_rebalance_dates(start_date, end_date, frequency,
                                        offset):
    """
    Generate rebalance dates according to a fixed frequency, e.g. Wednesday
    of every week.

    Parameters
    ----------
    start_date: pandas.Timestamp
        Date to generate rebalance dates starting from
    end_date: pandas.Timestamp
        Date to generate rebalance dates until
    frequency: string
        Fixed frequency for rebalance, supports {"weekly", "monthly"}
    offset: int or list
        Relative offsets based on the frequency. E.g. [0, 1] for weekly
        gives the first two days of the week, [-5] for monthly gives the
        fifth last day of the month.

    Returns
    -------
    pandas.DatetimeIndex
        Rebalance dates, always including ``start_date`` itself.

    Raises
    ------
    ValueError
        If ``frequency`` is not one of {"weekly", "monthly"}.  (Previously
        an unsupported frequency crashed later with a NameError.)

    Examples
    --------
    >>> import strategy.rebalance as rebal
    >>> import pandas as pd
    >>> sd = pd.Timestamp("2018-01-01")
    >>> ed = pd.Timestamp("2018-02-01")
    >>> freq = "weekly"
    >>> offsets = 2
    >>> rebal.get_fixed_frequency_rebalance_dates(sd, ed, freq, offsets)
    """
    if frequency == "monthly":
        groups = ["year", "month"]
        # pad out to whole months so partial first/last periods group fully
        sd = start_date - pd.offsets.MonthBegin(1)
        ed = end_date + pd.offsets.MonthEnd(1)
        dts = pd.date_range(start=sd, end=ed, freq="B")
        dts = pd.DataFrame({"date": dts, "month": dts.month,
                            "year": dts.year})
    elif frequency == "weekly":
        groups = ["weekofyear"]
        # pad out to whole weeks (Monday through Sunday)
        sd = start_date - pd.Timedelta(start_date.dayofweek, unit='D')
        ed = end_date + pd.Timedelta(6 - end_date.dayofweek, unit='D')
        dts = pd.date_range(start=sd, end=ed, freq="B")
        # DatetimeIndex.weekofyear was removed in pandas 2.0;
        # isocalendar().week yields the same ISO week numbers.
        week = dts.isocalendar().week.astype(int).to_numpy()
        dts = pd.DataFrame({"date": dts, "weekofyear": week})
    else:
        raise ValueError(
            "Unsupported frequency: {0!r}, expected 'weekly' or "
            "'monthly'".format(frequency)
        )
    # select the offset-th business day(s) within each period
    dts = dts.groupby(groups).apply(lambda x: x.iloc[offset])
    dts = pd.DatetimeIndex(dts.loc[:, "date"].reset_index(drop=True))
    dts = dts[(dts > start_date) & (dts <= end_date)]
    # the start date itself is always a rebalance date
    dts = pd.DatetimeIndex([start_date]).append(dts)
    return dts
| [
"pandas.MultiIndex.from_product",
"mapping.mappings.roller",
"numpy.busday_offset",
"pandas.DatetimeIndex",
"pandas.Timedelta",
"numpy.array",
"pandas.offsets.MonthBegin",
"numpy.concatenate",
"pandas.DataFrame",
"pandas.offsets.MonthEnd",
"pandas.date_range"
] | [((6720, 6747), 'numpy.concatenate', 'np.concatenate', (['rebal_dates'], {}), '(rebal_dates)\n', (6734, 6747), True, 'import numpy as np\n'), ((2891, 2945), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[gnrcs, ['front', 'back']]"], {}), "([gnrcs, ['front', 'back']])\n", (2917, 2945), True, 'import pandas as pd\n'), ((3193, 3237), 'pandas.DataFrame', 'pd.DataFrame', (['trans'], {'index': 'idx', 'columns': 'cols'}), '(trans, index=idx, columns=cols)\n', (3205, 3237), True, 'import pandas as pd\n'), ((3292, 3424), 'mapping.mappings.roller', 'mp.mappings.roller', (['dates', 'cntrct_close_by_dates[root]', 'mp.mappings.static_transition'], {'transition': 'transition', 'holidays': 'holidays'}), '(dates, cntrct_close_by_dates[root], mp.mappings.\n static_transition, transition=transition, holidays=holidays)\n', (3310, 3424), True, 'import mapping as mp\n'), ((6557, 6633), 'numpy.busday_offset', 'np.busday_offset', (['dates'], {'offsets': 'offset', 'roll': '"""preceding"""', 'holidays': 'holidays'}), "(dates, offsets=offset, roll='preceding', holidays=holidays)\n", (6573, 6633), True, 'import numpy as np\n'), ((9260, 9301), 'pandas.date_range', 'pd.date_range', ([], {'start': 'sd', 'end': 'ed', 'freq': '"""B"""'}), "(start=sd, end=ed, freq='B')\n", (9273, 9301), True, 'import pandas as pd\n'), ((9316, 9381), 'pandas.DataFrame', 'pd.DataFrame', (["{'date': dts, 'month': dts.month, 'year': dts.year}"], {}), "({'date': dts, 'month': dts.month, 'year': dts.year})\n", (9328, 9381), True, 'import pandas as pd\n'), ((3124, 3158), 'numpy.array', 'np.array', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (3132, 3158), True, 'import numpy as np\n'), ((9174, 9198), 'pandas.offsets.MonthBegin', 'pd.offsets.MonthBegin', (['(1)'], {}), '(1)\n', (9195, 9198), True, 'import pandas as pd\n'), ((9223, 9245), 'pandas.offsets.MonthEnd', 'pd.offsets.MonthEnd', (['(1)'], {}), '(1)\n', (9242, 9245), True, 'import pandas as pd\n'), ((9629, 9670), 
'pandas.date_range', 'pd.date_range', ([], {'start': 'sd', 'end': 'ed', 'freq': '"""B"""'}), "(start=sd, end=ed, freq='B')\n", (9642, 9670), True, 'import pandas as pd\n'), ((9685, 9742), 'pandas.DataFrame', 'pd.DataFrame', (["{'date': dts, 'weekofyear': dts.weekofyear}"], {}), "({'date': dts, 'weekofyear': dts.weekofyear})\n", (9697, 9742), True, 'import pandas as pd\n'), ((9940, 9970), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['[start_date]'], {}), '([start_date])\n', (9956, 9970), True, 'import pandas as pd\n'), ((9499, 9543), 'pandas.Timedelta', 'pd.Timedelta', (['start_date.dayofweek'], {'unit': '"""D"""'}), "(start_date.dayofweek, unit='D')\n", (9511, 9543), True, 'import pandas as pd\n'), ((9568, 9614), 'pandas.Timedelta', 'pd.Timedelta', (['(6 - end_date.dayofweek)'], {'unit': '"""D"""'}), "(6 - end_date.dayofweek, unit='D')\n", (9580, 9614), True, 'import pandas as pd\n'), ((6766, 6795), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['rebal_dates'], {}), '(rebal_dates)\n', (6782, 6795), True, 'import pandas as pd\n')] |
from __future__ import print_function
import time
import numpy as np
from scipy.special import gammaln, psi
from six.moves import xrange
from .utils import write_top_words
from .formatted_logger import formatted_logger
eps = 1e-20
logger = formatted_logger('RelationalTopicModel', 'info')
class RelationalTopicModel:
    """ implementation of relational topic model by Chang and Blei (2009)
    I implemented the exponential link probability function in here

    Training (``fit``) alternates a variational E-step over per-document
    parameters (``variation_update``) with an M-step over the link-function
    parameters (``parameter_estimation``).

    Attributes
    ----------
    eta: ndarray, shape (n_topic)
        coefficient of exponential function
    rho: int
        pseudo number of negative example
    """

    def __init__(self, n_topic, n_doc, n_voca, alpha=0.1, rho=1000, **kwargs):
        self.n_doc = n_doc
        self.n_topic = n_topic
        self.n_voca = n_voca
        # symmetric Dirichlet prior on per-document topic proportions
        self.alpha = alpha

        # gamma: D x K variational Dirichlet parameters over theta
        self.gamma = np.random.gamma(100., 1. / 100, [self.n_doc, self.n_topic])
        # beta: K x V topic-word distributions, each row a Dirichlet draw
        self.beta = np.random.dirichlet([5] * self.n_voca, self.n_topic)

        # nu: intercept of the exponential link probability function
        self.nu = 0
        # eta: per-topic coefficients of the link probability function
        self.eta = np.random.normal(0., 1, self.n_topic)

        # phi: per-document K x W_d variational multinomials (filled in fit)
        self.phi = list()
        # pi: D x K count-weighted, normalized topic assignments per document
        self.pi = np.zeros([self.n_doc, self.n_topic])

        self.rho = rho
        self.verbose = kwargs.pop('verbose', True)

        logger.info('Initialize RTM: num_voca:%d, num_topic:%d, num_doc:%d' % (self.n_voca, self.n_topic, self.n_doc))

    def fit(self, doc_ids, doc_cnt, doc_links, max_iter=100):
        """Run variational EM for ``max_iter`` iterations.

        Parameters
        ----------
        doc_ids: list of per-document unique word-id sequences
        doc_cnt: list of per-document word counts aligned with doc_ids
        doc_links: list of per-document linked-document-id sequences
        max_iter: int, number of EM iterations
        """
        # initialize phi and pi for every document
        for di in xrange(self.n_doc):
            unique_word = len(doc_ids[di])
            cnt = doc_cnt[di]

            self.phi.append(np.random.dirichlet([10] * self.n_topic, unique_word).T)  # list of KxW
            self.pi[di, :] = np.sum(cnt * self.phi[di], 1) / np.sum(cnt * self.phi[di])

        for iter in xrange(max_iter):  # NOTE(review): `iter` shadows the builtin
            tic = time.time()
            self.variation_update(doc_ids, doc_cnt, doc_links)
            self.parameter_estimation(doc_links)
            if self.verbose:
                elbo = self.compute_elbo(doc_ids, doc_cnt, doc_links)
                logger.info('[ITER] %3d,\tElapsed time: %.3f\tELBO: %.3f', iter, time.time()-tic, elbo)

    def compute_elbo(self, doc_ids, doc_cnt, doc_links):
        """ compute evidence lower bound for trained model
        """
        elbo = 0

        # E_q[log theta] under the Dirichlet variational posterior, D x K
        e_log_theta = psi(self.gamma) - psi(np.sum(self.gamma, 1))[:, np.newaxis]  # D x K
        log_beta = np.log(self.beta + eps)

        for di in xrange(self.n_doc):
            words = doc_ids[di]
            cnt = doc_cnt[di]

            elbo += np.sum(cnt * (self.phi[di] * log_beta[:, words]))  # E_q[log p(w_{d,n}|\beta,z_{d,n})]
            elbo += np.sum((self.alpha - 1.) * e_log_theta[di, :])  # E_q[log p(\theta_d | alpha)]
            elbo += np.sum(self.phi[di].T * e_log_theta[di, :])  # E_q[log p(z_{d,n}|\theta_d)]
            elbo += -gammaln(np.sum(self.gamma[di, :])) + np.sum(gammaln(self.gamma[di, :])) \
                - np.sum((self.gamma[di, :] - 1.) * (e_log_theta[di, :]))  # - E_q[log q(theta|gamma)]
            elbo += - np.sum(cnt * self.phi[di] * np.log(self.phi[di]))  # - E_q[log q(z|phi)]

            for adi in doc_links[di]:
                # exponential link probability for each observed link
                elbo += np.dot(self.eta,
                               self.pi[di] * self.pi[adi]) + self.nu  # E_q[log p(y_{d1,d2}|z_{d1},z_{d2},\eta,\nu)]

        return elbo

    def variation_update(self, doc_ids, doc_cnt, doc_links):
        """Variational E-step: update phi, gamma (and recompute beta, pi)."""
        # update phi, gamma
        e_log_theta = psi(self.gamma) - psi(np.sum(self.gamma, 1))[:, np.newaxis]

        new_beta = np.zeros([self.n_topic, self.n_voca])

        for di in xrange(self.n_doc):
            words = doc_ids[di]
            cnt = doc_cnt[di]
            doc_len = np.sum(cnt)

            # log-space update: topic-word likelihood plus expected log theta
            new_phi = np.log(self.beta[:, words] + eps) + e_log_theta[di, :][:, np.newaxis]

            # gradient contribution from links to neighbouring documents
            gradient = np.zeros(self.n_topic)
            for ai in doc_links[di]:
                gradient += self.eta * self.pi[ai, :] / doc_len

            new_phi += gradient[:, np.newaxis]
            new_phi = np.exp(new_phi)
            # normalize each word's topic distribution (columns sum to 1)
            new_phi = new_phi / np.sum(new_phi, 0)

            self.phi[di] = new_phi

            self.pi[di, :] = np.sum(cnt * self.phi[di], 1) / np.sum(cnt * self.phi[di])
            self.gamma[di, :] = np.sum(cnt * self.phi[di], 1) + self.alpha
            new_beta[:, words] += (cnt * self.phi[di])

        # renormalize topic-word distributions over the vocabulary
        self.beta = new_beta / np.sum(new_beta, 1)[:, np.newaxis]

    def parameter_estimation(self, doc_links):
        """M-step: re-estimate the link-function parameters eta and nu."""
        # update eta, nu
        pi_sum = np.zeros(self.n_topic)

        num_links = 0.

        for di in xrange(self.n_doc):
            for adi in doc_links[di]:
                pi_sum += self.pi[di, :] * self.pi[adi, :]
                num_links += 1

        num_links /= 2.  # divide by 2 for bidirectional edge
        pi_sum /= 2.

        # expected pi-product under the prior, used as the negative-example term
        pi_alpha = np.zeros(self.n_topic) + self.alpha / (self.alpha * self.n_topic) * self.alpha / (self.alpha * self.n_topic)

        self.nu = np.log(num_links - np.sum(pi_sum)) - np.log(
            self.rho * (self.n_topic - 1) / self.n_topic + num_links - np.sum(pi_sum))
        self.eta = np.log(pi_sum) - np.log(pi_sum + self.rho * pi_alpha) - self.nu

    def save_model(self, output_directory, vocab=None):
        """Write eta, beta, gamma, nu (and optionally top words) to disk."""
        import os
        if not os.path.exists(output_directory):
            os.mkdir(output_directory)

        np.savetxt(output_directory + '/eta.txt', self.eta, delimiter='\t')
        np.savetxt(output_directory + '/beta.txt', self.beta, delimiter='\t')
        np.savetxt(output_directory + '/gamma.txt', self.gamma, delimiter='\t')
        with open(output_directory + '/nu.txt', 'w') as f:
            f.write('%f\n' % self.nu)
        if vocab is not None:
            write_top_words(self.beta, vocab, output_directory + '/top_words.csv')
| [
"numpy.random.normal",
"os.path.exists",
"scipy.special.psi",
"numpy.log",
"numpy.exp",
"numpy.sum",
"numpy.random.dirichlet",
"numpy.random.gamma",
"six.moves.xrange",
"numpy.zeros",
"numpy.savetxt",
"os.mkdir",
"numpy.dot",
"time.time",
"scipy.special.gammaln"
] | [((854, 915), 'numpy.random.gamma', 'np.random.gamma', (['(100.0)', '(1.0 / 100)', '[self.n_doc, self.n_topic]'], {}), '(100.0, 1.0 / 100, [self.n_doc, self.n_topic])\n', (869, 915), True, 'import numpy as np\n'), ((934, 986), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([5] * self.n_voca)', 'self.n_topic'], {}), '([5] * self.n_voca, self.n_topic)\n', (953, 986), True, 'import numpy as np\n'), ((1027, 1065), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1)', 'self.n_topic'], {}), '(0.0, 1, self.n_topic)\n', (1043, 1065), True, 'import numpy as np\n'), ((1110, 1146), 'numpy.zeros', 'np.zeros', (['[self.n_doc, self.n_topic]'], {}), '([self.n_doc, self.n_topic])\n', (1118, 1146), True, 'import numpy as np\n'), ((1424, 1442), 'six.moves.xrange', 'xrange', (['self.n_doc'], {}), '(self.n_doc)\n', (1430, 1442), False, 'from six.moves import xrange\n'), ((1726, 1742), 'six.moves.xrange', 'xrange', (['max_iter'], {}), '(max_iter)\n', (1732, 1742), False, 'from six.moves import xrange\n'), ((2346, 2369), 'numpy.log', 'np.log', (['(self.beta + eps)'], {}), '(self.beta + eps)\n', (2352, 2369), True, 'import numpy as np\n'), ((2389, 2407), 'six.moves.xrange', 'xrange', (['self.n_doc'], {}), '(self.n_doc)\n', (2395, 2407), False, 'from six.moves import xrange\n'), ((3482, 3519), 'numpy.zeros', 'np.zeros', (['[self.n_topic, self.n_voca]'], {}), '([self.n_topic, self.n_voca])\n', (3490, 3519), True, 'import numpy as np\n'), ((3539, 3557), 'six.moves.xrange', 'xrange', (['self.n_doc'], {}), '(self.n_doc)\n', (3545, 3557), False, 'from six.moves import xrange\n'), ((4445, 4467), 'numpy.zeros', 'np.zeros', (['self.n_topic'], {}), '(self.n_topic)\n', (4453, 4467), True, 'import numpy as np\n'), ((4511, 4529), 'six.moves.xrange', 'xrange', (['self.n_doc'], {}), '(self.n_doc)\n', (4517, 4529), False, 'from six.moves import xrange\n'), ((5278, 5345), 'numpy.savetxt', 'np.savetxt', (["(output_directory + '/eta.txt')", 'self.eta'], {'delimiter': '"""\t"""'}), 
"(output_directory + '/eta.txt', self.eta, delimiter='\\t')\n", (5288, 5345), True, 'import numpy as np\n'), ((5354, 5423), 'numpy.savetxt', 'np.savetxt', (["(output_directory + '/beta.txt')", 'self.beta'], {'delimiter': '"""\t"""'}), "(output_directory + '/beta.txt', self.beta, delimiter='\\t')\n", (5364, 5423), True, 'import numpy as np\n'), ((5432, 5503), 'numpy.savetxt', 'np.savetxt', (["(output_directory + '/gamma.txt')", 'self.gamma'], {'delimiter': '"""\t"""'}), "(output_directory + '/gamma.txt', self.gamma, delimiter='\\t')\n", (5442, 5503), True, 'import numpy as np\n'), ((1762, 1773), 'time.time', 'time.time', ([], {}), '()\n', (1771, 1773), False, 'import time\n'), ((2258, 2273), 'scipy.special.psi', 'psi', (['self.gamma'], {}), '(self.gamma)\n', (2261, 2273), False, 'from scipy.special import gammaln, psi\n'), ((2492, 2541), 'numpy.sum', 'np.sum', (['(cnt * (self.phi[di] * log_beta[:, words]))'], {}), '(cnt * (self.phi[di] * log_beta[:, words]))\n', (2498, 2541), True, 'import numpy as np\n'), ((2599, 2646), 'numpy.sum', 'np.sum', (['((self.alpha - 1.0) * e_log_theta[di, :])'], {}), '((self.alpha - 1.0) * e_log_theta[di, :])\n', (2605, 2646), True, 'import numpy as np\n'), ((2698, 2741), 'numpy.sum', 'np.sum', (['(self.phi[di].T * e_log_theta[di, :])'], {}), '(self.phi[di].T * e_log_theta[di, :])\n', (2704, 2741), True, 'import numpy as np\n'), ((3402, 3417), 'scipy.special.psi', 'psi', (['self.gamma'], {}), '(self.gamma)\n', (3405, 3417), False, 'from scipy.special import gammaln, psi\n'), ((3643, 3654), 'numpy.sum', 'np.sum', (['cnt'], {}), '(cnt)\n', (3649, 3654), True, 'import numpy as np\n'), ((3772, 3794), 'numpy.zeros', 'np.zeros', (['self.n_topic'], {}), '(self.n_topic)\n', (3780, 3794), True, 'import numpy as np\n'), ((3966, 3981), 'numpy.exp', 'np.exp', (['new_phi'], {}), '(new_phi)\n', (3972, 3981), True, 'import numpy as np\n'), ((4763, 4785), 'numpy.zeros', 'np.zeros', (['self.n_topic'], {}), '(self.n_topic)\n', (4771, 4785), True, 'import 
numpy as np\n'), ((5196, 5228), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (5210, 5228), False, 'import os\n'), ((5242, 5268), 'os.mkdir', 'os.mkdir', (['output_directory'], {}), '(output_directory)\n', (5250, 5268), False, 'import os\n'), ((1646, 1675), 'numpy.sum', 'np.sum', (['(cnt * self.phi[di])', '(1)'], {}), '(cnt * self.phi[di], 1)\n', (1652, 1675), True, 'import numpy as np\n'), ((1678, 1704), 'numpy.sum', 'np.sum', (['(cnt * self.phi[di])'], {}), '(cnt * self.phi[di])\n', (1684, 1704), True, 'import numpy as np\n'), ((2892, 2946), 'numpy.sum', 'np.sum', (['((self.gamma[di, :] - 1.0) * e_log_theta[di, :])'], {}), '((self.gamma[di, :] - 1.0) * e_log_theta[di, :])\n', (2898, 2946), True, 'import numpy as np\n'), ((3678, 3711), 'numpy.log', 'np.log', (['(self.beta[:, words] + eps)'], {}), '(self.beta[:, words] + eps)\n', (3684, 3711), True, 'import numpy as np\n'), ((4014, 4032), 'numpy.sum', 'np.sum', (['new_phi', '(0)'], {}), '(new_phi, 0)\n', (4020, 4032), True, 'import numpy as np\n'), ((4099, 4128), 'numpy.sum', 'np.sum', (['(cnt * self.phi[di])', '(1)'], {}), '(cnt * self.phi[di], 1)\n', (4105, 4128), True, 'import numpy as np\n'), ((4131, 4157), 'numpy.sum', 'np.sum', (['(cnt * self.phi[di])'], {}), '(cnt * self.phi[di])\n', (4137, 4157), True, 'import numpy as np\n'), ((4190, 4219), 'numpy.sum', 'np.sum', (['(cnt * self.phi[di])', '(1)'], {}), '(cnt * self.phi[di], 1)\n', (4196, 4219), True, 'import numpy as np\n'), ((4320, 4339), 'numpy.sum', 'np.sum', (['new_beta', '(1)'], {}), '(new_beta, 1)\n', (4326, 4339), True, 'import numpy as np\n'), ((5042, 5056), 'numpy.log', 'np.log', (['pi_sum'], {}), '(pi_sum)\n', (5048, 5056), True, 'import numpy as np\n'), ((5059, 5095), 'numpy.log', 'np.log', (['(pi_sum + self.rho * pi_alpha)'], {}), '(pi_sum + self.rho * pi_alpha)\n', (5065, 5095), True, 'import numpy as np\n'), ((1545, 1598), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([10] * self.n_topic)', 
'unique_word'], {}), '([10] * self.n_topic, unique_word)\n', (1564, 1598), True, 'import numpy as np\n'), ((2280, 2301), 'numpy.sum', 'np.sum', (['self.gamma', '(1)'], {}), '(self.gamma, 1)\n', (2286, 2301), True, 'import numpy as np\n'), ((3135, 3179), 'numpy.dot', 'np.dot', (['self.eta', '(self.pi[di] * self.pi[adi])'], {}), '(self.eta, self.pi[di] * self.pi[adi])\n', (3141, 3179), True, 'import numpy as np\n'), ((3424, 3445), 'numpy.sum', 'np.sum', (['self.gamma', '(1)'], {}), '(self.gamma, 1)\n', (3430, 3445), True, 'import numpy as np\n'), ((4910, 4924), 'numpy.sum', 'np.sum', (['pi_sum'], {}), '(pi_sum)\n', (4916, 4924), True, 'import numpy as np\n'), ((5007, 5021), 'numpy.sum', 'np.sum', (['pi_sum'], {}), '(pi_sum)\n', (5013, 5021), True, 'import numpy as np\n'), ((2066, 2077), 'time.time', 'time.time', ([], {}), '()\n', (2075, 2077), False, 'import time\n'), ((2840, 2866), 'scipy.special.gammaln', 'gammaln', (['self.gamma[di, :]'], {}), '(self.gamma[di, :])\n', (2847, 2866), False, 'from scipy.special import gammaln, psi\n'), ((3027, 3047), 'numpy.log', 'np.log', (['self.phi[di]'], {}), '(self.phi[di])\n', (3033, 3047), True, 'import numpy as np\n'), ((2804, 2829), 'numpy.sum', 'np.sum', (['self.gamma[di, :]'], {}), '(self.gamma[di, :])\n', (2810, 2829), True, 'import numpy as np\n')] |
"""test update parameters"""
import numpy as np
from neuralink.parameters import Parameters
from .utils import multiple_test
def test_update_parameters1():
    """Parameters().update on a small fixed network must match reference values."""
    parameters = {
        "W1": np.array(
            [
                [-0.00615039, 0.0169021],
                [-0.02311792, 0.03137121],
                [-0.0169217, -0.01752545],
                [0.00935436, -0.05018221],
            ]
        ),
        "W2": np.array([[-0.0104319, -0.04019007, 0.01607211, 0.04440255]]),
        "b1": np.array(
            [[-8.97523455e-07], [8.15562092e-06], [6.04810633e-07], [-2.54560700e-06]]
        ),
        "b2": np.array([[9.14954378e-05]]),
    }
    grads = {
        "dW1": np.array(
            [
                [0.00023322, -0.00205423],
                [0.00082222, -0.00700776],
                [-0.00031831, 0.0028636],
                [-0.00092857, 0.00809933],
            ]
        ),
        "dW2": np.array(
            [[-1.75740039e-05, 3.70231337e-03, -1.25683095e-03, -2.55715317e-03]]
        ),
        "db1": np.array(
            [[1.05570087e-07], [-3.81814487e-06], [-1.90155145e-07], [5.46467802e-07]]
        ),
        "db2": np.array([[-1.08923140e-05]]),
    }
    # hand-computed reference values for the updated parameters
    expected_output = {
        "W1": np.array(
            [
                [-0.00643025, 0.01936718],
                [-0.02410458, 0.03978052],
                [-0.01653973, -0.02096177],
                [0.01046864, -0.05990141],
            ]
        ),
        "b1": np.array(
            [[-1.02420756e-06], [1.27373948e-05], [8.32996807e-07], [-3.20136836e-06]]
        ),
        "W2": np.array([[-0.01041081, -0.04463285, 0.01758031, 0.04747113]]),
        "b2": np.array([[0.00010457]]),
    }

    output = Parameters().update(parameters, grads)

    keys = ("W1", "b1", "W2", "b2")
    # same checks as before, grouped: all types, then all shapes, then values
    for key in keys:
        assert (
            type(output[key]) == np.ndarray
        ), f"Wrong type for {key}. Expected: {np.ndarray}"
    for key in keys:
        assert output[key].shape == expected_output[key].shape, f"Wrong shape for {key}."
    for key in keys:
        assert np.allclose(output[key], expected_output[key]), f"Wrong values for {key}"
def test_update_parameters2():
    """Run the shared multiple_test harness on Parameters().update."""
    # seeded draws; dict-literal evaluation order keeps the RNG stream
    # identical to drawing W1, b1, W2, b2 one by one
    np.random.seed(2)
    parameters = {
        "W1": np.random.randn(3, 4),
        "b1": np.random.randn(3, 1),
        "W2": np.random.randn(1, 3),
        "b2": np.random.randn(1, 1),
    }
    np.random.seed(3)
    grads = {
        "dW1": np.random.randn(3, 4),
        "db1": np.random.randn(3, 1),
        "dW2": np.random.randn(1, 3),
        "db2": np.random.randn(1, 1),
    }
    learning_rate = 0.1

    # reference result of one gradient step at learning rate 0.1
    expected_output = {
        "W1": np.array(
            [
                [-0.59562069, -0.09991781, -2.14584584, 1.82662008],
                [-1.76569676, -0.80627147, 0.51115557, -1.18258802],
                [-1.0535704, -0.86128581, 0.68284052, 2.20374577],
            ]
        ),
        "b1": np.array([[-0.04659241], [-1.28888275], [0.53405496]]),
        "W2": np.array([[-0.55569196, 0.0354055, 1.32964895]]),
        "b2": np.array([[-0.84610769]]),
    }

    test_cases = [
        {
            "name": name,
            "input": [parameters, grads, learning_rate],
            "expected": expected_output,
            "error": error,
        }
        for name, error in (
            ("datatype_check", "Data type mismatch"),
            ("shape_check", "Wrong shape"),
            ("equation_output_check", "Wrong output"),
        )
    ]
    multiple_test(test_cases, Parameters().update)
| [
"numpy.allclose",
"neuralink.parameters.Parameters",
"numpy.array",
"numpy.random.seed",
"numpy.random.randn"
] | [((1224, 1348), 'numpy.array', 'np.array', (['[[-0.00643025, 0.01936718], [-0.02410458, 0.03978052], [-0.01653973, -\n 0.02096177], [0.01046864, -0.05990141]]'], {}), '([[-0.00643025, 0.01936718], [-0.02410458, 0.03978052], [-\n 0.01653973, -0.02096177], [0.01046864, -0.05990141]])\n', (1232, 1348), True, 'import numpy as np\n'), ((1435, 1524), 'numpy.array', 'np.array', (['[[-1.02420756e-06], [1.27373948e-05], [8.32996807e-07], [-3.20136836e-06]]'], {}), '([[-1.02420756e-06], [1.27373948e-05], [8.32996807e-07], [-\n 3.20136836e-06]])\n', (1443, 1524), True, 'import numpy as np\n'), ((1552, 1614), 'numpy.array', 'np.array', (['[[-0.01041081, -0.04463285, 0.01758031, 0.04747113]]'], {}), '([[-0.01041081, -0.04463285, 0.01758031, 0.04747113]])\n', (1560, 1614), True, 'import numpy as np\n'), ((1633, 1657), 'numpy.array', 'np.array', (['[[0.00010457]]'], {}), '([[0.00010457]])\n', (1641, 1657), True, 'import numpy as np\n'), ((2628, 2676), 'numpy.allclose', 'np.allclose', (["output['W1']", "expected_output['W1']"], {}), "(output['W1'], expected_output['W1'])\n", (2639, 2676), True, 'import numpy as np\n'), ((2711, 2759), 'numpy.allclose', 'np.allclose', (["output['b1']", "expected_output['b1']"], {}), "(output['b1'], expected_output['b1'])\n", (2722, 2759), True, 'import numpy as np\n'), ((2794, 2842), 'numpy.allclose', 'np.allclose', (["output['W2']", "expected_output['W2']"], {}), "(output['W2'], expected_output['W2'])\n", (2805, 2842), True, 'import numpy as np\n'), ((2877, 2925), 'numpy.allclose', 'np.allclose', (["output['b2']", "expected_output['b2']"], {}), "(output['b2'], expected_output['b2'])\n", (2888, 2925), True, 'import numpy as np\n'), ((2986, 3003), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3000, 3003), True, 'import numpy as np\n'), ((3013, 3034), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)'], {}), '(3, 4)\n', (3028, 3034), True, 'import numpy as np\n'), ((3044, 3065), 'numpy.random.randn', 'np.random.randn', 
(['(3)', '(1)'], {}), '(3, 1)\n', (3059, 3065), True, 'import numpy as np\n'), ((3075, 3096), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (3090, 3096), True, 'import numpy as np\n'), ((3106, 3127), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (3121, 3127), True, 'import numpy as np\n'), ((3190, 3207), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (3204, 3207), True, 'import numpy as np\n'), ((3218, 3239), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)'], {}), '(3, 4)\n', (3233, 3239), True, 'import numpy as np\n'), ((3250, 3271), 'numpy.random.randn', 'np.random.randn', (['(3)', '(1)'], {}), '(3, 1)\n', (3265, 3271), True, 'import numpy as np\n'), ((3282, 3303), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (3297, 3303), True, 'import numpy as np\n'), ((3314, 3335), 'numpy.random.randn', 'np.random.randn', (['(1)', '(1)'], {}), '(1, 1)\n', (3329, 3335), True, 'import numpy as np\n'), ((3439, 3615), 'numpy.array', 'np.array', (['[[-0.59562069, -0.09991781, -2.14584584, 1.82662008], [-1.76569676, -\n 0.80627147, 0.51115557, -1.18258802], [-1.0535704, -0.86128581, \n 0.68284052, 2.20374577]]'], {}), '([[-0.59562069, -0.09991781, -2.14584584, 1.82662008], [-1.76569676,\n -0.80627147, 0.51115557, -1.18258802], [-1.0535704, -0.86128581, \n 0.68284052, 2.20374577]])\n', (3447, 3615), True, 'import numpy as np\n'), ((3686, 3740), 'numpy.array', 'np.array', (['[[-0.04659241], [-1.28888275], [0.53405496]]'], {}), '([[-0.04659241], [-1.28888275], [0.53405496]])\n', (3694, 3740), True, 'import numpy as np\n'), ((3759, 3807), 'numpy.array', 'np.array', (['[[-0.55569196, 0.0354055, 1.32964895]]'], {}), '([[-0.55569196, 0.0354055, 1.32964895]])\n', (3767, 3807), True, 'import numpy as np\n'), ((3826, 3851), 'numpy.array', 'np.array', (['[[-0.84610769]]'], {}), '([[-0.84610769]])\n', (3834, 3851), True, 'import numpy as np\n'), ((193, 314), 'numpy.array', 'np.array', 
(['[[-0.00615039, 0.0169021], [-0.02311792, 0.03137121], [-0.0169217, -\n 0.01752545], [0.00935436, -0.05018221]]'], {}), '([[-0.00615039, 0.0169021], [-0.02311792, 0.03137121], [-0.0169217,\n -0.01752545], [0.00935436, -0.05018221]])\n', (201, 314), True, 'import numpy as np\n'), ((427, 488), 'numpy.array', 'np.array', (['[[-0.0104319, -0.04019007, 0.01607211, 0.04440255]]'], {}), '([[-0.0104319, -0.04019007, 0.01607211, 0.04440255]])\n', (435, 488), True, 'import numpy as np\n'), ((504, 591), 'numpy.array', 'np.array', (['[[-8.97523455e-07], [8.15562092e-06], [6.04810633e-07], [-2.545607e-06]]'], {}), '([[-8.97523455e-07], [8.15562092e-06], [6.04810633e-07], [-\n 2.545607e-06]])\n', (512, 591), True, 'import numpy as np\n'), ((626, 654), 'numpy.array', 'np.array', (['[[9.14954378e-05]]'], {}), '([[9.14954378e-05]])\n', (634, 654), True, 'import numpy as np\n'), ((692, 814), 'numpy.array', 'np.array', (['[[0.00023322, -0.00205423], [0.00082222, -0.00700776], [-0.00031831, \n 0.0028636], [-0.00092857, 0.00809933]]'], {}), '([[0.00023322, -0.00205423], [0.00082222, -0.00700776], [-\n 0.00031831, 0.0028636], [-0.00092857, 0.00809933]])\n', (700, 814), True, 'import numpy as np\n'), ((927, 1003), 'numpy.array', 'np.array', (['[[-1.75740039e-05, 0.00370231337, -0.00125683095, -0.00255715317]]'], {}), '([[-1.75740039e-05, 0.00370231337, -0.00125683095, -0.00255715317]])\n', (935, 1003), True, 'import numpy as np\n'), ((1045, 1134), 'numpy.array', 'np.array', (['[[1.05570087e-07], [-3.81814487e-06], [-1.90155145e-07], [5.46467802e-07]]'], {}), '([[1.05570087e-07], [-3.81814487e-06], [-1.90155145e-07], [\n 5.46467802e-07]])\n', (1053, 1134), True, 'import numpy as np\n'), ((1168, 1196), 'numpy.array', 'np.array', (['[[-1.0892314e-05]]'], {}), '([[-1.0892314e-05]])\n', (1176, 1196), True, 'import numpy as np\n'), ((1811, 1823), 'neuralink.parameters.Parameters', 'Parameters', ([], {}), '()\n', (1821, 1823), False, 'from neuralink.parameters import Parameters\n'), ((4628, 
4640), 'neuralink.parameters.Parameters', 'Parameters', ([], {}), '()\n', (4638, 4640), False, 'from neuralink.parameters import Parameters\n')] |
# -*- coding: utf-8 -*-
from datetime import datetime
import tensorflow as tf
import tensornet as tn
import numpy as np
def read_dataset(data_path, days, match_pattern, batch_size, parse_func, num_parallel_calls=12):
    """Build a sharded, batched, balanced TFRecord input pipeline."""
    files = tn.data.list_files(data_path, days=days, match_pattern=match_pattern)
    # each worker consumes only its own shard of the file list
    files = files.shard(num_shards=tn.core.shard_num(), index=tn.core.self_shard_id())
    records = files.interleave(
        lambda f: tf.data.TFRecordDataset(f, buffer_size=1024 * 100),
        cycle_length=4, block_length=8,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    records = records.batch(batch_size)
    records = records.map(
        map_func=lambda example_proto: parse_func(example_proto),
        num_parallel_calls=num_parallel_calls)
    records = tn.data.BalanceDataset(records)
    return records.prefetch(tf.data.experimental.AUTOTUNE)
def trained_delta_days(cur_dt, model_dir):
    """Days elapsed since the model's last recorded training date.

    Returns 1 when no previous training date is recorded.
    """
    prev = tn.model.read_last_train_dt(model_dir)
    if not prev:
        # no training history yet: behave as if we are one day behind
        return 1
    delta = datetime.fromisoformat(cur_dt) - datetime.fromisoformat(prev)
    return delta.days
def dump_predict(result, path):
    """Write tab-separated (label, prediction) pairs to a per-shard file."""
    merged = np.concatenate(result, axis=1)
    # NOTE(review): this assumes iterating `merged` yields (y, y_pred)
    # pairs, i.e. each row has exactly two elements — confirm against the
    # shape produced by the caller.
    lines = ["{}\t{}\n".format(y, y_pred) for y, y_pred in merged]
    filename = "{}/part-{:05d}".format(path, tn.core.self_shard_id())
    tf.io.write_file(filename, "".join(lines))
    return
| [
"tensorflow.data.TFRecordDataset",
"tensornet.core.shard_num",
"tensornet.core.self_shard_id",
"tensornet.model.read_last_train_dt",
"tensornet.data.BalanceDataset",
"tensorflow.io.write_file",
"numpy.concatenate",
"datetime.datetime.fromisoformat",
"tensornet.data.list_files"
] | [((241, 310), 'tensornet.data.list_files', 'tn.data.list_files', (['data_path'], {'days': 'days', 'match_pattern': 'match_pattern'}), '(data_path, days=days, match_pattern=match_pattern)\n', (259, 310), True, 'import tensornet as tn\n'), ((867, 898), 'tensornet.data.BalanceDataset', 'tn.data.BalanceDataset', (['dataset'], {}), '(dataset)\n', (889, 898), True, 'import tensornet as tn\n'), ((1045, 1083), 'tensornet.model.read_last_train_dt', 'tn.model.read_last_train_dt', (['model_dir'], {}), '(model_dir)\n', (1072, 1083), True, 'import tensornet as tn\n'), ((1149, 1186), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['last_train_dt'], {}), '(last_train_dt)\n', (1171, 1186), False, 'from datetime import datetime\n'), ((1200, 1230), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['cur_dt'], {}), '(cur_dt)\n', (1222, 1230), False, 'from datetime import datetime\n'), ((1319, 1349), 'numpy.concatenate', 'np.concatenate', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (1333, 1349), True, 'import numpy as np\n'), ((1520, 1555), 'tensorflow.io.write_file', 'tf.io.write_file', (['filename', 'content'], {}), '(filename, content)\n', (1536, 1555), True, 'import tensorflow as tf\n'), ((1491, 1514), 'tensornet.core.self_shard_id', 'tn.core.self_shard_id', ([], {}), '()\n', (1512, 1514), True, 'import tensornet as tn\n'), ((356, 375), 'tensornet.core.shard_num', 'tn.core.shard_num', ([], {}), '()\n', (373, 375), True, 'import tensornet as tn\n'), ((383, 406), 'tensornet.core.self_shard_id', 'tn.core.self_shard_id', ([], {}), '()\n', (404, 406), True, 'import tensornet as tn\n'), ((451, 501), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['f'], {'buffer_size': '(1024 * 100)'}), '(f, buffer_size=1024 * 100)\n', (474, 501), True, 'import tensorflow as tf\n')] |
import numpy as np
import re
import torch
import random
import transformers
from sklearn.preprocessing import LabelEncoder
from transformers import DistilBertTokenizer, DistilBertModel
import sys
import pathlib
scripts_dir = pathlib.Path(__file__).parent.resolve()
sys.path.append(str(scripts_dir))
from bert_model import BERT_Arch
import json
from transformers import logging
import os
#logging.set_verbosity_warning()
# ---- one-shot module initialization: device, paths, model and responses ----
logging.set_verbosity_error()
# specify GPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# runtime behaviour and artifact locations come from the environment
mode = os.environ.get('mode','response') # response/intent
model_file = os.environ.get('model_file','chatbot_model.pt')
responses_dict = os.environ.get('responses_dict','responses.json')
# prefer artifacts produced by a training run under /input/train,
# otherwise fall back to the files shipped next to this script
if os.path.exists('/input/train'):
    model_path = '/input/train/'+ model_file
    if os.path.exists('/input/train/intents.json'):
        intents_path = '/input/train/intents.json'
    else:
        intents_path = '/input/train/extended_intents.json'
else:
    model_path = os.path.join(scripts_dir, model_file)
    intents_path = os.path.join(scripts_dir, 'intents.json')
    responses_dict = os.path.join(scripts_dir, 'responses.json')
# NOTE(review): files opened here are never closed — acceptable for
# one-shot init, but a `with` block would be cleaner.
f = open(intents_path)
intents = json.load(f)
number_of_labels = len(intents)
# Import the DistilBert pretrained model
model = BERT_Arch(DistilBertModel.from_pretrained('distilbert-base-uncased'),number_of_labels)
model.load_state_dict(torch.load(model_path))
# responses are only needed when serving full replies, not bare intents
if mode == 'response':
    f = open(responses_dict)
    res = json.load(f)
def get_prediction(text):
    """Classify a user message into one of the trained intent labels.

    Args:
        text: Raw user message.  Everything except ASCII letters and
            spaces is stripped before tokenization.  (Parameter renamed
            from ``str``, which shadowed the builtin; callers in this
            module pass it positionally.)

    Returns:
        Tuple ``(intent_label, confidence)`` where ``confidence`` is the
        softmax probability of the predicted class.
    """
    # Fit a label encoder over the intent names so the model's numeric
    # prediction can be mapped back to an intent string.
    le = LabelEncoder()
    le.fit_transform(list(intents))
    # Keep only letters and spaces, matching the training-time cleaning.
    text = re.sub(r'[^a-zA-Z ]+', '', text)
    test_text = [text]
    model.eval()
    tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
    tokens_test_data = tokenizer(test_text, max_length=8, padding='max_length',
                                 truncation=True, return_token_type_ids=False)
    test_seq = torch.tensor(tokens_test_data['input_ids'])
    test_mask = torch.tensor(tokens_test_data['attention_mask'])
    # Inference only: no gradients needed.
    with torch.no_grad():
        preds = model(test_seq.to(device), test_mask.to(device))
    probabilities = torch.nn.Softmax(dim=1)(preds)
    pred_ids = np.argmax(preds.detach().cpu().numpy(), axis=1)
    return le.inverse_transform(pred_ids)[0], probabilities[0][pred_ids][0].item()
def predict(data):
    """Handle one inference request.

    Expects ``data['input_text']`` to hold the user message.  In
    'intent' mode returns only the intent and score; in 'response' mode
    additionally picks a random canned response for that intent.
    """
    message = data['input_text']
    intent, confidence = get_prediction(message)
    if mode == 'intent':
        result = {"intent":intent, "score": confidence}
        return result
    # NOTE(review): if the predicted intent has no entry in `res`,
    # `result` is never bound and the dict build below raises NameError.
    for i in res:
        if i == intent:
            result = random.choice(res[i])
            break
    result = {"intent":intent, "response":result, "score": confidence}
    return result | [
"os.path.exists",
"sklearn.preprocessing.LabelEncoder",
"random.choice",
"torch.nn.Softmax",
"transformers.DistilBertModel.from_pretrained",
"pathlib.Path",
"torch.load",
"os.environ.get",
"os.path.join",
"numpy.argmax",
"transformers.DistilBertTokenizer.from_pretrained",
"torch.tensor",
"to... | [((421, 450), 'transformers.logging.set_verbosity_error', 'logging.set_verbosity_error', ([], {}), '()\n', (448, 450), False, 'from transformers import logging\n'), ((544, 578), 'os.environ.get', 'os.environ.get', (['"""mode"""', '"""response"""'], {}), "('mode', 'response')\n", (558, 578), False, 'import os\n'), ((610, 658), 'os.environ.get', 'os.environ.get', (['"""model_file"""', '"""chatbot_model.pt"""'], {}), "('model_file', 'chatbot_model.pt')\n", (624, 658), False, 'import os\n'), ((675, 725), 'os.environ.get', 'os.environ.get', (['"""responses_dict"""', '"""responses.json"""'], {}), "('responses_dict', 'responses.json')\n", (689, 725), False, 'import os\n'), ((729, 759), 'os.path.exists', 'os.path.exists', (['"""/input/train"""'], {}), "('/input/train')\n", (743, 759), False, 'import os\n'), ((1201, 1213), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1210, 1213), False, 'import json\n'), ((813, 856), 'os.path.exists', 'os.path.exists', (['"""/input/train/intents.json"""'], {}), "('/input/train/intents.json')\n", (827, 856), False, 'import os\n'), ((1002, 1039), 'os.path.join', 'os.path.join', (['scripts_dir', 'model_file'], {}), '(scripts_dir, model_file)\n', (1014, 1039), False, 'import os\n'), ((1059, 1100), 'os.path.join', 'os.path.join', (['scripts_dir', '"""intents.json"""'], {}), "(scripts_dir, 'intents.json')\n", (1071, 1100), False, 'import os\n'), ((1122, 1165), 'os.path.join', 'os.path.join', (['scripts_dir', '"""responses.json"""'], {}), "(scripts_dir, 'responses.json')\n", (1134, 1165), False, 'import os\n'), ((1312, 1370), 'transformers.DistilBertModel.from_pretrained', 'DistilBertModel.from_pretrained', (['"""distilbert-base-uncased"""'], {}), "('distilbert-base-uncased')\n", (1343, 1370), False, 'from transformers import DistilBertTokenizer, DistilBertModel\n'), ((1415, 1437), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (1425, 1437), False, 'import torch\n'), ((1502, 1514), 'json.load', 'json.load', 
(['f'], {}), '(f)\n', (1511, 1514), False, 'import json\n'), ((1594, 1608), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1606, 1608), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1711, 1741), 're.sub', 're.sub', (['"""[^a-zA-Z ]+"""', '""""""', 'str'], {}), "('[^a-zA-Z ]+', '', str)\n", (1717, 1741), False, 'import re\n'), ((1798, 1860), 'transformers.DistilBertTokenizer.from_pretrained', 'DistilBertTokenizer.from_pretrained', (['"""distilbert-base-uncased"""'], {}), "('distilbert-base-uncased')\n", (1833, 1860), False, 'from transformers import DistilBertTokenizer, DistilBertModel\n'), ((2035, 2078), 'torch.tensor', 'torch.tensor', (["tokens_test_data['input_ids']"], {}), "(tokens_test_data['input_ids'])\n", (2047, 2078), False, 'import torch\n'), ((2095, 2143), 'torch.tensor', 'torch.tensor', (["tokens_test_data['attention_mask']"], {}), "(tokens_test_data['attention_mask'])\n", (2107, 2143), False, 'import torch\n'), ((2261, 2284), 'torch.nn.Softmax', 'torch.nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (2277, 2284), False, 'import torch\n'), ((2370, 2394), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (2379, 2394), True, 'import numpy as np\n'), ((498, 523), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (521, 523), False, 'import torch\n'), ((2170, 2185), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2183, 2185), False, 'import torch\n'), ((225, 247), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (237, 247), False, 'import pathlib\n'), ((2743, 2764), 'random.choice', 'random.choice', (['res[i]'], {}), '(res[i])\n', (2756, 2764), False, 'import random\n')] |
from zipfile import ZipFile
# from skimage.io import imread
import os
import numpy as np
import pandas as pd
from PIL import Image
from pathlib import Path
from data_layer.util import image_path
# from data_layer.dataset import CovidMetadata
# DEFAULT_BASE_PATH = 'C:/Covid-Screening/data_layer/raw_data'
# NOTE(review): `os.pathPath` is not a valid attribute; this line raises
# AttributeError at import.  Likely intended: pathlib.Path(__file__).parent.
DEFAULT_BASE_PATH = os.pathPath(__file__).parent
DEFAULT_METADATA_BASE_PATH = os.path.join(DEFAULT_BASE_PATH, 'metadata.csv')
# DEFAULT_IMAGES_BASE_PATH = os.path.join(DEFAULT_BASE_PATH, 'images')
# DEFAULT_CHANNELS = (1, 2, 3, 4, 5)
ROOT_DIR = Path(__file__).parent
CHANNELS = (1,2,3,4,5)
save_path = f"{ROOT_DIR}\\data_layer\\raw_data\\mocks\\"
# config = Config('trasfer_data')
# metadata = CovidMetadata(config.DEFAULT_METADATA_BASE_PATH)
# metadata = load_csv(DEFAULT_METADATA_BASE_PATH)
metadata2 = pd.read_csv(DEFAULT_METADATA_BASE_PATH)
# Build one row (and image filename) per metadata record per channel.
img_filenames = []
channels = []
i=0
# NOTE(review): `metadata` is undefined at this point (only `metadata2`
# was read above) — this loop raises NameError; presumably `metadata2`
# (or its records) was intended.
for rec in metadata:
    for c in CHANNELS:
        i+=1
        img_filename = image_path(rec['experiment'], rec['plate'], rec['well'], rec['site'],c)
        img_filenames.append(img_filename)
        channels.append(c)
metadata = pd.DataFrame(metadata)
# Repeat every metadata row once per channel (5x) to align with the
# per-channel lists built above.
reps = [5]*metadata.shape[0]
image_frame = metadata.loc[np.repeat(metadata.index.values,reps)]
image_frame['channel'] = channels
image_frame['img_filename'] = img_filenames
# NOTE(review): DataFrame.to_csv takes the path as its first argument;
# passing `image_frame` itself here is a bug.
image_frame.to_csv(image_frame, os.path.join(DEFAULT_BASE_PATH, 'image_frame'), columns=None, index=None)
# NOTE(review): write_dict_to_csv_with_pandas is not defined or imported here.
write_dict_to_csv_with_pandas(image_frame, os.path.join(DEFAULT_BASE_PATH, 'image_frame.csv'))
filename = "D:\\RxRx19a-images.zip"
# Walk the archive and report basic info for every HRCE image.
with ZipFile(filename) as archive:
    for entry in archive.infolist():
        with archive.open(entry) as file:
            if file.name.__contains__( 'HRCE'):
                # last_sep = file.name.rindex('/')
                img = Image.open(file)
                print(img.size, img.mode, len(img.getdata()))
| [
"PIL.Image.open",
"numpy.repeat",
"pandas.read_csv",
"pathlib.Path",
"zipfile.ZipFile",
"os.path.join",
"os.pathPath",
"pandas.DataFrame",
"data_layer.util.image_path"
] | [((388, 435), 'os.path.join', 'os.path.join', (['DEFAULT_BASE_PATH', '"""metadata.csv"""'], {}), "(DEFAULT_BASE_PATH, 'metadata.csv')\n", (400, 435), False, 'import os\n'), ((819, 858), 'pandas.read_csv', 'pd.read_csv', (['DEFAULT_METADATA_BASE_PATH'], {}), '(DEFAULT_METADATA_BASE_PATH)\n', (830, 858), True, 'import pandas as pd\n'), ((1132, 1154), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {}), '(metadata)\n', (1144, 1154), True, 'import pandas as pd\n'), ((329, 350), 'os.pathPath', 'os.pathPath', (['__file__'], {}), '(__file__)\n', (340, 350), False, 'import os\n'), ((557, 571), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (561, 571), False, 'from pathlib import Path\n'), ((1211, 1249), 'numpy.repeat', 'np.repeat', (['metadata.index.values', 'reps'], {}), '(metadata.index.values, reps)\n', (1220, 1249), True, 'import numpy as np\n'), ((1360, 1406), 'os.path.join', 'os.path.join', (['DEFAULT_BASE_PATH', '"""image_frame"""'], {}), "(DEFAULT_BASE_PATH, 'image_frame')\n", (1372, 1406), False, 'import os\n'), ((1477, 1527), 'os.path.join', 'os.path.join', (['DEFAULT_BASE_PATH', '"""image_frame.csv"""'], {}), "(DEFAULT_BASE_PATH, 'image_frame.csv')\n", (1489, 1527), False, 'import os\n'), ((1571, 1588), 'zipfile.ZipFile', 'ZipFile', (['filename'], {}), '(filename)\n', (1578, 1588), False, 'from zipfile import ZipFile\n'), ((978, 1050), 'data_layer.util.image_path', 'image_path', (["rec['experiment']", "rec['plate']", "rec['well']", "rec['site']", 'c'], {}), "(rec['experiment'], rec['plate'], rec['well'], rec['site'], c)\n", (988, 1050), False, 'from data_layer.util import image_path\n'), ((1798, 1814), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (1808, 1814), False, 'from PIL import Image\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Drop in replacement for lf0 extraction in the HTS training demo
script using Praat, trying to compensate for octave errors
especially if the voice is slightly hoarse...
"""
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
import array
import math
import numpy as np
import ttslab
from ttslab.trackfile import Track
# Extend Track with the Praat-based helpers (provides get_f0 used below).
ttslab.extend(Track, "ttslab.trackfile.funcs.tfuncs_praat")
def friendly_log(f):
    """Natural log of *f*, mapping math.log's ValueError (f <= 0) to the
    conventional lf0 floor value of -1e+10 instead of raising."""
    try:
        value = math.log(f)
    except ValueError:
        value = float('-1e+10')
    return value
if __name__ == "__main__":
    # CLI: infile outfile minF0 maxF0
    fn = sys.argv[1]
    outfn = sys.argv[2]
    minf0 = float(sys.argv[3])
    maxf0 = float(sys.argv[4])
    t = Track()
    t.get_f0(fn, minpitch=minf0, maxpitch=maxf0, timestep=0.005, fixocterrs=True) #timestep hardcoded here because of hack below...
    #hack aligns samples with equiv from HTS script:
    # Pad two zero samples at each end so the sample count matches HTS' output.
    pad = np.array([0.0, 0.0]).reshape(-1, 1)
    f0hzvalues = np.concatenate([pad, t.values, pad])
    # Write log-F0 as 32-bit floats (zeros map to the -1e+10 floor).
    # NOTE(review): the bytes typecode b"f" is a Py2 holdover; on Py3
    # array.array expects a str typecode.
    lf0 = array.array(b"f", map(friendly_log, f0hzvalues))
    with open(outfn, "wb") as outfh:
        lf0.tofile(outfh)
| [
"ttslab.trackfile.Track",
"math.log",
"ttslab.extend",
"numpy.array",
"numpy.concatenate"
] | [((454, 513), 'ttslab.extend', 'ttslab.extend', (['Track', '"""ttslab.trackfile.funcs.tfuncs_praat"""'], {}), "(Track, 'ttslab.trackfile.funcs.tfuncs_praat')\n", (467, 513), False, 'import ttslab\n'), ((770, 777), 'ttslab.trackfile.Track', 'Track', ([], {}), '()\n', (775, 777), False, 'from ttslab.trackfile import Track\n'), ((1027, 1063), 'numpy.concatenate', 'np.concatenate', (['[pad, t.values, pad]'], {}), '([pad, t.values, pad])\n', (1041, 1063), True, 'import numpy as np\n'), ((560, 571), 'math.log', 'math.log', (['f'], {}), '(f)\n', (568, 571), False, 'import math\n'), ((974, 994), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (982, 994), True, 'import numpy as np\n')] |
import sys
import numpy as np
import matplotlib
print("Python", sys.version)
print("Numpy", np.__version__)
print("Matplotlib", matplotlib.__version__)
# A single dense layer: 4 inputs feeding 3 neurons (one weight row each).
inputs = [1.0, 2.0, 3.0, 2.5]
weights = [[0.2, 0.8, -0.5, 1.0],
           [0.5, -0.91, 0.26, -0.5],
           [-0.26, -0.27, 0.17, 0.87]]
bias = [2.0,3.0,0.5]
# Hand-rolled forward pass: per neuron, dot(inputs, weights) + bias.
layer_output = []
for n_weight, n_bias in zip(weights, bias) :
    neuron_output = 0
    for neu_input,neu_weight in zip(inputs,n_weight) :
        neuron_output = neuron_output+ neu_input*neu_weight
    neuron_output = neuron_output+n_bias
    layer_output.append (neuron_output)
print("layer_output from Scratch Python", layer_output)
# Same computation vectorized with numpy; should match the loop above.
output = np.dot(weights,inputs)+ bias
# NOTE(review): "Mumpy" in the label below is a typo for "Numpy".
print("layer_output using Mumpy", output) | [
"numpy.dot"
] | [((696, 719), 'numpy.dot', 'np.dot', (['weights', 'inputs'], {}), '(weights, inputs)\n', (702, 719), True, 'import numpy as np\n')] |
import unittest
from site_analysis.atom import Atom
from unittest.mock import patch, MagicMock, Mock
from pymatgen.core import Structure, Lattice
import numpy as np
class AtomTestCase(unittest.TestCase):
    """Unit tests for site_analysis.atom.Atom."""

    def test_atom_is_initialised(self):
        """A new Atom stores its index and starts with empty state."""
        atom = Atom(index=22)
        self.assertEqual(atom.index, 22)
        self.assertEqual(atom.in_site, None)
        self.assertEqual(atom._frac_coords, None)
        self.assertEqual(atom.trajectory, [])

    def test_reset(self):
        """reset() clears site assignment, cached coords, and trajectory."""
        atom = Atom(index=12)
        atom.in_site = 3
        atom._frac_coords = np.array([0, 0, 0.0, 0.0])
        atom.trajectory = [1, 2, 3]
        atom.reset()
        self.assertEqual(atom.in_site, None)
        self.assertEqual(atom._frac_coords, None)
        self.assertEqual(atom.trajectory, [])

    def test___str__(self):
        atom = Atom(index=12)
        self.assertEqual(str(atom), 'Atom: 12')

    def test___repr__(self):
        atom = Atom(index=12)
        self.assertEqual(atom.__repr__(),
                         'site_analysis.Atom(index=12, in_site=None, frac_coords=None)')

    def test_assign_coords(self):
        """assign_coords() copies this atom's fractional coords from a structure."""
        atom = Atom(index=1)
        structure = example_structure()
        atom.assign_coords(structure=structure)
        np.testing.assert_array_equal(atom._frac_coords, structure[1].frac_coords)

    # Name fixed: was test_frac_coords_getter_raises_atttribute_error_... (typo).
    def test_frac_coords_getter_raises_attribute_error_if_frac_coords_is_none(self):
        atom = Atom(index=1)
        atom._frac_coords = None
        with self.assertRaises(AttributeError):
            atom.frac_coords

    def test_frac_coords_getter(self):
        atom = Atom(index=12)
        c = np.array([0.3, 0.4, 0.5])
        atom._frac_coords = c
        np.testing.assert_array_equal(atom.frac_coords, c)

    def test_as_dict(self):
        """as_dict() round-trips index, in_site and frac_coords."""
        index = 11
        in_site = 4
        c = np.array([0.1, 0.2, 0.3])
        atom = Atom(index=index)
        atom.in_site = in_site
        atom._frac_coords = c
        d = atom.as_dict()
        self.assertEqual(d['index'], index)
        self.assertEqual(d['in_site'], in_site)
        np.testing.assert_array_equal(d['frac_coords'], c)
def example_structure(species=None):
    """Return a five-site cubic (a = 10) test Structure.

    If *species* is falsy, all five sites default to sulfur.
    """
    if not species:
        species = ['S'] * 5
    coords = np.array(
        [[1.0, 1.0, 1.0],
         [9.0, 1.0, 1.0],
         [5.0, 5.0, 5.0],
         [1.0, 9.0, 9.0],
         [9.0, 9.0, 9.0]])
    return Structure(coords=coords,
                     lattice=Lattice.from_parameters(10.0, 10.0, 10.0, 90, 90, 90),
                     species=species,
                     coords_are_cartesian=True)
return structure
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| [
"pymatgen.core.Lattice.from_parameters",
"pymatgen.core.Structure",
"numpy.array",
"site_analysis.atom.Atom",
"unittest.main",
"numpy.testing.assert_array_equal"
] | [((2210, 2263), 'pymatgen.core.Lattice.from_parameters', 'Lattice.from_parameters', (['(10.0)', '(10.0)', '(10.0)', '(90)', '(90)', '(90)'], {}), '(10.0, 10.0, 10.0, 90, 90, 90)\n', (2233, 2263), False, 'from pymatgen.core import Structure, Lattice\n'), ((2287, 2387), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0], [9.0, 1.0, 1.0], [5.0, 5.0, 5.0], [1.0, 9.0, 9.0], [9.0, \n 9.0, 9.0]]'], {}), '([[1.0, 1.0, 1.0], [9.0, 1.0, 1.0], [5.0, 5.0, 5.0], [1.0, 9.0, 9.0\n ], [9.0, 9.0, 9.0]])\n', (2295, 2387), True, 'import numpy as np\n'), ((2531, 2630), 'pymatgen.core.Structure', 'Structure', ([], {'coords': 'cartesian_coords', 'lattice': 'lattice', 'species': 'species', 'coords_are_cartesian': '(True)'}), '(coords=cartesian_coords, lattice=lattice, species=species,\n coords_are_cartesian=True)\n', (2540, 2630), False, 'from pymatgen.core import Structure, Lattice\n'), ((2770, 2785), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2783, 2785), False, 'import unittest\n'), ((261, 275), 'site_analysis.atom.Atom', 'Atom', ([], {'index': '(22)'}), '(index=22)\n', (265, 275), False, 'from site_analysis.atom import Atom\n'), ((500, 514), 'site_analysis.atom.Atom', 'Atom', ([], {'index': '(12)'}), '(index=12)\n', (504, 514), False, 'from site_analysis.atom import Atom\n'), ((568, 594), 'numpy.array', 'np.array', (['[0, 0, 0.0, 0.0]'], {}), '([0, 0, 0.0, 0.0])\n', (576, 594), True, 'import numpy as np\n'), ((842, 856), 'site_analysis.atom.Atom', 'Atom', ([], {'index': '(12)'}), '(index=12)\n', (846, 856), False, 'from site_analysis.atom import Atom\n'), ((950, 964), 'site_analysis.atom.Atom', 'Atom', ([], {'index': '(12)'}), '(index=12)\n', (954, 964), False, 'from site_analysis.atom import Atom\n'), ((1121, 1134), 'site_analysis.atom.Atom', 'Atom', ([], {'index': '(1)'}), '(index=1)\n', (1125, 1134), False, 'from site_analysis.atom import Atom\n'), ((1231, 1305), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['atom._frac_coords', 
'structure[1].frac_coords'], {}), '(atom._frac_coords, structure[1].frac_coords)\n', (1260, 1305), True, 'import numpy as np\n'), ((1412, 1425), 'site_analysis.atom.Atom', 'Atom', ([], {'index': '(1)'}), '(index=1)\n', (1416, 1425), False, 'from site_analysis.atom import Atom\n'), ((1591, 1605), 'site_analysis.atom.Atom', 'Atom', ([], {'index': '(12)'}), '(index=12)\n', (1595, 1605), False, 'from site_analysis.atom import Atom\n'), ((1618, 1643), 'numpy.array', 'np.array', (['[0.3, 0.4, 0.5]'], {}), '([0.3, 0.4, 0.5])\n', (1626, 1643), True, 'import numpy as np\n'), ((1682, 1732), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['atom.frac_coords', 'c'], {}), '(atom.frac_coords, c)\n', (1711, 1732), True, 'import numpy as np\n'), ((1813, 1838), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3]'], {}), '([0.1, 0.2, 0.3])\n', (1821, 1838), True, 'import numpy as np\n'), ((1854, 1871), 'site_analysis.atom.Atom', 'Atom', ([], {'index': 'index'}), '(index=index)\n', (1858, 1871), False, 'from site_analysis.atom import Atom\n'), ((2060, 2110), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["d['frac_coords']", 'c'], {}), "(d['frac_coords'], c)\n", (2089, 2110), True, 'import numpy as np\n')] |
__description__ = \
"""
Standard plots for output of machine learning runs.
"""
__author__ = "<NAME>"
__date__ = "2016-04-23"
import numpy as np
import matplotlib.pylab as plt
from matplotlib.backends.backend_pdf import PdfPages
def correlation(ml_data, ml_machine, pdf_file=None, max_value=12):
    """Scatter plot of measured vs. predicted values.

    Training-set points are red, test-set points blue; a least-squares
    line is fit through the test points.  If *pdf_file* is given the
    figure is also written there.  Returns the matplotlib figure.
    """
    pdf = PdfPages(pdf_file) if pdf_file is not None else None
    plt.rcParams.update({'font.size': 20})
    fig = plt.figure(figsize=(7, 7))
    lo, hi = -1 * max_value, max_value
    # Guide lines: the identity y = x plus the two axes.
    plt.plot(np.array([lo, hi]), np.array([lo, hi]), 'k--')
    plt.plot((0, 0), (lo, hi), 'k--')
    plt.plot((lo, hi), (0, 0), 'k--')
    # Training set (red).
    plt.plot(ml_data.training_values,
             ml_machine.predict(ml_data.training_features),
             "o", color="red")
    # Test set (blue).
    test_prediction = ml_machine.predict(ml_data.test_features)
    plt.plot(ml_data.test_values, test_prediction, "o", color="blue")
    # Least-squares line through the test-set points.
    slope, intercept = np.polyfit(ml_data.test_values, test_prediction, 1)
    xs = np.array((lo, hi))
    plt.plot(xs, slope * xs + intercept, 'k-', linewidth=2.9)
    plt.ylim(lo, hi)
    plt.xlim(lo, hi)
    plt.xlabel("measured value")
    plt.ylabel("predicted value")
    if pdf is not None:
        pdf.savefig()
        pdf.close()
    return fig
| [
"matplotlib.pylab.xlim",
"numpy.polyfit",
"matplotlib.pylab.figure",
"matplotlib.pylab.ylim",
"matplotlib.pylab.xlabel",
"numpy.array",
"matplotlib.pylab.rcParams.update",
"matplotlib.pylab.plot",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pylab.ylabel"
] | [((367, 405), 'matplotlib.pylab.rcParams.update', 'plt.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (386, 405), True, 'import matplotlib.pylab as plt\n'), ((416, 442), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (426, 442), True, 'import matplotlib.pylab as plt\n'), ((574, 626), 'matplotlib.pylab.plot', 'plt.plot', (['(0, 0)', '(-1 * max_value, max_value)', '"""k--"""'], {}), "((0, 0), (-1 * max_value, max_value), 'k--')\n", (582, 626), True, 'import matplotlib.pylab as plt\n'), ((625, 677), 'matplotlib.pylab.plot', 'plt.plot', (['(-1 * max_value, max_value)', '(0, 0)', '"""k--"""'], {}), "((-1 * max_value, max_value), (0, 0), 'k--')\n", (633, 677), True, 'import matplotlib.pylab as plt\n'), ((774, 843), 'matplotlib.pylab.plot', 'plt.plot', (['ml_data.training_values', 'train_prediction', '"""o"""'], {'color': '"""red"""'}), "(ml_data.training_values, train_prediction, 'o', color='red')\n", (782, 843), True, 'import matplotlib.pylab as plt\n'), ((958, 1023), 'matplotlib.pylab.plot', 'plt.plot', (['ml_data.test_values', 'test_prediction', '"""o"""'], {'color': '"""blue"""'}), "(ml_data.test_values, test_prediction, 'o', color='blue')\n", (966, 1023), True, 'import matplotlib.pylab as plt\n'), ((1066, 1117), 'numpy.polyfit', 'np.polyfit', (['ml_data.test_values', 'test_prediction', '(1)'], {}), '(ml_data.test_values, test_prediction, 1)\n', (1076, 1117), True, 'import numpy as np\n'), ((1126, 1163), 'numpy.array', 'np.array', (['(-1 * max_value, max_value)'], {}), '((-1 * max_value, max_value))\n', (1134, 1163), True, 'import numpy as np\n'), ((1165, 1208), 'matplotlib.pylab.plot', 'plt.plot', (['x', '(m * x + b)', '"""k-"""'], {'linewidth': '(2.9)'}), "(x, m * x + b, 'k-', linewidth=2.9)\n", (1173, 1208), True, 'import matplotlib.pylab as plt\n'), ((1215, 1250), 'matplotlib.pylab.ylim', 'plt.ylim', (['(-1 * max_value)', 'max_value'], {}), '(-1 * max_value, max_value)\n', (1223, 1250), 
True, 'import matplotlib.pylab as plt\n'), ((1252, 1287), 'matplotlib.pylab.xlim', 'plt.xlim', (['(-1 * max_value)', 'max_value'], {}), '(-1 * max_value, max_value)\n', (1260, 1287), True, 'import matplotlib.pylab as plt\n'), ((1290, 1318), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""measured value"""'], {}), "('measured value')\n", (1300, 1318), True, 'import matplotlib.pylab as plt\n'), ((1323, 1352), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""predicted value"""'], {}), "('predicted value')\n", (1333, 1352), True, 'import matplotlib.pylab as plt\n'), ((339, 357), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['pdf_file'], {}), '(pdf_file)\n', (347, 357), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((493, 530), 'numpy.array', 'np.array', (['[-1 * max_value, max_value]'], {}), '([-1 * max_value, max_value])\n', (501, 530), True, 'import numpy as np\n'), ((528, 565), 'numpy.array', 'np.array', (['[-1 * max_value, max_value]'], {}), '([-1 * max_value, max_value])\n', (536, 565), True, 'import numpy as np\n')] |
import sys, serial, time, array
import numpy as np
import statistics as st
class TFmini():
def __init__(self):
self.ser = serial.Serial("/dev/ttyAMA0", 115200)
#self.unit_cm = 24.57284 # number unit per one centimater
self.lidar_per_sec = 127.64107
self.rate_speed = 4.906
self.calib_no = 70;
def calib(self):
print(self.ser.is_open)
if self.ser.is_open == False:
self.ser.open()
cal = []
i = 0
while True:
count = self.ser.in_waiting
if count > 8:
recv = self.ser.read(9) #Read array from LiDar
self.ser.reset_input_buffer()
if recv[0] == 89 and recv[1] == 89: # 0x59 is 'Y'
distance = recv[2] + recv[3] * 256
#print(distance)
cal.append(distance)
#print(distance)
if len(cal) == 120:
#print("Cal Successed")
self.calib_no = st.mode(cal)
print("Calibrate Finished" + str(self.calib_no))
return self.calib_no
i=i+1;
def getTFminiData(self):
if self.ser.is_open == False:
self.ser.open()
disList = []
i = 0
run = 0
#print("Scanning!")
try:
while True:
count = self.ser.in_waiting
if count > 8:
recv = self.ser.read(9) #Read array from LiDar
self.ser.reset_input_buffer()
#print(recv)
if recv[0] == 89 and recv[1] == 89: # 0x59 is 'Y'
#print(i)
#recv 2 is distance
distance = recv[2] + recv[3] * 256
height = abs(distance - self.calib_no)
#print(distance)
if height > 2:
disList.append(height)
run = 0
#เธเนเธญเธเธเธฑเธเธเธฒเธฃเนเธเธงเนเธเนเธเธขเนเธกเนเธซเธขเธธเธเนเธเธเธฑเธเธเธต
if len(disList) > 1 and height < 2:
run = run+1;
#เธเนเธญเธเธเธฑเธเธเธฒเธฃเนเธเธงเนเธเธเธญเธเธเนเธญเธกเธนเธฅ
if run > 500:
#print("RUN > 120")
print("Get Successed")
break;
else:
pass
print("TFMini Finished")
return disList
except AssertionError as e:
print(e)
return None
def getSize(self,disList):
ss = self.rate_speed * (len(disList)/self.lidar_per_sec)
obj = {
'hei': np.max(disList),
'wid': ss,
}
return obj | [
"serial.Serial",
"statistics.mode",
"numpy.max"
] | [((144, 181), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyAMA0"""', '(115200)'], {}), "('/dev/ttyAMA0', 115200)\n", (157, 181), False, 'import sys, serial, time, array\n'), ((3118, 3133), 'numpy.max', 'np.max', (['disList'], {}), '(disList)\n', (3124, 3133), True, 'import numpy as np\n'), ((1125, 1137), 'statistics.mode', 'st.mode', (['cal'], {}), '(cal)\n', (1132, 1137), True, 'import statistics as st\n')] |
# Front matter
##############
import os
from os import fdopen, remove
from tempfile import mkstemp
from shutil import move
import glob
import re
import time
import pandas as pd
import numpy as np
from scipy import constants
from scipy.optimize import curve_fit, fsolve
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
from scipy.interpolate import spline
import math
import seaborn as sns
# Global plot styling for all figures produced below.
matplotlib.rc('xtick', labelsize=16)
matplotlib.rc('ytick', labelsize=16)
rc = {'lines.linewidth': 1,
      'axes.labelsize': 20,
      'axes.titlesize': 20,
      'legend.fontsize': 26,
      'xtick.direction': u'in',
      'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
start_time = time.time()
# Input information
######################
# Path to get volumes from hcp-Fe Murphy dataset
EOSpath = '../../081_vD_Fe_PowerLaw/Results/input_values.csv'
# Path to get re-analyzed hcp-Fe Murphy PDOS
phoxpath = '../../050_phox_Fe_man/'
# Fei Gruneisen parameter variables
gamma0 = 1.74
q = 0.78
# From Dewaele 2006, agrees with Fei 2016 to third decimal place.
V0 = 22.428
dV0 = 0.098
# Verification:
rho0 = 8.2695 # g/cc, Fei 2016 density
M = 55.845 # g/mol, for natural Fe
V0_ccpermol = M/rho0 # cm^3/mol
# Convert cm^3/mol to A^3 per cell; the factor 2 presumably accounts for
# 2 atoms per hcp unit cell (10^24 A^3 per cm^3) — confirm against V0 above.
V0_check = V0_ccpermol*(2*10**24)/constants.N_A # A^3
print(V0_check)
# Functions
###########
def calcScalingParam(V, Vi, V0, gamma0, q):
    """Scaling parameter xi for rescaling a PDOS measured at volume Vi
    to volume V, using the Grueneisen model gamma(V) = gamma0*(V/V0)**q.

    Returns 1.0 when V == Vi.
    """
    gamma_i = gamma0 * (Vi / V0) ** q
    exponent = gamma_i * (1 / q) * ((V / Vi) ** q - 1)
    return np.exp(exponent)
def scaleDOS(ref_folder,xi,dos_dict):
    """Rescale the reference PDOS by xi: D'(E) = xi * D_ref(xi * E),
    evaluated on (a cropped copy of) the target energy grid.

    NOTE(review): the target grid comes from the module-level `dos_df`,
    not from a parameter — at call time that is whichever PDOS file was
    loaded last in the import loop.  Confirm this is intended (it looks
    like dos_dict[folder] for the current target was meant).
    """
    dos_ref_df = dos_dict[ref_folder]
    # Interpolate the reference PDOS
    fdos = interp1d(dos_ref_df['E'], dos_ref_df['DOS'], kind='cubic')
    E_ref_min = min(dos_ref_df['E'])
    E_ref_max = max(dos_ref_df['E'])
    # Scale PDOS using xi
    # If xi > 1, we need to limit the energy range so we don't call the interpolated
    # reference function out of range
    E_min = max(E_ref_min/xi,min(dos_df['E']))
    E_max = min(E_ref_max/xi,max(dos_df['E']))
    dos_crop_df = dos_df[dos_df['E']>(E_min)]
    dos_crop_df = dos_crop_df[dos_crop_df['E']<(E_max)]
    dos_scaled = xi*fdos(xi*dos_crop_df['E'])
    # Save scaled PDOS
    dos_scaled_df = dos_crop_df[['E']].copy()
    dos_scaled_df['DOS'] = dos_scaled
    # dos_scaled_df.to_csv(ref_folder+'/scaledDOSdata/scaled2_'+folder+'.csv',index=False)
    return(dos_scaled_df)
def DOSsubplot(folder, ref_folder, number, ax):
    """Draw one measured PDOS on *ax* with error bars, vertically offset
    by 100*number, and overlay the scaled reference PDOS in red unless
    *folder* is the reference itself.

    Reads the module-level dos_dict / dos_scaled_dict.
    """
    shift = 100 * number
    measured = dos_dict[folder]
    ax.errorbar(measured['E'], measured['DOS'] + shift, yerr=measured['dDOS'],
                marker='.', markersize=1.5, color='black', ecolor='darkgray',
                elinewidth=0.5, linestyle='none', zorder=-5)
    if folder != ref_folder:
        scaled = dos_scaled_dict[folder]
        ax.plot(scaled['E'], scaled['DOS'] + shift, color='red')
def plotScaledPDOS(ref_folder, folder_list, dos_dict, dos_scaled_dict):
    """Stack all measured PDOS curves in one tall panel (one vertical slot
    each, in folder_list order) with the scaled reference overlaid, then
    save the figure to <ref_folder>/Fei_scaledPDOS_Fe_narrow.pdf.

    NOTE: the dos_dict / dos_scaled_dict arguments are not used directly;
    DOSsubplot reads the module-level dicts.
    """
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4, 10))
    for offsetnum, folder in enumerate(folder_list):
        DOSsubplot(folder, ref_folder, offsetnum, ax)
    ax.set_xlabel(r'Energy (meV)', fontsize=16)
    ax.set_ylabel(r'PDOS $D(E,V)$', fontsize=16)
    ax.set_xlim([0, 85])
    ax.set_ylim(ymin=-10, ymax=1050)
    ax.xaxis.set_ticks([0, 20, 40, 60, 80])
    ax.tick_params(direction='in', left='off', top='on')
    ax.set_yticklabels([])
    fig.savefig(ref_folder + '/Fei_scaledPDOS_Fe_narrow.pdf', format='pdf',
                bbox_inches='tight')
    plt.close()
# Import data
#############
# Load the EOS table (volumes/pressures per measurement folder).
input_df = pd.read_csv(EOSpath, engine='python')
# # Only use hcp data (We measured the bcc phase, not Caitlin)
# input_df = input_df[input_df['Phase']=='hcp']
# # Data was out of order as imported. To fix that:
# input_df = input_df.sort_values('P')
# Load PDOS data
# Find the filepath of all .res NRIXS files in phox directories
respath_list = [filepath for filepath in glob.glob(phoxpath+'*/*.res')]
# Prep lists, dictionaries, and df to store input data in
folder_list = []
index_dict = dict()
dos_dict = dict()
# Collect folders, indices, paths, and input values
for respath in respath_list:
    # Determine filepaths for dos and in_psvl
    folder = re.findall('([A-Za-z0-9_]+)/[A-Za-z0-9_]+.res',respath)[0]
    index = re.findall('/([A-Za-z0-9_]+).res',respath)[0]
    dospath = phoxpath+folder+'/Output/'+index+'_dos.dat'
    # Check if each folder is hcp. Don't use it otherwise
    phase = input_df[input_df['Folder']==folder].iloc[-1]['Phase']
    if phase == 'hcp':
        # Import PDOS
        dos_df = pd.read_csv(dospath, sep='\s+', comment='@', header=None,
            names = ['E','DOS','dDOS'])
        # Store to use PDOS later
        folder_list.append(folder)
        index_dict[folder] = index
        dos_dict[folder] = dos_df
# Sort folder_list by pressure (needed to plot in correct order)
# NOTE(review): DataFrame.append was removed in pandas 2.0; this script
# requires an older pandas (or a pd.concat rewrite).
sort_folder_df = pd.DataFrame(columns = ['Folder','P'])
for folder in folder_list:
    P = input_df[input_df['Folder']==folder].iloc[-1]['P']
    sort_folder_df = sort_folder_df.append(
        pd.DataFrame([[folder,P]],columns=sort_folder_df.columns))
sort_folder_df = sort_folder_df.sort_values('P')
folder_list = sort_folder_df['Folder'].values
# Plot scaled PDOS
##################
# Create a dataframe to store results in
results_df = pd.DataFrame(columns = ['Ref Folder','Ref Index','Vi','dVi',
    'Folder','Index','V','dV','V/Vi','xi'])
# for ref_folder in ['2009Oct_30GPa']:
# Outer loop: each measurement in turn serves as the scaling reference.
for ref_folder in folder_list:
    print('Reference PDOS: '+ref_folder)
    # Check if a folder for the reference PDOS exists, and make one if not
    if not os.path.exists(ref_folder):
        os.makedirs(ref_folder)
    dos_ref_df = dos_dict[ref_folder]
    # What is the reference volume?
    Vi = input_df[input_df['Folder']==ref_folder].iloc[-1]['V']
    dVi = input_df[input_df['Folder']==ref_folder].iloc[-1]['dV']
    dos_scaled_dict = dict()
    # for folder in ['2011Feb_171GPa']:
    # Inner loop: scale the reference PDOS onto every target volume.
    for folder in folder_list:
        print('\tScaling to '+folder)
        # What is the volume?
        V = input_df[input_df['Folder']==folder].iloc[-1]['V']
        dV = input_df[input_df['Folder']==folder].iloc[-1]['dV']
        V_Vi = V/Vi
        xi = calcScalingParam(V,Vi,V0,gamma0,q)
        dos_scaled_dict[folder] = scaleDOS(ref_folder,xi,dos_dict)
        results_df = results_df.append(pd.DataFrame([[
            ref_folder,index_dict[ref_folder],Vi,dVi,
            folder,index_dict[folder],V,dV,V_Vi,xi]],columns = results_df.columns))
    # Create plot of ref PDOS scaled to all other PDOS
    plotScaledPDOS(ref_folder,folder_list,dos_dict,dos_scaled_dict)
    # At this point in the nested loops, save results in case code crashes
    # Will be overwritten on each loop with updated results
    results_df = results_df.round({'V/Vi':3,'xi':4,'dxi':4})
    results_df.to_csv('Results/Fei_scalingparameters.csv',index=False)
| [
"os.path.exists",
"pandas.read_csv",
"os.makedirs",
"scipy.interpolate.interp1d",
"seaborn.set_style",
"numpy.exp",
"matplotlib.pyplot.close",
"matplotlib.rc",
"pandas.DataFrame",
"re.findall",
"time.time",
"matplotlib.pyplot.subplots",
"glob.glob"
] | [((509, 545), 'matplotlib.rc', 'matplotlib.rc', (['"""xtick"""'], {'labelsize': '(16)'}), "('xtick', labelsize=16)\n", (522, 545), False, 'import matplotlib\n'), ((547, 583), 'matplotlib.rc', 'matplotlib.rc', (['"""ytick"""'], {'labelsize': '(16)'}), "('ytick', labelsize=16)\n", (560, 583), False, 'import matplotlib\n'), ((765, 794), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {'rc': 'rc'}), "('ticks', rc=rc)\n", (778, 794), True, 'import seaborn as sns\n'), ((809, 820), 'time.time', 'time.time', ([], {}), '()\n', (818, 820), False, 'import time\n'), ((3666, 3703), 'pandas.read_csv', 'pd.read_csv', (['EOSpath'], {'engine': '"""python"""'}), "(EOSpath, engine='python')\n", (3677, 3703), True, 'import pandas as pd\n'), ((4934, 4971), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Folder', 'P']"}), "(columns=['Folder', 'P'])\n", (4946, 4971), True, 'import pandas as pd\n'), ((5348, 5458), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Ref Folder', 'Ref Index', 'Vi', 'dVi', 'Folder', 'Index', 'V', 'dV',\n 'V/Vi', 'xi']"}), "(columns=['Ref Folder', 'Ref Index', 'Vi', 'dVi', 'Folder',\n 'Index', 'V', 'dV', 'V/Vi', 'xi'])\n", (5360, 5458), True, 'import pandas as pd\n'), ((1480, 1543), 'numpy.exp', 'np.exp', (['(gamma0 * (Vi / V0) ** q * (1 / q) * ((V / Vi) ** q - 1))'], {}), '(gamma0 * (Vi / V0) ** q * (1 / q) * ((V / Vi) ** q - 1))\n', (1486, 1543), True, 'import numpy as np\n'), ((1660, 1718), 'scipy.interpolate.interp1d', 'interp1d', (["dos_ref_df['E']", "dos_ref_df['DOS']"], {'kind': '"""cubic"""'}), "(dos_ref_df['E'], dos_ref_df['DOS'], kind='cubic')\n", (1668, 1718), False, 'from scipy.interpolate import interp1d\n'), ((3091, 3138), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(4, 10)'}), '(nrows=1, ncols=1, figsize=(4, 10))\n', (3103, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3612, 3623), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3621, 3623), True, 
'import matplotlib.pyplot as plt\n'), ((4029, 4060), 'glob.glob', 'glob.glob', (["(phoxpath + '*/*.res')"], {}), "(phoxpath + '*/*.res')\n", (4038, 4060), False, 'import glob\n'), ((4310, 4366), 're.findall', 're.findall', (['"""([A-Za-z0-9_]+)/[A-Za-z0-9_]+.res"""', 'respath'], {}), "('([A-Za-z0-9_]+)/[A-Za-z0-9_]+.res', respath)\n", (4320, 4366), False, 'import re\n'), ((4378, 4421), 're.findall', 're.findall', (['"""/([A-Za-z0-9_]+).res"""', 'respath'], {}), "('/([A-Za-z0-9_]+).res', respath)\n", (4388, 4421), False, 'import re\n'), ((4647, 4737), 'pandas.read_csv', 'pd.read_csv', (['dospath'], {'sep': '"""\\\\s+"""', 'comment': '"""@"""', 'header': 'None', 'names': "['E', 'DOS', 'dDOS']"}), "(dospath, sep='\\\\s+', comment='@', header=None, names=['E',\n 'DOS', 'dDOS'])\n", (4658, 4737), True, 'import pandas as pd\n'), ((5099, 5158), 'pandas.DataFrame', 'pd.DataFrame', (['[[folder, P]]'], {'columns': 'sort_folder_df.columns'}), '([[folder, P]], columns=sort_folder_df.columns)\n', (5111, 5158), True, 'import pandas as pd\n'), ((5649, 5675), 'os.path.exists', 'os.path.exists', (['ref_folder'], {}), '(ref_folder)\n', (5663, 5675), False, 'import os\n'), ((5682, 5705), 'os.makedirs', 'os.makedirs', (['ref_folder'], {}), '(ref_folder)\n', (5693, 5705), False, 'import os\n'), ((6317, 6455), 'pandas.DataFrame', 'pd.DataFrame', (['[[ref_folder, index_dict[ref_folder], Vi, dVi, folder, index_dict[folder],\n V, dV, V_Vi, xi]]'], {'columns': 'results_df.columns'}), '([[ref_folder, index_dict[ref_folder], Vi, dVi, folder,\n index_dict[folder], V, dV, V_Vi, xi]], columns=results_df.columns)\n', (6329, 6455), True, 'import pandas as pd\n')] |
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
import simpa as sp
import numpy as np
def create_custom_absorber():
    """Create a spectrum with random absorption values on a fixed 200-1500 nm grid."""
    grid = np.linspace(200, 1500, 100)
    random_values = np.random.random(np.shape(grid))
    return sp.Spectrum(spectrum_name="random absorber",
                       wavelengths=grid,
                       values=random_values)
def create_custom_chromophore(volume_fraction: float = 1.0):
    """Wrap the random custom absorber into a molecule.

    Uses a constant scattering coefficient (40.0) and constant
    anisotropy (0.9); `volume_fraction` is passed through unchanged.
    """
    absorption = create_custom_absorber()
    scattering = sp.ScatteringSpectrumLibrary.CONSTANT_SCATTERING_ARBITRARY(40.0)
    anisotropy = sp.AnisotropySpectrumLibrary.CONSTANT_ANISOTROPY_ARBITRARY(0.9)
    return sp.Molecule(
        absorption_spectrum=absorption,
        volume_fraction=volume_fraction,
        scattering_spectrum=scattering,
        anisotropy_spectrum=anisotropy,
    )
def create_custom_tissue_type():
    """Assemble a molecular composition of water, (de)oxyhemoglobin and the custom chromophore.

    The volume fractions (0.4 water + 0.5 blood + 0.1 custom) sum to 1.0,
    as required for a tissue type; the blood fraction is split between
    oxy- and deoxyhemoglobin according to an oxygenation of 0.4.
    """
    generator = sp.MolecularCompositionGenerator()

    water_fraction = 0.4
    blood_fraction = 0.5
    custom_fraction = 0.1
    # fraction of the blood volume that is oxygenated
    oxygenation = 0.4

    generator.append(key="oxyhemoglobin",
                     value=sp.MOLECULE_LIBRARY.oxyhemoglobin(oxygenation * blood_fraction))
    generator.append(key="deoxyhemoglobin",
                     value=sp.MOLECULE_LIBRARY.deoxyhemoglobin((1 - oxygenation) * blood_fraction))
    generator.append(key="water",
                     value=sp.MOLECULE_LIBRARY.water(water_fraction))
    generator.append(key="custom",
                     value=create_custom_chromophore(custom_fraction))
    return generator
| [
"simpa.ScatteringSpectrumLibrary.CONSTANT_SCATTERING_ARBITRARY",
"simpa.MOLECULE_LIBRARY.deoxyhemoglobin",
"simpa.MOLECULE_LIBRARY.water",
"numpy.linspace",
"numpy.shape",
"simpa.MOLECULE_LIBRARY.oxyhemoglobin",
"simpa.AnisotropySpectrumLibrary.CONSTANT_ANISOTROPY_ARBITRARY",
"simpa.MolecularCompositi... | [((235, 262), 'numpy.linspace', 'np.linspace', (['(200)', '(1500)', '(100)'], {}), '(200, 1500, 100)\n', (246, 262), True, 'import numpy as np\n'), ((1061, 1095), 'simpa.MolecularCompositionGenerator', 'sp.MolecularCompositionGenerator', ([], {}), '()\n', (1093, 1095), True, 'import simpa as sp\n'), ((738, 802), 'simpa.ScatteringSpectrumLibrary.CONSTANT_SCATTERING_ARBITRARY', 'sp.ScatteringSpectrumLibrary.CONSTANT_SCATTERING_ARBITRARY', (['(40.0)'], {}), '(40.0)\n', (796, 802), True, 'import simpa as sp\n'), ((836, 899), 'simpa.AnisotropySpectrumLibrary.CONSTANT_ANISOTROPY_ARBITRARY', 'sp.AnisotropySpectrumLibrary.CONSTANT_ANISOTROPY_ARBITRARY', (['(0.9)'], {}), '(0.9)\n', (894, 899), True, 'import simpa as sp\n'), ((1447, 1517), 'simpa.MOLECULE_LIBRARY.oxyhemoglobin', 'sp.MOLECULE_LIBRARY.oxyhemoglobin', (['(oxygenation * blood_volume_fraction)'], {}), '(oxygenation * blood_volume_fraction)\n', (1480, 1517), True, 'import simpa as sp\n'), ((1622, 1700), 'simpa.MOLECULE_LIBRARY.deoxyhemoglobin', 'sp.MOLECULE_LIBRARY.deoxyhemoglobin', (['((1 - oxygenation) * blood_volume_fraction)'], {}), '((1 - oxygenation) * blood_volume_fraction)\n', (1657, 1700), True, 'import simpa as sp\n'), ((1795, 1843), 'simpa.MOLECULE_LIBRARY.water', 'sp.MOLECULE_LIBRARY.water', (['water_volume_fraction'], {}), '(water_volume_fraction)\n', (1820, 1843), True, 'import simpa as sp\n'), ((465, 486), 'numpy.shape', 'np.shape', (['wavelengths'], {}), '(wavelengths)\n', (473, 486), True, 'import numpy as np\n')] |
import numpy as np
import time
import sys
import random
from domain.make_env import make_env
from .ind import *
from domain.classify_gym import mnist_256, fashion_mnist
import pdb
class Task():
    """Problem domain to be solved by neural network. Uses OpenAI Gym patterns.
    """

    def __init__(self, game, paramOnly=False, nReps=1):
        """Initializes task environment
        Args:
          game - (string) - dict key of task to be solved (see domain/config.py)

        Optional:
          paramOnly - (bool)  - only load parameters instead of launching task?
          nReps     - (nReps) - number of trials to get average fitness
        """
        # NOTE(review): despite the docstring, `game` is used here as a config
        # object with attributes (input_size, env_name, ...), not a string.
        # Network properties
        self.nInput = game.input_size
        self.nOutput = game.output_size
        self.actRange = game.h_act
        self.absWCap = game.weightCap
        self.layers = game.layers
        # Activation ids: one bias node (id 1) followed by input and output activations.
        self.activations = np.r_[np.full(1, 1), game.i_act, game.o_act]

        # Environment
        self.maxEpisodeLength = game.max_episode_length
        self.actSelect = game.actionSelect
        if not paramOnly:
            self.env = make_env(game.env_name)

        # Special needs...
        self.needsClosed = (game.env_name.startswith("CartPoleSwingUp"))

    def testInd(self, wVec, aVec, view=False, seed=-1):
        """Evaluate individual on task
        Args:
          wVec    - (np_array) - weight matrix as a flattened vector
                    [N**2 X 1]
          aVec    - (np_array) - activation function of each node
                    [N X 1]    - stored as ints (see applyAct in ann.py)

        Optional:
          view    - (bool)     - view trial?
          seed    - (int)      - starting random seed for trials

        Returns:
          fitness - (float)    - reward earned in trial
        """
        if seed >= 0:
            random.seed(seed)
            np.random.seed(seed)
            self.env.seed(seed)

        state = self.env.reset()
        self.env.t = 0
        annOut = act(wVec, aVec, self.nInput, self.nOutput, state)
        action = selectAct(annOut, self.actSelect)
        state, reward, done, info = self.env.step(action)

        # A zero episode length means a single-step (one-shot) evaluation.
        if self.maxEpisodeLength == 0:
            return reward
        else:
            totalReward = reward

        for tStep in range(self.maxEpisodeLength):
            annOut = act(wVec, aVec, self.nInput, self.nOutput, state)
            action = selectAct(annOut, self.actSelect)
            state, reward, done, info = self.env.step(action)
            totalReward += reward
            if view:
                # time.sleep(0.01)
                if self.needsClosed:
                    self.env.render(close=done)
                else:
                    self.env.render()
            if done:
                break
        return totalReward

    # -- 'Weight Agnostic Network' evaluation -------------------------------- -- #
    def setWeights(self, wVec, wVal):
        """Set single shared weight of network
        Args:
          wVec    - (np_array) - weight matrix as a flattened vector
                    [N**2 X 1]
          wVal    - (float)    - value to assign to all weights

        Returns:
          wMat    - (np_array) - weight matrix with single shared weight
                    [N X N]
        """
        # Create connection matrix
        wVec[np.isnan(wVec)] = 0
        dim = int(np.sqrt(np.shape(wVec)[0]))
        cMat = np.reshape(wVec, (dim, dim))
        cMat[cMat != 0] = 1.0

        # Assign value to all weights
        wMat = np.copy(cMat) * wVal
        return wMat

    def obtain_data(self, mnist=False):
        """Return (x_train, y_train, x_test, y_test) for MNIST-256 or Fashion-MNIST."""
        if mnist:
            # construct state for training data
            x_train, y_train = mnist_256(train=True)
            x_test, y_test = mnist_256(train=False)
        else:
            x_train, y_train = fashion_mnist(train=True)
            x_test, y_test = fashion_mnist(train=False)
        return x_train, y_train, x_test, y_test

    def predict(self, wVec, aVec, x, view=False, seed=-1, mnist=False):
        """Run the network on a batch of inputs and return argmax class predictions."""
        if seed >= 0:
            random.seed(seed)
            np.random.seed(seed)
            self.env.seed(seed)

        # train accuracy
        state = x
        annOut = act(wVec, aVec, self.nInput, self.nOutput, state)
        # these are the (soft max) outputs
        action = selectAct(annOut, self.actSelect)
        predictions = np.argmax(action, axis=1)
        return predictions

    def evaluateModel(self, wVec, aVec, hyp, mnist=False, seed=-1, nRep=False, nVals=6, view=False, returnVals=False):
        """Evaluate a WANN over several shared-weight values and report accuracies.

        Returns per-weight train/test accuracies, majority-vote ensemble
        accuracies, the raw per-weight predictions, the ensemble predictions,
        and the ground-truth labels.
        """
        if nRep is False:
            nRep = hyp['alg_nReps']

        # Set weight values to test WANN with
        if (hyp['alg_wDist'] == "standard") and nVals == 6:  # Double, constant, and half signal
            wVals = np.array((-2, -1.0, -0.5, 0.5, 1.0, 2))
        else:
            wVals = np.linspace(-self.absWCap, self.absWCap, nVals)

        x_train, y_train, x_test, y_test = self.obtain_data(mnist)

        train_predictions = np.empty((nVals, y_train.shape[0]), dtype=np.float64)
        test_predictions = np.empty((nVals, y_test.shape[0]), dtype=np.float64)
        # NOTE(review): these are sized (nRep, nVals) but only row 0 is ever
        # written below; rows 1..nRep-1 of the returned np.empty buffers are
        # uninitialized memory — confirm callers only read row 0.
        train_accuracies = np.empty((nRep, nVals), dtype=np.float64)
        test_accuracies = np.empty((nRep, nVals), dtype=np.float64)

        def get_majority_predictions(predictions):
            # Majority vote per sample across the nVals weight settings.
            def _majority(l):
                return max(set(l), key=l.count)

            predictions = [_majority(list(predictions[:, i]))
                           for i in range(predictions.shape[1])]
            return predictions

        def calc_accuracy(predictions, ground_truths):
            # Fraction of predictions matching the ground-truth labels.
            n_correct = np.sum(predictions == ground_truths)
            return n_correct / ground_truths.shape[0]

        for iVal in range(nVals):
            wMat = self.setWeights(wVec, wVals[iVal])
            print('accuracy testing')
            train_prediction = self.predict(
                wMat, aVec, x_train, seed=seed, view=view)
            test_prediction = self.predict(
                wMat, aVec, x_test, seed=seed, view=view)
            train_predictions[iVal, :] = train_prediction
            test_predictions[iVal, :] = test_prediction
            train_accuracies[0, iVal] = calc_accuracy(train_prediction,y_train)
            test_accuracies[0, iVal] = calc_accuracy(test_prediction,y_test)

        train_majority_prediction = get_majority_predictions(train_predictions)
        test_majority_prediction = get_majority_predictions(test_predictions)
        ensemble_accuracy_train = calc_accuracy(train_majority_prediction, y_train)
        ensemble_accuracy_test = calc_accuracy(test_majority_prediction, y_test)
        return train_accuracies, test_accuracies, ensemble_accuracy_train, ensemble_accuracy_test, train_predictions, test_predictions, train_majority_prediction, test_majority_prediction, y_train, y_test

    def usedInputs(self, wVec, aVec):
        """For each output class, collect the (sorted) set of nodes reachable backwards.

        Returns a dict mapping class index -> sorted list of contributing node ids.
        """
        nr_of_classes = 10 #currently hardcoded since both fashion and mnist consist of 10 classes
        wMat = self.setWeights(wVec, 1)
        # NOTE(review): `outputs` is computed but never used below.
        outputs = wMat[:,-nr_of_classes:]
        tree_dict = {}

        def usedInputsHelper(nodes_to_visit, visited_nodes):
            # Depth-limited graph walk: input nodes (id < 256) are recorded
            # but their predecessors are not expanded.
            if len(nodes_to_visit) == 0:
                return visited_nodes
            head = nodes_to_visit[0]
            tail = nodes_to_visit[1:]
            if head < 256: ## we don't have to visit input nodes
                visited_nodes.append(head)
                nodes_to_visit = tail
                return usedInputsHelper(nodes_to_visit, visited_nodes)
            visited_nodes.append(head)
            nodes_to_visit = np.append(tail, np.where(wMat[:,head]>0)[0] )
            return usedInputsHelper(nodes_to_visit, visited_nodes)

        for i in reversed(range(1, nr_of_classes+1)):
            all_nodes = []
            nodes_of_interest = np.where(wMat[:,-i] > 0)[0]
            route = usedInputsHelper(nodes_of_interest, [])
            tree_dict[i%11] = sorted(route)
        return tree_dict

    def getDistFitness(self, wVec, aVec, hyp,
                       seed=-1, nRep=False, nVals=6, view=False, returnVals=False, accuracyTest=False):
        """Get fitness of a single individual with distribution of weights
        Args:
          wVec    - (np_array) - weight matrix as a flattened vector
                    [N**2 X 1]
          aVec    - (np_array) - activation function of each node
                    [N X 1]    - stored as ints (see applyAct in ann.py)
          hyp     - (dict)     - hyperparameters
            ['alg_wDist']      - weight distribution  [standard;fixed;linspace]
            ['alg_absWCap']    - absolute value of highest weight for linspace

        Optional:
          seed    - (int)      - starting random seed for trials
          nReps   - (int)      - number of trials to get average fitness
          nVals   - (int)      - number of weight values to test


        Returns:
          fitness - (float)    - mean reward over all trials
        """
        if nRep is False:
            nRep = hyp['alg_nReps']

        # Set weight values to test WANN with
        if (hyp['alg_wDist'] == "standard") and nVals == 6:  # Double, constant, and half signal
            wVals = np.array((-2, -1.0, -0.5, 0.5, 1.0, 2))
        else:
            wVals = np.linspace(-self.absWCap, self.absWCap, nVals)

        # Get reward from 'reps' rollouts -- test population on same seeds
        reward = np.empty((nRep, nVals))
        train_accuracies = np.empty((nRep, nVals), dtype=np.float64)
        test_accuracies = np.empty((nRep, nVals), dtype=np.float64)
        for iRep in range(nRep):
            for iVal in range(nVals):
                wMat = self.setWeights(wVec, wVals[iVal])
                if accuracyTest:
                    print('accuracy test')
                    # NOTE(review): testIndividualAccuracy is not defined in
                    # this class as shown here — confirm it exists elsewhere.
                    train_accuracy, test_accuracy = self.testIndividualAccuracy(
                        wMat, aVec, seed=seed, view=view)
                    train_accuracies[iRep, iVal] = train_accuracy
                    test_accuracies[iRep, iVal] = test_accuracy
                else:
                    if seed == -1:
                        reward[iRep, iVal] = self.testInd(
                            wMat, aVec, seed=seed, view=view)
                    else:
                        reward[iRep, iVal] = self.testInd(
                            wMat, aVec, seed=seed+iRep, view=view)

        if returnVals is True:
            return np.mean(reward, axis=0), wVals
        return np.mean(reward, axis=0)
| [
"numpy.mean",
"numpy.copy",
"numpy.reshape",
"numpy.where",
"domain.classify_gym.mnist_256",
"numpy.argmax",
"random.seed",
"numpy.array",
"numpy.linspace",
"numpy.sum",
"numpy.empty",
"numpy.random.seed",
"numpy.isnan",
"domain.classify_gym.fashion_mnist",
"numpy.full",
"numpy.shape",... | [((3425, 3453), 'numpy.reshape', 'np.reshape', (['wVec', '(dim, dim)'], {}), '(wVec, (dim, dim))\n', (3435, 3453), True, 'import numpy as np\n'), ((4385, 4410), 'numpy.argmax', 'np.argmax', (['action'], {'axis': '(1)'}), '(action, axis=1)\n', (4394, 4410), True, 'import numpy as np\n'), ((5004, 5057), 'numpy.empty', 'np.empty', (['(nVals, y_train.shape[0])'], {'dtype': 'np.float64'}), '((nVals, y_train.shape[0]), dtype=np.float64)\n', (5012, 5057), True, 'import numpy as np\n'), ((5085, 5137), 'numpy.empty', 'np.empty', (['(nVals, y_test.shape[0])'], {'dtype': 'np.float64'}), '((nVals, y_test.shape[0]), dtype=np.float64)\n', (5093, 5137), True, 'import numpy as np\n'), ((5166, 5207), 'numpy.empty', 'np.empty', (['(nRep, nVals)'], {'dtype': 'np.float64'}), '((nRep, nVals), dtype=np.float64)\n', (5174, 5207), True, 'import numpy as np\n'), ((5234, 5275), 'numpy.empty', 'np.empty', (['(nRep, nVals)'], {'dtype': 'np.float64'}), '((nRep, nVals), dtype=np.float64)\n', (5242, 5275), True, 'import numpy as np\n'), ((9497, 9520), 'numpy.empty', 'np.empty', (['(nRep, nVals)'], {}), '((nRep, nVals))\n', (9505, 9520), True, 'import numpy as np\n'), ((9548, 9589), 'numpy.empty', 'np.empty', (['(nRep, nVals)'], {'dtype': 'np.float64'}), '((nRep, nVals), dtype=np.float64)\n', (9556, 9589), True, 'import numpy as np\n'), ((9616, 9657), 'numpy.empty', 'np.empty', (['(nRep, nVals)'], {'dtype': 'np.float64'}), '((nRep, nVals), dtype=np.float64)\n', (9624, 9657), True, 'import numpy as np\n'), ((10561, 10584), 'numpy.mean', 'np.mean', (['reward'], {'axis': '(0)'}), '(reward, axis=0)\n', (10568, 10584), True, 'import numpy as np\n'), ((1119, 1142), 'domain.make_env.make_env', 'make_env', (['game.env_name'], {}), '(game.env_name)\n', (1127, 1142), False, 'from domain.make_env import make_env\n'), ((1842, 1859), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1853, 1859), False, 'import random\n'), ((1872, 1892), 'numpy.random.seed', 'np.random.seed', 
(['seed'], {}), '(seed)\n', (1886, 1892), True, 'import numpy as np\n'), ((3344, 3358), 'numpy.isnan', 'np.isnan', (['wVec'], {}), '(wVec)\n', (3352, 3358), True, 'import numpy as np\n'), ((3538, 3551), 'numpy.copy', 'np.copy', (['cMat'], {}), '(cMat)\n', (3545, 3551), True, 'import numpy as np\n'), ((3717, 3738), 'domain.classify_gym.mnist_256', 'mnist_256', ([], {'train': '(True)'}), '(train=True)\n', (3726, 3738), False, 'from domain.classify_gym import mnist_256, fashion_mnist\n'), ((3768, 3790), 'domain.classify_gym.mnist_256', 'mnist_256', ([], {'train': '(False)'}), '(train=False)\n', (3777, 3790), False, 'from domain.classify_gym import mnist_256, fashion_mnist\n'), ((3836, 3861), 'domain.classify_gym.fashion_mnist', 'fashion_mnist', ([], {'train': '(True)'}), '(train=True)\n', (3849, 3861), False, 'from domain.classify_gym import mnist_256, fashion_mnist\n'), ((3891, 3917), 'domain.classify_gym.fashion_mnist', 'fashion_mnist', ([], {'train': '(False)'}), '(train=False)\n', (3904, 3917), False, 'from domain.classify_gym import mnist_256, fashion_mnist\n'), ((4075, 4092), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4086, 4092), False, 'import random\n'), ((4105, 4125), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4119, 4125), True, 'import numpy as np\n'), ((4785, 4824), 'numpy.array', 'np.array', (['(-2, -1.0, -0.5, 0.5, 1.0, 2)'], {}), '((-2, -1.0, -0.5, 0.5, 1.0, 2))\n', (4793, 4824), True, 'import numpy as np\n'), ((4859, 4906), 'numpy.linspace', 'np.linspace', (['(-self.absWCap)', 'self.absWCap', 'nVals'], {}), '(-self.absWCap, self.absWCap, nVals)\n', (4870, 4906), True, 'import numpy as np\n'), ((5651, 5687), 'numpy.sum', 'np.sum', (['(predictions == ground_truths)'], {}), '(predictions == ground_truths)\n', (5657, 5687), True, 'import numpy as np\n'), ((9282, 9321), 'numpy.array', 'np.array', (['(-2, -1.0, -0.5, 0.5, 1.0, 2)'], {}), '((-2, -1.0, -0.5, 0.5, 1.0, 2))\n', (9290, 9321), True, 'import numpy as np\n'), 
((9356, 9403), 'numpy.linspace', 'np.linspace', (['(-self.absWCap)', 'self.absWCap', 'nVals'], {}), '(-self.absWCap, self.absWCap, nVals)\n', (9367, 9403), True, 'import numpy as np\n'), ((908, 921), 'numpy.full', 'np.full', (['(1)', '(1)'], {}), '(1, 1)\n', (915, 921), True, 'import numpy as np\n'), ((7888, 7913), 'numpy.where', 'np.where', (['(wMat[:, -i] > 0)'], {}), '(wMat[:, -i] > 0)\n', (7896, 7913), True, 'import numpy as np\n'), ((10515, 10538), 'numpy.mean', 'np.mean', (['reward'], {'axis': '(0)'}), '(reward, axis=0)\n', (10522, 10538), True, 'import numpy as np\n'), ((3390, 3404), 'numpy.shape', 'np.shape', (['wVec'], {}), '(wVec)\n', (3398, 3404), True, 'import numpy as np\n'), ((7676, 7703), 'numpy.where', 'np.where', (['(wMat[:, head] > 0)'], {}), '(wMat[:, head] > 0)\n', (7684, 7703), True, 'import numpy as np\n')] |
import os
import cv2
import copy
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa
from keras.utils import Sequence
import xml.etree.ElementTree as ET
from yok.utils import BoundBox, normalize, bbox_iou
def parse_annotation(ann_dir, img_dir, labels=None):
    """Parse Pascal-VOC style XML annotation files.

    Fixes over the previous version: the mutable default argument
    ``labels=[]`` is replaced with ``None``; the annotation path is built
    with ``os.path.join`` instead of string concatenation (so ``ann_dir``
    no longer has to end with a separator); the per-object debug ``print``
    and the unused ``count`` accumulator are removed.

    Args:
        ann_dir: directory containing the ``.xml`` annotation files.
        img_dir: prefix prepended to each image filename found in the XML.
        labels: optional list of label names to keep; when given, objects
            whose name is not listed are discarded (their name is still
            counted in ``seen_labels``, matching the original behavior).

    Returns:
        (all_imgs, seen_labels): ``all_imgs`` is a list of dicts, each with
        'filename', 'width', 'height' and an 'object' list of bounding
        boxes ('name', 'xmin', 'ymin', 'xmax', 'ymax'); ``seen_labels``
        maps each label name to the number of times it was seen.
    """
    if labels is None:
        labels = []
    all_imgs = []
    seen_labels = {}

    for ann in sorted(os.listdir(ann_dir)):
        img = {'object': []}

        tree = ET.parse(os.path.join(ann_dir, ann))

        for elem in tree.iter():
            if 'filename' in elem.tag:
                img['filename'] = img_dir + elem.text
            if 'width' in elem.tag:
                img['width'] = int(elem.text)
            if 'height' in elem.tag:
                img['height'] = int(elem.text)
            if 'object' in elem.tag or 'part' in elem.tag:
                obj = {}

                for attr in list(elem):
                    if 'name' in attr.tag:
                        obj['name'] = attr.text
                        # Count every label, even ones filtered out below.
                        seen_labels[obj['name']] = seen_labels.get(obj['name'], 0) + 1

                        if len(labels) > 0 and obj['name'] not in labels:
                            break  # skip this object entirely
                        else:
                            img['object'] += [obj]

                    if 'bndbox' in attr.tag:
                        for dim in list(attr):
                            if 'xmin' in dim.tag:
                                obj['xmin'] = int(round(float(dim.text)))
                            if 'ymin' in dim.tag:
                                obj['ymin'] = int(round(float(dim.text)))
                            if 'xmax' in dim.tag:
                                obj['xmax'] = int(round(float(dim.text)))
                            if 'ymax' in dim.tag:
                                obj['ymax'] = int(round(float(dim.text)))

        if len(img['object']) > 0:
            all_imgs += [img]

    return all_imgs, seen_labels
class BatchGenerator(Sequence):
    """Keras Sequence that yields YOLO-style training batches.

    Each item is ``([x_batch, b_batch], y_batch)`` where:
      x_batch: (batch, image_h, image_w, 3) input images,
      b_batch: (batch, 1, 1, 1, true_box_buffer, 4) ground-truth boxes,
      y_batch: (batch, grid_h, grid_w, box_no, 4 + 1 + class_no) target tensor.

    Fix over the previous version: in ``aug_image`` the Python-2-style
    ``print`` was split across two lines, making it a no-op bare-name
    reference in Python 3 — the "Cannot find" message was never printed.
    """

    def __init__(self, images,
                 config,
                 shuffle=True,
                 jitter=True,
                 norm=None):
        """Store the dataset/config and build the imgaug augmentation pipeline.

        Args:
            images: list of image dicts as produced by parse_annotation.
            config: dict with image/grid sizes, anchors, class names, etc.
            shuffle: shuffle the image list on construction and epoch end.
            jitter: apply random scale/crop/flip + imgaug augmentations.
            norm: optional normalization callable applied to each image.
        """
        self.generator = None

        self.images = images
        self.config = config

        self.shuffle = shuffle
        self.jitter = jitter
        self.norm = norm

        # One BoundBox anchor per (w, h) pair in config['anchors'].
        self.anchors = [BoundBox(0, 0, config['anchors'][2 * i], config['anchors'][2 * i + 1]) for i in
                        range(int(len(config['anchors']) // 2))]

        sometimes = lambda aug: iaa.Sometimes(0.5, aug)

        # Photometric augmentations; geometric jitter is done manually in aug_image.
        self.aug_pipe = iaa.Sequential(
            [
                sometimes(iaa.Affine(
                )),
                iaa.SomeOf((0, 5),
                           [
                               iaa.OneOf([
                                   iaa.GaussianBlur((0, 3.0)),  # blur images with a sigma between 0 and 3.0
                                   iaa.AverageBlur(k=(2, 7)),
                                   iaa.MedianBlur(k=(3, 11)),
                               ]),
                               iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),  # sharpen images
                               iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                               iaa.OneOf([
                                   iaa.Dropout((0.01, 0.1), per_channel=0.5),  # randomly remove up to 10% of the pixels
                               ]),
                               iaa.Add((-10, 10), per_channel=0.5),
                               iaa.Multiply((0.5, 1.5), per_channel=0.5),
                               iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),  # improve or worsen the contrast
                           ],
                           random_order=True
                           )
            ],
            random_order=True
        )

        if shuffle: np.random.shuffle(self.images)

    def __len__(self):
        """Number of batches per epoch (ceil of dataset size / batch size)."""
        return int(np.ceil(float(len(self.images)) / self.config['batch_size']))

    def __getitem__(self, idx):
        """Build and return batch ``idx`` as ([x_batch, b_batch], y_batch)."""
        l_bound = idx * self.config['batch_size']
        r_bound = (idx + 1) * self.config['batch_size']

        # Clamp the last batch so it still has batch_size elements.
        if r_bound > len(self.images):
            r_bound = len(self.images)
            l_bound = r_bound - self.config['batch_size']

        instance_count = 0

        x_batch = np.zeros((r_bound - l_bound, self.config['image_h'], self.config['image_w'], 3))  # input images
        b_batch = np.zeros((r_bound - l_bound, 1, 1, 1, self.config['true_box_buffer'],
                            4))  # list of GT boxes
        y_batch = np.zeros((r_bound - l_bound, self.config['grid_h'], self.config['grid_w'], self.config['box_no'],
                            4 + 1 + self.config['class_no']))  # desired network output

        for train_instance in self.images[l_bound:r_bound]:
            # augment input image and fix object's position and size
            img, all_objs = self.aug_image(train_instance, jitter=self.jitter)

            true_box_index = 0

            for obj in all_objs:
                if obj['xmax'] > obj['xmin'] and obj['ymax'] > obj['ymin'] and obj['name'] in self.config['class_names']:
                    # Box center in grid-cell units.
                    center_x = .5 * (obj['xmin'] + obj['xmax'])
                    center_x = center_x / (float(self.config['image_w']) / self.config['grid_w'])
                    center_y = .5 * (obj['ymin'] + obj['ymax'])
                    center_y = center_y / (float(self.config['image_h']) / self.config['grid_h'])

                    grid_x = int(np.floor(center_x))
                    grid_y = int(np.floor(center_y))

                    if grid_x < self.config['grid_w'] and grid_y < self.config['grid_h']:
                        obj_indx = self.config['class_names'].index(obj['name'])

                        # Box size in grid-cell units.
                        center_w = (obj['xmax'] - obj['xmin']) / (
                                float(self.config['image_w']) / self.config['grid_w'])
                        center_h = (obj['ymax'] - obj['ymin']) / (
                                float(self.config['image_h']) / self.config['grid_h'])

                        box = [center_x, center_y, center_w, center_h]

                        # Find the anchor that best matches this box's shape.
                        best_anchor = -1
                        max_iou = -1

                        shifted_box = BoundBox(0,
                                               0,
                                               center_w,
                                               center_h)

                        for i in range(len(self.anchors)):
                            anchor = self.anchors[i]
                            iou = bbox_iou(shifted_box, anchor)

                            if max_iou < iou:
                                best_anchor = i
                                max_iou = iou

                        # Write ground truth into the target tensor.
                        y_batch[instance_count, grid_y, grid_x, best_anchor, 0:4] = box
                        y_batch[instance_count, grid_y, grid_x, best_anchor, 4] = 1.
                        y_batch[instance_count, grid_y, grid_x, best_anchor, 5 + obj_indx] = 1

                        # Ring buffer of true boxes for the custom loss.
                        b_batch[instance_count, 0, 0, 0, true_box_index] = box

                        true_box_index += 1
                        true_box_index = true_box_index % self.config['true_box_buffer']

            if self.norm != None:
                x_batch[instance_count] = self.norm(img)
            else:
                # No normalizer: draw the boxes for visual inspection.
                for obj in all_objs:
                    if obj['xmax'] > obj['xmin'] and obj['ymax'] > obj['ymin']:
                        cv2.rectangle(img[:, :, ::-1], (obj['xmin'], obj['ymin']), (obj['xmax'], obj['ymax']),
                                      (255, 0, 0), 3)
                        cv2.putText(img[:, :, ::-1], obj['name'],
                                    (obj['xmin'] + 2, obj['ymin'] + 12),
                                    0, 1.2e-3 * img.shape[0],
                                    (0, 255, 0), 2)

                x_batch[instance_count] = img

            instance_count += 1

        return [x_batch, b_batch], y_batch

    def on_epoch_end(self):
        """Reshuffle the image list between epochs when shuffling is enabled."""
        if self.shuffle: np.random.shuffle(self.images)

    def aug_image(self, train_instance, jitter):
        """Load one image, optionally jitter/augment it, and rescale its boxes.

        Returns (image, objects) with the image resized to
        (image_h, image_w), channels reversed, and every box mapped to the
        resized coordinate frame (clipped to the image bounds).
        """
        image_name = train_instance['filename']
        image = cv2.imread(image_name)

        if image is None:
            # BUGFIX: was a Python-2-style two-line print (a no-op in Python 3).
            # NOTE(review): execution still crashes on image.shape below when
            # the file is missing — consider raising instead.
            print('Cannot find ', image_name)

        h, w, c = image.shape
        all_objs = copy.deepcopy(train_instance['object'])

        if jitter:
            # scale the image up to 10% and crop back to the original size
            scale = np.random.uniform() / 10. + 1.
            image = cv2.resize(image, (0, 0), fx=scale, fy=scale)

            # translate the image by a random offset inside the scaled margin
            max_offx = (scale - 1.) * w
            max_offy = (scale - 1.) * h
            offx = int(np.random.uniform() * max_offx)
            offy = int(np.random.uniform() * max_offy)

            image = image[offy: (offy + h), offx: (offx + w)]

            # flip the image left-right with probability 0.5
            flip = np.random.binomial(1, .5)
            if flip > 0.5: image = cv2.flip(image, 1)

            image = self.aug_pipe.augment_image(image)

        # resize the image to standard size
        image = cv2.resize(image, (self.config['image_h'], self.config['image_w']))
        image = image[:, :, ::-1]

        # fix object's position and size to the resized frame
        for obj in all_objs:
            for attr in ['xmin', 'xmax']:
                if jitter: obj[attr] = int(obj[attr] * scale - offx)

                obj[attr] = int(obj[attr] * float(self.config['image_w']) / w)
                obj[attr] = max(min(obj[attr], self.config['image_w']), 0)

            for attr in ['ymin', 'ymax']:
                if jitter: obj[attr] = int(obj[attr] * scale - offy)

                obj[attr] = int(obj[attr] * float(self.config['image_h']) / h)
                obj[attr] = max(min(obj[attr], self.config['image_h']), 0)

            if jitter and flip > 0.5:
                xmin = obj['xmin']
                obj['xmin'] = self.config['image_w'] - obj['xmax']
                obj['xmax'] = self.config['image_w'] - xmin

        return image, all_objs
| [
"cv2.rectangle",
"imgaug.augmenters.AverageBlur",
"imgaug.augmenters.GaussianBlur",
"yok.utils.bbox_iou",
"copy.deepcopy",
"numpy.random.binomial",
"os.listdir",
"xml.etree.ElementTree.parse",
"imgaug.augmenters.Add",
"imgaug.augmenters.Sharpen",
"imgaug.augmenters.AdditiveGaussianNoise",
"num... | [((374, 393), 'os.listdir', 'os.listdir', (['ann_dir'], {}), '(ann_dir)\n', (384, 393), False, 'import os\n'), ((490, 513), 'xml.etree.ElementTree.parse', 'ET.parse', (['(ann_dir + ann)'], {}), '(ann_dir + ann)\n', (498, 513), True, 'import xml.etree.ElementTree as ET\n'), ((4677, 4762), 'numpy.zeros', 'np.zeros', (["(r_bound - l_bound, self.config['image_h'], self.config['image_w'], 3)"], {}), "((r_bound - l_bound, self.config['image_h'], self.config['image_w'], 3)\n )\n", (4685, 4762), True, 'import numpy as np\n'), ((4793, 4866), 'numpy.zeros', 'np.zeros', (["(r_bound - l_bound, 1, 1, 1, self.config['true_box_buffer'], 4)"], {}), "((r_bound - l_bound, 1, 1, 1, self.config['true_box_buffer'], 4))\n", (4801, 4866), True, 'import numpy as np\n'), ((4981, 5116), 'numpy.zeros', 'np.zeros', (["(r_bound - l_bound, self.config['grid_h'], self.config['grid_w'], self.\n config['box_no'], 4 + 1 + self.config['class_no'])"], {}), "((r_bound - l_bound, self.config['grid_h'], self.config['grid_w'],\n self.config['box_no'], 4 + 1 + self.config['class_no']))\n", (4989, 5116), True, 'import numpy as np\n'), ((8602, 8624), 'cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (8612, 8624), False, 'import cv2\n'), ((8749, 8788), 'copy.deepcopy', 'copy.deepcopy', (["train_instance['object']"], {}), "(train_instance['object'])\n", (8762, 8788), False, 'import copy\n'), ((9371, 9438), 'cv2.resize', 'cv2.resize', (['image', "(self.config['image_h'], self.config['image_w'])"], {}), "(image, (self.config['image_h'], self.config['image_w']))\n", (9381, 9438), False, 'import cv2\n'), ((2632, 2702), 'yok.utils.BoundBox', 'BoundBox', (['(0)', '(0)', "config['anchors'][2 * i]", "config['anchors'][2 * i + 1]"], {}), "(0, 0, config['anchors'][2 * i], config['anchors'][2 * i + 1])\n", (2640, 2702), False, 'from yok.utils import BoundBox, normalize, bbox_iou\n'), ((2813, 2836), 'imgaug.augmenters.Sometimes', 'iaa.Sometimes', (['(0.5)', 'aug'], {}), '(0.5, aug)\n', (2826, 
2836), True, 'from imgaug import augmenters as iaa\n'), ((4203, 4233), 'numpy.random.shuffle', 'np.random.shuffle', (['self.images'], {}), '(self.images)\n', (4220, 4233), True, 'import numpy as np\n'), ((8453, 8483), 'numpy.random.shuffle', 'np.random.shuffle', (['self.images'], {}), '(self.images)\n', (8470, 8483), True, 'import numpy as np\n'), ((8884, 8929), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': 'scale', 'fy': 'scale'}), '(image, (0, 0), fx=scale, fy=scale)\n', (8894, 8929), False, 'import cv2\n'), ((9213, 9239), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(0.5)'], {}), '(1, 0.5)\n', (9231, 9239), True, 'import numpy as np\n'), ((9275, 9293), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (9283, 9293), False, 'import cv2\n'), ((2922, 2934), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {}), '()\n', (2932, 2934), True, 'from imgaug import augmenters as iaa\n'), ((8832, 8851), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8849, 8851), True, 'import numpy as np\n'), ((9038, 9057), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9055, 9057), True, 'import numpy as np\n'), ((9094, 9113), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9111, 9113), True, 'import numpy as np\n'), ((3369, 3419), 'imgaug.augmenters.Sharpen', 'iaa.Sharpen', ([], {'alpha': '(0, 1.0)', 'lightness': '(0.75, 1.5)'}), '(alpha=(0, 1.0), lightness=(0.75, 1.5))\n', (3380, 3419), True, 'from imgaug import augmenters as iaa\n'), ((3471, 3545), 'imgaug.augmenters.AdditiveGaussianNoise', 'iaa.AdditiveGaussianNoise', ([], {'loc': '(0)', 'scale': '(0.0, 0.05 * 255)', 'per_channel': '(0.5)'}), '(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5)\n', (3496, 3545), True, 'from imgaug import augmenters as iaa\n'), ((3781, 3816), 'imgaug.augmenters.Add', 'iaa.Add', (['(-10, 10)'], {'per_channel': '(0.5)'}), '((-10, 10), per_channel=0.5)\n', (3788, 3816), True, 'from imgaug import augmenters as iaa\n'), ((3850, 
3891), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.5, 1.5)'], {'per_channel': '(0.5)'}), '((0.5, 1.5), per_channel=0.5)\n', (3862, 3891), True, 'from imgaug import augmenters as iaa\n'), ((3925, 3979), 'imgaug.augmenters.ContrastNormalization', 'iaa.ContrastNormalization', (['(0.5, 2.0)'], {'per_channel': '(0.5)'}), '((0.5, 2.0), per_channel=0.5)\n', (3950, 3979), True, 'from imgaug import augmenters as iaa\n'), ((5868, 5886), 'numpy.floor', 'np.floor', (['center_x'], {}), '(center_x)\n', (5876, 5886), True, 'import numpy as np\n'), ((5922, 5940), 'numpy.floor', 'np.floor', (['center_y'], {}), '(center_y)\n', (5930, 5940), True, 'import numpy as np\n'), ((6612, 6646), 'yok.utils.BoundBox', 'BoundBox', (['(0)', '(0)', 'center_w', 'center_h'], {}), '(0, 0, center_w, center_h)\n', (6620, 6646), False, 'from yok.utils import BoundBox, normalize, bbox_iou\n'), ((7867, 7973), 'cv2.rectangle', 'cv2.rectangle', (['img[:, :, ::-1]', "(obj['xmin'], obj['ymin'])", "(obj['xmax'], obj['ymax'])", '(255, 0, 0)', '(3)'], {}), "(img[:, :, ::-1], (obj['xmin'], obj['ymin']), (obj['xmax'],\n obj['ymax']), (255, 0, 0), 3)\n", (7880, 7973), False, 'import cv2\n'), ((8034, 8159), 'cv2.putText', 'cv2.putText', (['img[:, :, ::-1]', "obj['name']", "(obj['xmin'] + 2, obj['ymin'] + 12)", '(0)', '(0.0012 * img.shape[0])', '(0, 255, 0)', '(2)'], {}), "(img[:, :, ::-1], obj['name'], (obj['xmin'] + 2, obj['ymin'] + \n 12), 0, 0.0012 * img.shape[0], (0, 255, 0), 2)\n", (8045, 8159), False, 'import cv2\n'), ((6942, 6971), 'yok.utils.bbox_iou', 'bbox_iou', (['shifted_box', 'anchor'], {}), '(shifted_box, anchor)\n', (6950, 6971), False, 'from yok.utils import BoundBox, normalize, bbox_iou\n'), ((3101, 3127), 'imgaug.augmenters.GaussianBlur', 'iaa.GaussianBlur', (['(0, 3.0)'], {}), '((0, 3.0))\n', (3117, 3127), True, 'from imgaug import augmenters as iaa\n'), ((3211, 3236), 'imgaug.augmenters.AverageBlur', 'iaa.AverageBlur', ([], {'k': '(2, 7)'}), '(k=(2, 7))\n', (3226, 3236), True, 'from 
imgaug import augmenters as iaa\n'), ((3274, 3299), 'imgaug.augmenters.MedianBlur', 'iaa.MedianBlur', ([], {'k': '(3, 11)'}), '(k=(3, 11))\n', (3288, 3299), True, 'from imgaug import augmenters as iaa\n'), ((3627, 3668), 'imgaug.augmenters.Dropout', 'iaa.Dropout', (['(0.01, 0.1)'], {'per_channel': '(0.5)'}), '((0.01, 0.1), per_channel=0.5)\n', (3638, 3668), True, 'from imgaug import augmenters as iaa\n')] |
# Algoritmos y Complejidad
# Profesor: <NAME>
# Alumno: <NAME>
import datetime as time
import numpy as np
from matplotlib import pyplot as plt
import AlgoritmosOrdenacion as sort
# Configuration
inicio = 0  # initial array size
aumento = 1  # increment of the array size between measurements
tamMax = 1000001  # maximum array size (exclusive upper bound)
#arr = [] # randomly generated array
bubbleT = []  # bubble sort timings (microseconds)
insertionT = []  # insertion sort timings (microseconds)
mergeT = []  # merge sort timings (microseconds)
tamX = []  # x-axis values of the plot (array sizes)
# Times the sorting algorithms and returns an array with the elapsed times
def ProbarOrdenacion(n):
    """Time bubble, insertion and merge sort on fresh random arrays of size n.

    Fix over the previous version: the three nearly identical timing
    sections are collapsed into one helper, and the dead ``arr = []``
    initialization is removed.

    Returns:
        [bubble_us, insertion_us, merge_us]: elapsed time of each
        algorithm in whole microseconds, each measured on its own
        freshly generated random array.
    """
    def _cronometrar(ordenar):
        # Time a single sort call on a fresh random array (microseconds).
        arr = np.random.randint(1, 1000, size=n)
        a = time.datetime.now()
        ordenar(arr)
        b = time.datetime.now()
        return int((b - a).total_seconds() * 1000000)

    return [
        _cronometrar(sort.BubbleSort),
        _cronometrar(sort.InsertionSort),
        _cronometrar(lambda arr: sort.MergeSort(arr, 0, n - 1)),
    ]
# Draws the timing chart
def dibujar():
    """Plot the running-time curve of each sorting algorithm on one figure."""
    series = ((bubbleT, 'b'), (insertionT, 'r'), (mergeT, 'g'))
    for tiempos, color in series:
        plt.plot(tamX, tiempos, color)
    plt.title("Algoritmos de ordenacion")
    plt.xlabel("Tamano del arreglo")
    plt.ylabel("Tiempo")
    plt.legend(["bubble sort", "insertion sort", "merge sort"])
# Entry point
def main():
    """Benchmark the sorting algorithms for growing sizes, refreshing the plot live."""
    for tam in range(inicio, tamMax, aumento):
        tiempos = ProbarOrdenacion(tam)
        bubbleT.append(tiempos[0])
        insertionT.append(tiempos[1])
        mergeT.append(tiempos[2])
        tamX.append(tam)
        dibujar()
        plt.pause(0.05)
    print("----------------------------------")
    print("Tiempos:")
    print(tamX)
    print("Bubble Sort:")
    print(bubbleT)
    print("Insertion Sort:")
    print(insertionT)
    print("Merge Sort:")
    print(mergeT)
main()  # run the benchmark loop
dibujar()  # final redraw of the complete curves
plt.show()  # keep the figure window open
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"AlgoritmosOrdenacion.InsertionSort",
"AlgoritmosOrdenacion.MergeSort",
"datetime.datetime.now",
"numpy.random.randint",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.title",
"AlgoritmosOrdenacion.BubbleSort",
"mat... | [((2217, 2227), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2225, 2227), True, 'from matplotlib import pyplot as plt\n'), ((755, 789), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {'size': 'n'}), '(1, 1000, size=n)\n', (772, 789), True, 'import numpy as np\n'), ((798, 817), 'datetime.datetime.now', 'time.datetime.now', ([], {}), '()\n', (815, 817), True, 'import datetime as time\n'), ((822, 842), 'AlgoritmosOrdenacion.BubbleSort', 'sort.BubbleSort', (['arr'], {}), '(arr)\n', (837, 842), True, 'import AlgoritmosOrdenacion as sort\n'), ((851, 870), 'datetime.datetime.now', 'time.datetime.now', ([], {}), '()\n', (868, 870), True, 'import datetime as time\n'), ((956, 990), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {'size': 'n'}), '(1, 1000, size=n)\n', (973, 990), True, 'import numpy as np\n'), ((999, 1018), 'datetime.datetime.now', 'time.datetime.now', ([], {}), '()\n', (1016, 1018), True, 'import datetime as time\n'), ((1023, 1046), 'AlgoritmosOrdenacion.InsertionSort', 'sort.InsertionSort', (['arr'], {}), '(arr)\n', (1041, 1046), True, 'import AlgoritmosOrdenacion as sort\n'), ((1055, 1074), 'datetime.datetime.now', 'time.datetime.now', ([], {}), '()\n', (1072, 1074), True, 'import datetime as time\n'), ((1157, 1191), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000)'], {'size': 'n'}), '(1, 1000, size=n)\n', (1174, 1191), True, 'import numpy as np\n'), ((1200, 1219), 'datetime.datetime.now', 'time.datetime.now', ([], {}), '()\n', (1217, 1219), True, 'import datetime as time\n'), ((1224, 1253), 'AlgoritmosOrdenacion.MergeSort', 'sort.MergeSort', (['arr', '(0)', '(n - 1)'], {}), '(arr, 0, n - 1)\n', (1238, 1253), True, 'import AlgoritmosOrdenacion as sort\n'), ((1260, 1279), 'datetime.datetime.now', 'time.datetime.now', ([], {}), '()\n', (1277, 1279), True, 'import datetime as time\n'), ((1413, 1441), 'matplotlib.pyplot.plot', 'plt.plot', (['tamX', 'bubbleT', '"""b"""'], {}), "(tamX, 
bubbleT, 'b')\n", (1421, 1441), True, 'from matplotlib import pyplot as plt\n'), ((1446, 1477), 'matplotlib.pyplot.plot', 'plt.plot', (['tamX', 'insertionT', '"""r"""'], {}), "(tamX, insertionT, 'r')\n", (1454, 1477), True, 'from matplotlib import pyplot as plt\n'), ((1482, 1509), 'matplotlib.pyplot.plot', 'plt.plot', (['tamX', 'mergeT', '"""g"""'], {}), "(tamX, mergeT, 'g')\n", (1490, 1509), True, 'from matplotlib import pyplot as plt\n'), ((1514, 1551), 'matplotlib.pyplot.title', 'plt.title', (['"""Algoritmos de ordenacion"""'], {}), "('Algoritmos de ordenacion')\n", (1523, 1551), True, 'from matplotlib import pyplot as plt\n'), ((1556, 1588), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Tamano del arreglo"""'], {}), "('Tamano del arreglo')\n", (1566, 1588), True, 'from matplotlib import pyplot as plt\n'), ((1593, 1613), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Tiempo"""'], {}), "('Tiempo')\n", (1603, 1613), True, 'from matplotlib import pyplot as plt\n'), ((1618, 1677), 'matplotlib.pyplot.legend', 'plt.legend', (["['bubble sort', 'insertion sort', 'merge sort']"], {}), "(['bubble sort', 'insertion sort', 'merge sort'])\n", (1628, 1677), True, 'from matplotlib import pyplot as plt\n'), ((1955, 1970), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.05)'], {}), '(0.05)\n', (1964, 1970), True, 'from matplotlib import pyplot as plt\n')] |
import matplotlib
matplotlib.use('Agg')
import numpy
from amuse.io import write_set_to_file
from amuse.io import read_set_from_file
from amuse.lab import Particles, units
from amuse.ext.protodisk import ProtoPlanetaryDisk
from amuse.lab import nbody_system
from amuse.lab import new_powerlaw_mass_distribution
from matplotlib import pyplot
def planetary_system():
# Generate the sun, Saturn, and Titan. Positions and velocities acquired from textbook.
particles = Particles(3)
sun = particles[0]
sun.mass = 1.0 | units.MSun
sun.radius = 1.0 | units.RSun
sun.position = (0.005717, -0.00538, -0.0000213) | units.AU
sun.velocity = (7.893, 11.894, 0.20642) | (units.m/units.s)
saturn = particles[1]
saturn.mass = 95.16 | units.MEarth
saturn.radius = 763.59 | units.REarth
saturn.position = (-2.075, 8.7812, 0.3273) | units.AU
saturn.velocity = (-9.9109, 2.236, -0.2398) | units.kms
titan = particles[2]
titan.mass = 0.0225 | units.MEarth
titan.radius = 0.404 | units.REarth
titan.position = (-2.0668, 8.7264, 0.3355) | units.AU # Saturn's postion plus 1221870 km in AU.
titan.velocity = (-4.3409, 3.334, 5.3302) | units.kms # Saturn's velocity plus 5.57.
particles.move_to_center()
numpy.random.seed(1)
# Generate the disk.
disk_a = 1221870 | units.km # titan's semi-major axis
disk_e = 0.0288 # eccentricity
hill_radius = disk_a.value_in(units.AU) * (1 - disk_e) * (titan.mass / (3 * saturn.mass))**(1/3)
disk_rmin = 0.03 * hill_radius
disk_rmax = 0.5 * hill_radius
disk_mass = new_powerlaw_mass_distribution(1000,
2.2e-5*titan.mass,
2.2e-3*titan.mass,
alpha=-2.0)
Mtot = disk_mass.sum()
converter = nbody_system.nbody_to_si(titan.mass, 1. | units.AU)
disk = ProtoPlanetaryDisk(100,
convert_nbody=converter,
densitypower=1.5,
Rmin=disk_rmin,
Rmax=disk_rmax,
q_out=10.,
discfraction=0.1).result
disk.move_to_center()
disk.x += titan.x
disk.y += titan.y
disk.vx += titan.vx
disk.vy += titan.vy
planet_attributes=["x", "y", "z", "vx", "vy", "vz",
"mass", "semimajor_axis", "eccentricity"]
disk_attributes=["x", "y", "z", "vx", "vy", "vz",
"mass", "u", "rho", "h_smooth"]
write_set_to_file(particles, 'SunSaturnTitan_i0000.amuse', 'amuse', attribute_names=planet_attributes)
write_set_to_file(disk, 'disk_i0000.amuse', 'amuse', attribute_names=disk_attributes)
#test = read_set_from_file('SunSaturnTitan_i0000.amuse', 'amuse')
#print test
return particles, disk
#if __name__ in ('__main__'):
# planetary_system()
| [
"amuse.lab.Particles",
"matplotlib.use",
"amuse.lab.new_powerlaw_mass_distribution",
"amuse.lab.nbody_system.nbody_to_si",
"numpy.random.seed",
"amuse.ext.protodisk.ProtoPlanetaryDisk",
"amuse.io.write_set_to_file"
] | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((475, 487), 'amuse.lab.Particles', 'Particles', (['(3)'], {}), '(3)\n', (484, 487), False, 'from amuse.lab import Particles, units\n'), ((1261, 1281), 'numpy.random.seed', 'numpy.random.seed', (['(1)'], {}), '(1)\n', (1278, 1281), False, 'import numpy\n'), ((1589, 1685), 'amuse.lab.new_powerlaw_mass_distribution', 'new_powerlaw_mass_distribution', (['(1000)', '(2.2e-05 * titan.mass)', '(0.0022 * titan.mass)'], {'alpha': '(-2.0)'}), '(1000, 2.2e-05 * titan.mass, 0.0022 * titan.\n mass, alpha=-2.0)\n', (1619, 1685), False, 'from amuse.lab import new_powerlaw_mass_distribution\n'), ((1851, 1903), 'amuse.lab.nbody_system.nbody_to_si', 'nbody_system.nbody_to_si', (['titan.mass', '(1.0 | units.AU)'], {}), '(titan.mass, 1.0 | units.AU)\n', (1875, 1903), False, 'from amuse.lab import nbody_system\n'), ((2573, 2679), 'amuse.io.write_set_to_file', 'write_set_to_file', (['particles', '"""SunSaturnTitan_i0000.amuse"""', '"""amuse"""'], {'attribute_names': 'planet_attributes'}), "(particles, 'SunSaturnTitan_i0000.amuse', 'amuse',\n attribute_names=planet_attributes)\n", (2590, 2679), False, 'from amuse.io import write_set_to_file\n'), ((2680, 2770), 'amuse.io.write_set_to_file', 'write_set_to_file', (['disk', '"""disk_i0000.amuse"""', '"""amuse"""'], {'attribute_names': 'disk_attributes'}), "(disk, 'disk_i0000.amuse', 'amuse', attribute_names=\n disk_attributes)\n", (2697, 2770), False, 'from amuse.io import write_set_to_file\n'), ((1915, 2048), 'amuse.ext.protodisk.ProtoPlanetaryDisk', 'ProtoPlanetaryDisk', (['(100)'], {'convert_nbody': 'converter', 'densitypower': '(1.5)', 'Rmin': 'disk_rmin', 'Rmax': 'disk_rmax', 'q_out': '(10.0)', 'discfraction': '(0.1)'}), '(100, convert_nbody=converter, densitypower=1.5, Rmin=\n disk_rmin, Rmax=disk_rmax, q_out=10.0, discfraction=0.1)\n', (1933, 2048), False, 'from amuse.ext.protodisk import 
ProtoPlanetaryDisk\n')] |
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(x, 0)
def d_relu_dz(z):
return (z > 0) * 1
def leaky_relu(Z):
return np.maximum(0.1 * Z, Z)
def d_leaky_relu_dz(z):
return 1 if z >= 0 else 0.01
def tanh(Z):
return np.tanh(Z)
def d_tanh_dz(z):
return 1 - (np.square(np.tanh(z)))
def d_sigmoid_dz(z):
return sigmoid(z) * (1 - sigmoid(z))
def mse(Y_hat, Y):
return np.sum((np.square(Y_hat - Y)))
def d_mse_da(a, y):
return 2*(a - y)
| [
"numpy.exp",
"numpy.maximum",
"numpy.tanh",
"numpy.square"
] | [((95, 111), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (105, 111), True, 'import numpy as np\n'), ((187, 209), 'numpy.maximum', 'np.maximum', (['(0.1 * Z)', 'Z'], {}), '(0.1 * Z, Z)\n', (197, 209), True, 'import numpy as np\n'), ((295, 305), 'numpy.tanh', 'np.tanh', (['Z'], {}), '(Z)\n', (302, 305), True, 'import numpy as np\n'), ((469, 489), 'numpy.square', 'np.square', (['(Y_hat - Y)'], {}), '(Y_hat - Y)\n', (478, 489), True, 'import numpy as np\n'), ((57, 67), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (63, 67), True, 'import numpy as np\n'), ((352, 362), 'numpy.tanh', 'np.tanh', (['z'], {}), '(z)\n', (359, 362), True, 'import numpy as np\n')] |
""" Module for holding common checking utility functions. """
# Copyright (c) 2012-2013 <NAME>
#
# This file is part of the KMCLib project distributed under the terms of the
# GNU General Public License version 3, see <http://www.gnu.org/licenses/>.
#
import numpy
from KMCLib.Exceptions.Error import Error
def checkIndexWithinBounds(index, list, msg=None):
"""
Check that the given index is within the bounds of the list.
:param index: The index to check.
:type index: int
:param list: The list to check against.
:param msg: The error message to print. If none is given the string default to "Index out of range."
:type msg: string
:returns: The valid index.
"""
# Set the default.
if msg is None:
msg = "Index out of range."
# Stop if outside bounds.
if (index < 0 or index >= len(list)):
raise Error(msg)
# Return if passed.
return index
def checkCellVectors(cell_vectors):
"""
Check that the cell vectors are of the correct dimensions and not fully linearly dependent.
:param cell_vectors: The cell vectors to test.
:returns: The valid cell vectors as a 3x3 numpy array.
"""
# If this is not a numpy array, check the list and convert.
if not isinstance(cell_vectors, numpy.ndarray):
# If it is a list, check that it is a list of list, length 3x3
cell_vectors = checkSequence(cell_vectors, "The 'cell_vectors' must be given as a 3x3 list or array of numbers.")
# Check as if this was a coordinate list.
cell_vectors = checkCoordinateList(cell_vectors, "cell_vecctors")
# Transform to a numpy array.
cell_vectors = numpy.array(cell_vectors)
# Now with the cell vectors as a numpy array, check the dimension.
if numpy.shape(cell_vectors) != (3,3):
raise Error("The 'cell_vectors' parametes must have the shape 3x3.")
# Check the type.
dummy_array = numpy.array([[1.0,0.0,0.0],[0.0,1.0,0.0],[0.0,0.0,1.0]])
if cell_vectors.dtype != dummy_array.dtype:
raise Error("The cell_vectors elements must be floating point numbers.")
# Check for linear dependencies.
cell_T = numpy.transpose(cell_vectors)
cell_determinant = numpy.linalg.det(cell_T)
if cell_determinant < 0.00001:
raise Error("The unit cell vectors are linearly dependent with determinant = %f."%(cell_determinant))
# Done checking.
return cell_vectors
def checkCoordinateList(coordinates, varname="coordinates"):
"""
Check that the given coordinates is a valid Nx3 sequence of numbers.
:param coordinates: The object to test. To pass the test this must be
an Nx3 array of floating point numbers.
:param varname: The name of the variable. Defaults to "coordinates"
:type varname: string
:returns: A valid Nx3 numpy array of numbers.
"""
# Check that it is a sequence.
coordinates = checkSequence(coordinates, "The %s must be given as a list of lists with dimensions Nx3"%(varname))
# Check that its length is not zero.
if (len(coordinates) < 1):
raise Error("The '%s' parameter may not be an empty list."%(varname))
# Check each coordinate.
for coord in coordinates:
# Check that it is a sequence.
coord = checkSequence(coord, "The %s must be given as a list of lists with dimensions Nx3"%(varname))
# Make sure the length of the coordinate is 3.
if len(coord) != 3:
raise Error("Each entry in the '%s' list must have exactly three elements."%(varname))
# Check that each element is a floating point number.
if not all([isinstance(c,float) for c in coord]):
raise Error("All '%s' entries must be given as floating point numbers."%(varname))
# Convert to a numpy array and return.
return numpy.array(coordinates)
def checkSequence(sequence, msg="The tested object is not a sequence."):
"""
Check that the given object is sequence.
:param sequence: The object to test.
:param msg: Non-default error message to print.
:type msg: string
:returns: The valid sequence object.
"""
# Check that this is a sequence.
if not ('__len__' in dir(sequence)):
raise Error(msg)
# Done.
return sequence
def checkSequenceOfPositiveIntegers(sequence, msg="The tested object is not a sequence of positive integers."):
"""
Utility function to check if a parameter is a sequence of positive integers.
:param sequence: The sequence to check.
:param msg: Non-default error message to print.
:type msg: string
:returns: The valid sequence.
"""
# Check that it is a sequence.
sequence = checkSequenceOf(sequence, int, msg)
# Check that each element is a positive integer.
for s in sequence:
if s < 0:
raise Error(msg)
# Done.
return sequence
def checkSequenceOfFloats(sequence, msg="The tested object is not a sequence of floats."):
"""
Utility function to check if a parameter is a sequence of floating point numbers.
:param sequence: The sequence to check.
:param msg: Non-default error message to print.
:type msg: string
:returns: The valid sequence.
"""
return checkSequenceOf(sequence, float, msg)
def checkSequenceOf(sequence, class_type, msg="The tested object is not a sequence of the correct type."):
"""
Utility function to check if a parameter is a sequence of instances of a given type.
:param sequence: The sequence to check.
:param class_type: The class of which the elements in the sequence should be instances.
:param msg: Non-default error message to print.
:type msg: string
:returns: The valid sequence.
"""
# Check that it is a sequence.
sequence = checkSequence(sequence, msg)
# Check that its length is not zero.
if len(sequence) == 0:
raise Error(msg)
# Check that each element is an instance of type KMCProcess.
for element in sequence:
if not isinstance(element, class_type):
raise Error(msg)
# Done.
return sequence
def checkTypes(types, length):
"""
Check that the types list is given as a list of strings with the correct
length.
:param types: The object to check.
:param length: The size the list should have.
:type length: int
:returns: The checked list.
"""
# Check that it is a list.
if not isinstance(types, list):
raise Error("The 'types' parameter must be given as a list of strings.")
# Check eachg element.
for t in types:
if not isinstance(t,str):
raise Error("The 'types' parameter must be given as a list of strings.")
# Check the length.
if len(types) != length:
raise Error("The length of the 'types' parameter must match the coordinates.")
# Done.
return types
def checkPositiveInteger(parameter, default_parameter, parameter_name):
"""
Utility function for checking that a parameter is a positive integer.
:param parameter: The parameter to check.
:param default_parameter: The value to use if the parameter value is None
:param parameter_name: The name of the parameter to use in error messages.
:type parameter_name: str
:returns: The checked parameter.
"""
# Set default.
if parameter is None:
parameter = default_parameter
# The error message.
msg = "The parameter '%s' must be given as a positive integer."%(parameter_name)
# Check type.
if not isinstance(parameter, int):
raise Error(msg)
# Check value.
if parameter < 0:
raise Error(msg)
# Checked.
return parameter
def checkPositiveFloat(parameter, default_parameter, parameter_name):
"""
Utility function for checking that a parameter is a positive float.
:param parameter: The parameter to check.
:param default_parameter: The value to use if the parameter value is None
:param parameter_name: The name of the parameter to use in error messages.
:type parameter_name: str
:returns: The checked parameter.
"""
# Set default.
if parameter is None:
parameter = default_parameter
# The error message.
msg = "The parameter '%s' must be given as a positive float."%(parameter_name)
# Check type.
if not isinstance(parameter, float):
raise Error(msg)
# Check value.
if parameter < 0.0:
raise Error(msg)
# Checked.
return parameter
| [
"KMCLib.Exceptions.Error.Error",
"numpy.linalg.det",
"numpy.array",
"numpy.shape",
"numpy.transpose"
] | [((1955, 2019), 'numpy.array', 'numpy.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (1966, 2019), False, 'import numpy\n'), ((2192, 2221), 'numpy.transpose', 'numpy.transpose', (['cell_vectors'], {}), '(cell_vectors)\n', (2207, 2221), False, 'import numpy\n'), ((2245, 2269), 'numpy.linalg.det', 'numpy.linalg.det', (['cell_T'], {}), '(cell_T)\n', (2261, 2269), False, 'import numpy\n'), ((3878, 3902), 'numpy.array', 'numpy.array', (['coordinates'], {}), '(coordinates)\n', (3889, 3902), False, 'import numpy\n'), ((884, 894), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (889, 894), False, 'from KMCLib.Exceptions.Error import Error\n'), ((1696, 1721), 'numpy.array', 'numpy.array', (['cell_vectors'], {}), '(cell_vectors)\n', (1707, 1721), False, 'import numpy\n'), ((1801, 1826), 'numpy.shape', 'numpy.shape', (['cell_vectors'], {}), '(cell_vectors)\n', (1812, 1826), False, 'import numpy\n'), ((1851, 1913), 'KMCLib.Exceptions.Error.Error', 'Error', (['"""The \'cell_vectors\' parametes must have the shape 3x3."""'], {}), '("The \'cell_vectors\' parametes must have the shape 3x3.")\n', (1856, 1913), False, 'from KMCLib.Exceptions.Error import Error\n'), ((2074, 2140), 'KMCLib.Exceptions.Error.Error', 'Error', (['"""The cell_vectors elements must be floating point numbers."""'], {}), "('The cell_vectors elements must be floating point numbers.')\n", (2079, 2140), False, 'from KMCLib.Exceptions.Error import Error\n'), ((2319, 2418), 'KMCLib.Exceptions.Error.Error', 'Error', (["('The unit cell vectors are linearly dependent with determinant = %f.' %\n cell_determinant)"], {}), "('The unit cell vectors are linearly dependent with determinant = %f.' %\n cell_determinant)\n", (2324, 2418), False, 'from KMCLib.Exceptions.Error import Error\n'), ((3150, 3213), 'KMCLib.Exceptions.Error.Error', 'Error', (['("The \'%s\' parameter may not be an empty list." 
% varname)'], {}), '("The \'%s\' parameter may not be an empty list." % varname)\n', (3155, 3213), False, 'from KMCLib.Exceptions.Error import Error\n'), ((4297, 4307), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (4302, 4307), False, 'from KMCLib.Exceptions.Error import Error\n'), ((5973, 5983), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (5978, 5983), False, 'from KMCLib.Exceptions.Error import Error\n'), ((6555, 6621), 'KMCLib.Exceptions.Error.Error', 'Error', (['"""The \'types\' parameter must be given as a list of strings."""'], {}), '("The \'types\' parameter must be given as a list of strings.")\n', (6560, 6621), False, 'from KMCLib.Exceptions.Error import Error\n'), ((6857, 6929), 'KMCLib.Exceptions.Error.Error', 'Error', (['"""The length of the \'types\' parameter must match the coordinates."""'], {}), '("The length of the \'types\' parameter must match the coordinates.")\n', (6862, 6929), False, 'from KMCLib.Exceptions.Error import Error\n'), ((7663, 7673), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (7668, 7673), False, 'from KMCLib.Exceptions.Error import Error\n'), ((7730, 7740), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (7735, 7740), False, 'from KMCLib.Exceptions.Error import Error\n'), ((8477, 8487), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (8482, 8487), False, 'from KMCLib.Exceptions.Error import Error\n'), ((8546, 8556), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (8551, 8556), False, 'from KMCLib.Exceptions.Error import Error\n'), ((3526, 3611), 'KMCLib.Exceptions.Error.Error', 'Error', (['("Each entry in the \'%s\' list must have exactly three elements." % varname)'], {}), '("Each entry in the \'%s\' list must have exactly three elements." 
% varname\n )\n', (3531, 3611), False, 'from KMCLib.Exceptions.Error import Error\n'), ((3746, 3822), 'KMCLib.Exceptions.Error.Error', 'Error', (['("All \'%s\' entries must be given as floating point numbers." % varname)'], {}), '("All \'%s\' entries must be given as floating point numbers." % varname)\n', (3751, 3822), False, 'from KMCLib.Exceptions.Error import Error\n'), ((4906, 4916), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (4911, 4916), False, 'from KMCLib.Exceptions.Error import Error\n'), ((6145, 6155), 'KMCLib.Exceptions.Error.Error', 'Error', (['msg'], {}), '(msg)\n', (6150, 6155), False, 'from KMCLib.Exceptions.Error import Error\n'), ((6722, 6788), 'KMCLib.Exceptions.Error.Error', 'Error', (['"""The \'types\' parameter must be given as a list of strings."""'], {}), '("The \'types\' parameter must be given as a list of strings.")\n', (6727, 6788), False, 'from KMCLib.Exceptions.Error import Error\n')] |
from numba import jit
import numpy as np
import matplotlib.pyplot as plt
visualization = True
def show_spheres(scale, points, rgb, label=None):
"""
:param scale: int
:param points: tuple (x, y, z)
:param rgb:
:return:
"""
if label is not None:
print('')
print(label)
points = np.stack([points[0].reshape(-1), points[1].reshape(-1), points[2].reshape(-1)], axis=1)
fig = plt.figure()
ax = fig.gca(projection='3d')
# axis scale setting
ax.set_xlim3d(-1 * scale, scale)
ax.set_ylim3d(-1 * scale, scale)
ax.set_zlim3d(-0.8 * scale, 0.8 * scale)
x, y, z = 0, 0, 2
ax.plot([0, scale * x], [0, scale * y], [0, scale * z])
# label
# ax.grid(False)
ax.plot([0, scale * x], [0, scale * y], [0, scale * z])
# if label is not None:
# x, y, z = label
#
# # label
# ax.grid(False)
# ax.plot([0, scale * x], [0, scale * y], [0, scale * z])
#
# # how rotate they are
# phi2 = np.arctan2(y, x) * 180 / np.pi
# theta = np.arccos(z) * 180 / np.pi
#
# if phi2 < 0:
# phi2 = 360 + phi2
r = rgb[0].reshape(-1)
g = rgb[1].reshape(-1)
b = rgb[2].reshape(-1)
# rgb 0~1 scale
r = (r - np.min(r)) / (np.max(r) - np.min(r))
g = (g - np.min(g)) / (np.max(g) - np.min(g))
b = (b - np.min(b)) / (np.max(b) - np.min(b))
rgb = np.stack([r, g, b], axis=1)
ax.scatter(points[:, 0], points[:, 1], points[:, 2], facecolors=rgb, alpha=1, depthshade=False,
edgecolors=None,
) # data coloring
plt.legend(loc=2)
# Photos viewed at 90 degrees
ax.view_init(0, 0)
# Photos from above
# ax.view_init(-1 * theta + 90, phi2)
plt.draw()
plt.show()
def rotate_map_given_R(R, height, width):
# Inputs:
# phi,theta in degrees , height and width of an image
# output:
# rotation map for x and y coordinate
# goal:
# calculating rotation map for corresponding image dimension and phi,theta value.
# (1,0,0)(rho,phi,theta) on sphere goes to (1,phi,theta)
def pos_conversion(x, y, z):
# given postech protocol
# return my protocol
return z, -x, y
def inv_conversion(x, y, z):
# given my conversion
# convert it to postech system.
return -y, z, x
# if not original_file.is_file():
# step1
spherePoints = flat_to_sphere(height, width)
# R = calculate_Rmatrix_from_phi_theta(phi,theta)
R_inv = np.linalg.inv(R)
#step2
spherePointsRotated = rotate_sphere_given_phi_theta(R_inv, spherePoints)
#Create two mapping variable
#step3
[map_x, map_y] = sphere_to_flat(spherePointsRotated, height, width)
# dst(y,x) = src(map_x(y,x),map_y(y,x))
return [map_x, map_y]
@jit(nopython=True, cache=True)
def flat_to_sphere(height, width):
# Input:
# height and width of image
# Output:
# return (height,width,3) numpy ndarray. (y,x) of array has (x,y,z) value which is on sphere.
# Goal:
# return sphere points
# Create matrix that contains x,y,z coordinates
sphere = np.zeros((height, width, 3))
x_to_theta = np.zeros(width)
y_to_phi = np.zeros(height)
theta_slope = 2*np.pi/(width-1)
phi_slope = np.pi/(height-1)
# linear map from [y,x] to [phi,theta]
for x in range(0, width):
x_to_theta[x] = np.rad2deg(np.multiply(x, theta_slope))
for y in range(0, height):
y_to_phi[y] = np.rad2deg(np.multiply(y, phi_slope))
# For every pixel coordinates, create a matrix that contains the
# corresponding (x,y,z) coordinates
for y_f in range(0, height):
for x_f in range(0, width):
theta = x_to_theta[x_f]
phi = y_to_phi[y_f]
phi = np.deg2rad(phi)
theta = np.deg2rad(theta)
x_s = np.sin(phi) * np.cos(theta)
y_s = np.sin(phi) * np.sin(theta)
z_s = np.cos(phi)
sphere[y_f, x_f, 0] = x_s
sphere[y_f, x_f, 1] = y_s
sphere[y_f, x_f, 2] = z_s
return sphere
@jit(nopython=True, cache=True)
def rotate_sphere_given_phi_theta(R, spherePoints):
# Input:
# phi,theta in degrees and spherePoints(x,y,z of on sphere dimension (height,width,3) )
# Output:
# spherePointsRotated of which dimension is (h,w,3) and contains (x',y',z' )
# (x',y',z')=R*(x,y,z) where R maps (0,0,1) to (vx,vy,vz) defined by theta,phi (i.e. R*(0,0,1)=(vx,vy,vz))
# Goal:
# apply R to every point on sphere
h, w, c = spherePoints.shape
spherePointsRotated = np.zeros((h, w, c),dtype=np.float64)
for y in range(0, h):
for x in range(0, w):
pointOnSphere = spherePoints[y, x, :]
pointOnSphereRotated = np.dot(R, pointOnSphere)
spherePointsRotated[y, x, :] = pointOnSphereRotated
# spherePointsRotated[y, x, :] = np.dot(R, pointOnSphere)
return spherePointsRotated
@jit(nopython=True, cache=True)
def calculate_Rmatrix_from_phi_theta(phi, theta):
"""
A = [0,0,1] B = [x,y,z] ( = phi,theta) the goal is to find rotation matrix R where R*A == B
please refer to this website https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d
v = a cross b ,s = ||v|| (sine of angle), c = a dot b (cosine of angle)
:param phi: z ์ถ ๊ฐ๋
:param theta: xy ์ถ ๊ฐ๋
:return: rotation matrix that moves [0,0,1] to ([x,y,z] that is equivalent to (phi,theta))
"""
epsilon = 1e-7
A = np.array([0, 0, 1], dtype=np.float64) # original up-vector
# B = spherical_to_cartesian(phi,theta) # target vector
phi = np.deg2rad(phi)
theta = np.deg2rad(theta)
x = np.sin(phi) * np.cos(theta)
y = np.sin(phi) * np.sin(theta)
z = np.cos(phi)
B = np.array([x, y, z], dtype=np.float64)
desiredResult = B
# dot(R,A) == B
# If A == B then return identity(3)
if A[0] - B[0] < epsilon \
and A[0] - B[0] > -epsilon \
and A[1] - B[1] < epsilon \
and A[1] - B[1] > -epsilon \
and A[2] - B[2] < epsilon \
and A[2] - B[2] > -epsilon:
# print('Identity matrix is returned')
return np.identity(3)
# v = np.cross(A, B)
# In the numba, numpy.cross is not supported
cross_1 = np.multiply(A[1],B[2])-np.multiply(A[2],B[1])
cross_2 = np.multiply(A[2],B[0])-np.multiply(A[0],B[2])
cross_3 = np.multiply(A[0],B[1])-np.multiply(A[1],B[0])
v = np.array([cross_1,cross_2,cross_3])
c = np.dot(A, B)
skewSymmetric = skewSymmetricCrossProduct(v)
if -epsilon < c + 1 and c + 1 < epsilon:
R = -np.identity(3)
else:
R = np.identity(3) + skewSymmetric + np.dot(skewSymmetric, skewSymmetric) * (
1 / (1 + c)) # what if 1+c is 0?
return R
@jit(nopython=True, cache=True)
def skewSymmetricCrossProduct(v):
# Input:
# a vector in R^3
# Output:
# [ 0 -v3 v2 ; v3 0 -v1; -v2 v1 0]
v1 = v[0]
v2 = v[1]
v3 = v[2]
skewSymmetricMatrix = np.array([[0, -v3, v2], [v3, 0, -v1], [-v2, v1, 0]], dtype=np.float64)
return skewSymmetricMatrix
@jit(nopython=True, cache=True)
def sphere_to_flat(spherePointsRotated, height, width):
# Input:
# y,x coordinate on 2d flat image,numpy nd array of dimension (height,width,3). ndarray(y,x) has x,y,z value on sphere ,height and width of an image
# Output:
# x,y coordinate of 2d flat image
# Goal:
# calculate destination x,y coordinate given information x,y(2d flat) <-> x,y,z(sphere)
map_y = np.zeros((height, width), dtype=np.float32)
map_x = np.zeros((height, width), dtype=np.float32)
factor_phi = (height-1)/np.pi
factor_theta = (width-1)/(2*np.pi)
# Get multiplied(by inverted rotation matrix) x,y,z coordinates
for image_y in range(0, height):
for image_x in range(0, width):
pointOnRotatedSphere_x = spherePointsRotated[image_y, image_x, 0]
pointOnRotatedSphere_y = spherePointsRotated[image_y, image_x, 1]
pointOnRotatedSphere_z = spherePointsRotated[image_y, image_x, 2]
x_2 = np.power(pointOnRotatedSphere_x, 2)
y_2 = np.power(pointOnRotatedSphere_y, 2)
z_2 = np.power(pointOnRotatedSphere_z, 2)
theta = float(np.arctan2(pointOnRotatedSphere_y, pointOnRotatedSphere_x))
# atan2 returns value of which range is [-pi,pi], range of theta is [0,2pi] so if theta is negative value,actual value is theta+2pi
if theta < 0:
theta = theta + np.multiply(2, np.pi)
rho = x_2 + y_2 + z_2
rho = np.sqrt(rho)
phi = np.arccos(pointOnRotatedSphere_z / rho)
map_y[image_y, image_x] = phi*factor_phi
map_x[image_y, image_x] = theta*factor_theta
return [map_x, map_y]
| [
"numpy.sqrt",
"numpy.arccos",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.multiply",
"numpy.max",
"numpy.stack",
"numpy.dot",
"numpy.min",
"numpy.identity",
"numpy.deg2rad",
"numba.jit",
"numpy.cos",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.sho... | [((2861, 2891), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (2864, 2891), False, 'from numba import jit\n'), ((4176, 4206), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (4179, 4206), False, 'from numba import jit\n'), ((5072, 5102), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (5075, 5102), False, 'from numba import jit\n'), ((6975, 7005), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (6978, 7005), False, 'from numba import jit\n'), ((7307, 7337), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(True)'}), '(nopython=True, cache=True)\n', (7310, 7337), False, 'from numba import jit\n'), ((427, 439), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (437, 439), True, 'import matplotlib.pyplot as plt\n'), ((1420, 1447), 'numpy.stack', 'np.stack', (['[r, g, b]'], {'axis': '(1)'}), '([r, g, b], axis=1)\n', (1428, 1447), True, 'import numpy as np\n'), ((1619, 1636), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (1629, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1767, 1777), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1775, 1777), True, 'import matplotlib.pyplot as plt\n'), ((1782, 1792), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1790, 1792), True, 'import matplotlib.pyplot as plt\n'), ((2564, 2580), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (2577, 2580), True, 'import numpy as np\n'), ((3204, 3232), 'numpy.zeros', 'np.zeros', (['(height, width, 3)'], {}), '((height, width, 3))\n', (3212, 3232), True, 'import numpy as np\n'), ((3250, 3265), 'numpy.zeros', 'np.zeros', (['width'], {}), '(width)\n', (3258, 3265), True, 'import numpy as np\n'), ((3281, 3297), 'numpy.zeros', 'np.zeros', (['height'], {}), '(height)\n', 
(3289, 3297), True, 'import numpy as np\n'), ((4699, 4736), 'numpy.zeros', 'np.zeros', (['(h, w, c)'], {'dtype': 'np.float64'}), '((h, w, c), dtype=np.float64)\n', (4707, 4736), True, 'import numpy as np\n'), ((5656, 5693), 'numpy.array', 'np.array', (['[0, 0, 1]'], {'dtype': 'np.float64'}), '([0, 0, 1], dtype=np.float64)\n', (5664, 5693), True, 'import numpy as np\n'), ((5788, 5803), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (5798, 5803), True, 'import numpy as np\n'), ((5816, 5833), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (5826, 5833), True, 'import numpy as np\n'), ((5914, 5925), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (5920, 5925), True, 'import numpy as np\n'), ((5934, 5971), 'numpy.array', 'np.array', (['[x, y, z]'], {'dtype': 'np.float64'}), '([x, y, z], dtype=np.float64)\n', (5942, 5971), True, 'import numpy as np\n'), ((6628, 6665), 'numpy.array', 'np.array', (['[cross_1, cross_2, cross_3]'], {}), '([cross_1, cross_2, cross_3])\n', (6636, 6665), True, 'import numpy as np\n'), ((6673, 6685), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (6679, 6685), True, 'import numpy as np\n'), ((7201, 7271), 'numpy.array', 'np.array', (['[[0, -v3, v2], [v3, 0, -v1], [-v2, v1, 0]]'], {'dtype': 'np.float64'}), '([[0, -v3, v2], [v3, 0, -v1], [-v2, v1, 0]], dtype=np.float64)\n', (7209, 7271), True, 'import numpy as np\n'), ((7746, 7789), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.float32'}), '((height, width), dtype=np.float32)\n', (7754, 7789), True, 'import numpy as np\n'), ((7802, 7845), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.float32'}), '((height, width), dtype=np.float32)\n', (7810, 7845), True, 'import numpy as np\n'), ((5842, 5853), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (5848, 5853), True, 'import numpy as np\n'), ((5856, 5869), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5862, 5869), True, 'import numpy as np\n'), ((5878, 5889), 'numpy.sin', 'np.sin', 
(['phi'], {}), '(phi)\n', (5884, 5889), True, 'import numpy as np\n'), ((5892, 5905), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5898, 5905), True, 'import numpy as np\n'), ((6350, 6364), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (6361, 6364), True, 'import numpy as np\n'), ((6454, 6477), 'numpy.multiply', 'np.multiply', (['A[1]', 'B[2]'], {}), '(A[1], B[2])\n', (6465, 6477), True, 'import numpy as np\n'), ((6477, 6500), 'numpy.multiply', 'np.multiply', (['A[2]', 'B[1]'], {}), '(A[2], B[1])\n', (6488, 6500), True, 'import numpy as np\n'), ((6514, 6537), 'numpy.multiply', 'np.multiply', (['A[2]', 'B[0]'], {}), '(A[2], B[0])\n', (6525, 6537), True, 'import numpy as np\n'), ((6537, 6560), 'numpy.multiply', 'np.multiply', (['A[0]', 'B[2]'], {}), '(A[0], B[2])\n', (6548, 6560), True, 'import numpy as np\n'), ((6574, 6597), 'numpy.multiply', 'np.multiply', (['A[0]', 'B[1]'], {}), '(A[0], B[1])\n', (6585, 6597), True, 'import numpy as np\n'), ((6597, 6620), 'numpy.multiply', 'np.multiply', (['A[1]', 'B[0]'], {}), '(A[1], B[0])\n', (6608, 6620), True, 'import numpy as np\n'), ((1273, 1282), 'numpy.min', 'np.min', (['r'], {}), '(r)\n', (1279, 1282), True, 'import numpy as np\n'), ((1287, 1296), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (1293, 1296), True, 'import numpy as np\n'), ((1299, 1308), 'numpy.min', 'np.min', (['r'], {}), '(r)\n', (1305, 1308), True, 'import numpy as np\n'), ((1323, 1332), 'numpy.min', 'np.min', (['g'], {}), '(g)\n', (1329, 1332), True, 'import numpy as np\n'), ((1337, 1346), 'numpy.max', 'np.max', (['g'], {}), '(g)\n', (1343, 1346), True, 'import numpy as np\n'), ((1349, 1358), 'numpy.min', 'np.min', (['g'], {}), '(g)\n', (1355, 1358), True, 'import numpy as np\n'), ((1373, 1382), 'numpy.min', 'np.min', (['b'], {}), '(b)\n', (1379, 1382), True, 'import numpy as np\n'), ((1387, 1396), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (1393, 1396), True, 'import numpy as np\n'), ((1399, 1408), 'numpy.min', 'np.min', (['b'], 
{}), '(b)\n', (1405, 1408), True, 'import numpy as np\n'), ((3477, 3504), 'numpy.multiply', 'np.multiply', (['x', 'theta_slope'], {}), '(x, theta_slope)\n', (3488, 3504), True, 'import numpy as np\n'), ((3571, 3596), 'numpy.multiply', 'np.multiply', (['y', 'phi_slope'], {}), '(y, phi_slope)\n', (3582, 3596), True, 'import numpy as np\n'), ((3864, 3879), 'numpy.deg2rad', 'np.deg2rad', (['phi'], {}), '(phi)\n', (3874, 3879), True, 'import numpy as np\n'), ((3900, 3917), 'numpy.deg2rad', 'np.deg2rad', (['theta'], {}), '(theta)\n', (3910, 3917), True, 'import numpy as np\n'), ((4028, 4039), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (4034, 4039), True, 'import numpy as np\n'), ((4878, 4902), 'numpy.dot', 'np.dot', (['R', 'pointOnSphere'], {}), '(R, pointOnSphere)\n', (4884, 4902), True, 'import numpy as np\n'), ((6794, 6808), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (6805, 6808), True, 'import numpy as np\n'), ((8319, 8354), 'numpy.power', 'np.power', (['pointOnRotatedSphere_x', '(2)'], {}), '(pointOnRotatedSphere_x, 2)\n', (8327, 8354), True, 'import numpy as np\n'), ((8373, 8408), 'numpy.power', 'np.power', (['pointOnRotatedSphere_y', '(2)'], {}), '(pointOnRotatedSphere_y, 2)\n', (8381, 8408), True, 'import numpy as np\n'), ((8427, 8462), 'numpy.power', 'np.power', (['pointOnRotatedSphere_z', '(2)'], {}), '(pointOnRotatedSphere_z, 2)\n', (8435, 8462), True, 'import numpy as np\n'), ((8827, 8839), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (8834, 8839), True, 'import numpy as np\n'), ((8858, 8897), 'numpy.arccos', 'np.arccos', (['(pointOnRotatedSphere_z / rho)'], {}), '(pointOnRotatedSphere_z / rho)\n', (8867, 8897), True, 'import numpy as np\n'), ((3936, 3947), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3942, 3947), True, 'import numpy as np\n'), ((3950, 3963), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3956, 3963), True, 'import numpy as np\n'), ((3982, 3993), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (3988, 
3993), True, 'import numpy as np\n'), ((3996, 4009), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4002, 4009), True, 'import numpy as np\n'), ((6831, 6845), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (6842, 6845), True, 'import numpy as np\n'), ((6864, 6900), 'numpy.dot', 'np.dot', (['skewSymmetric', 'skewSymmetric'], {}), '(skewSymmetric, skewSymmetric)\n', (6870, 6900), True, 'import numpy as np\n'), ((8490, 8548), 'numpy.arctan2', 'np.arctan2', (['pointOnRotatedSphere_y', 'pointOnRotatedSphere_x'], {}), '(pointOnRotatedSphere_y, pointOnRotatedSphere_x)\n', (8500, 8548), True, 'import numpy as np\n'), ((8752, 8773), 'numpy.multiply', 'np.multiply', (['(2)', 'np.pi'], {}), '(2, np.pi)\n', (8763, 8773), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Return residual from pyccd model using Google Earth Engine
Usage: GE_pyccd_residual.py [options]
--path=PATH path
--row=ROW row
--lon=LON longitude
--lat=LAT latitude
--date=DATE date to compare (%Y%j)
--count=COUNT count of observations after DATE
--output=OUTPUT output numpy file
--start=START starting year
--finish=FINISH finish year
--expand=EXPAND expansion distance for thumbnails
"""
from docopt import docopt
import os,sys
import numpy as np
import datetime
import pandas as pd
import matplotlib.dates as mdates
from matplotlib import dates
from pylab import *
import wget
#Import pycc and earth engine
import ee
import ccd
# Initialize Earth Engine
ee.Initialize()
def pixel(args):
    """Run pyCCD for one pixel and report residuals around a target date.

    Builds Landsat 5/7/8 surface-reflectance and brightness-temperature
    time series for the pixel from Google Earth Engine, runs ``ccd.detect``,
    selects the change model covering ``--date`` and, when ``--output`` is
    given, saves residual arrays, bar plots and scene thumbnails.

    Parameters
    ----------
    args : dict
        docopt argument dictionary (see module docstring for the options).
    """
    calculate = False
    if args['--path']:
        path = int(args['--path'])
    else:
        print('Calculating path from lon/lat')
        calculate = True
    if args['--row']:
        row = int(args['--row'])
    else:
        print('Calculating row from lon/lat')
        calculate = True
    lon = float(args['--lon'])
    if np.abs(lon) > 180:
        print('Invalid longitude value')
        sys.exit()
    lat = float(args['--lat'])
    if np.abs(lat) > 90:
        print('Invalid latitude value')
        sys.exit()
    if args['--date']:
        _date = args['--date']
        dt = datetime.datetime.strptime(_date, "%Y%j")
        date = dt.toordinal()
    else:
        print('Please specify date')
        sys.exit()
    if args['--count']:
        count = int(args['--count'])
    else:
        count = 1
    if args['--expand']:
        expand = int(args['--expand'])
    else:
        # Bug fix: this branch previously assigned ``count`` instead of
        # ``expand``, leaving ``expand`` undefined for save_thumbnail().
        expand = 500
    if args['--output']:
        output = args['--output']
        saveout = True
    else:
        saveout = False
    if saveout:
        if not os.path.isdir(output):
            os.mkdir(output)
    if args['--start']:
        start = args['--start']
        start = '{a}-01-01'.format(a=start)
    else:
        start = '1984-01-01'
    if args['--finish']:
        finish = args['--finish']
        finish = '{a}-01-01'.format(a=finish)
    else:
        finish = '2017-01-01'
    #Location
    point = {'type':'Point', 'coordinates':[lon, lat]};
    if calculate:
        # Derive WRS-2 path/row from the point using the public footprint table.
        fc = ee.FeatureCollection('ft:1_RZgjlcqixp-L9hyS6NYGqLaKOlnhSC35AB5M5Ll');
        #Get overlap
        pgeo = ee.Geometry.Point([lon, lat]);
        cur_wrs = fc.filterBounds(pgeo);
        path = cur_wrs.first().get('PATH');
        row = cur_wrs.first().get('ROW');
        print('Path: {}'.format(int(path.getInfo())));
        print('Row: {}'.format(int(row.getInfo())));
    # Create image collection
    #Landsat Collection. TODO: How to reduce line size with API?
    l8_collection = ee.ImageCollection(
        'LANDSAT/LC8_SR').filter(
        ee.Filter.eq('WRS_PATH', path)).filter(
        ee.Filter.eq('WRS_ROW', row)).filterDate(
        start, finish);
    l7_collection = ee.ImageCollection(
        'LANDSAT/LE7_SR').filter(
        ee.Filter.eq('WRS_PATH', path)).filter(
        ee.Filter.eq('WRS_ROW', row)).filterDate(
        start, finish);
    l5_collection = ee.ImageCollection(
        'LANDSAT/LT5_SR').filter(
        ee.Filter.eq('WRS_PATH', path)).filter(
        ee.Filter.eq('WRS_ROW', row)).filterDate(
        start, finish);
    l8_thermal = ee.ImageCollection(
        'LANDSAT/LC08/C01/T1_TOA').filter(
        ee.Filter.eq('WRS_PATH', path)).filter(
        ee.Filter.eq('WRS_ROW', row)).filterDate(
        start, finish).select('B10');
    l7_thermal = ee.ImageCollection(
        'LANDSAT/LE07/C01/T1_TOA').filter(
        ee.Filter.eq('WRS_PATH', path)).filter(
        ee.Filter.eq('WRS_ROW', row)).filterDate(
        start, finish).select('B6_VCID_1');
    l5_thermal = ee.ImageCollection(
        'LANDSAT/LT05/C01/T1_TOA').filter(
        ee.Filter.eq('WRS_PATH', path)).filter(
        ee.Filter.eq('WRS_ROW', row)).filterDate(
        start, finish).select('B6');
    #LC8 Band names
    band_list = ['B2','B3','B4','B5','B6','B7','cfmask','cfmask_conf']
    #Names to rename LC8 to / L7L5 band names
    rename_list = ['B1','B2','B3','B4','B5','B7','cfmask','cfmask_conf']
    #L8
    df_sr = make_db(l8_collection, point, band_list, rename_list)
    #L7
    df_sr2 = make_db(l7_collection, point, rename_list, rename_list)
    df_sr = update_df(df_sr, df_sr2)
    #L5
    df_sr2 = make_db(l5_collection, point, rename_list, rename_list)
    df_sr = update_df(df_sr, df_sr2)
    #thermal
    band_list = ['B6']
    rename_list = ['thermal']
    df_thermal = make_db(l5_thermal, point, band_list, rename_list)
    band_list = ['B6_VCID_1']
    df_thermal2 = make_db(l7_thermal, point, band_list, rename_list)
    df_thermal = update_df(df_thermal, df_thermal2)
    band_list = ['B10']
    df_thermal2 = make_db(l8_thermal, point, band_list, rename_list)
    df_thermal = update_df(df_thermal, df_thermal2)
    #Merge the thermal and SR
    df = pd.merge(df_sr, df_thermal, on='time')
    df = df.sort_values('time')
    #Get rid of NaNs
    # df['cfmask'][df['cfmask'].isnull()] = 4
    # df[df.isnull()] = 0
    #Scale brightness temperature by 10 for pyccd
    df['thermal'] = df['thermal'] * 10
    #TODO: Paramaterize everything
    params = {'QA_BITPACKED': False,
              'QA_FILL': 255,
              'QA_CLEAR': 0,
              'QA_WATER': 1,
              'QA_SHADOW': 2,
              'QA_SNOW': 3,
              'QA_CLOUD': 4}
    dates = np.array(df['time'])
    blues = np.array(df['B1'])
    greens = np.array(df['B2'])
    reds = np.array(df['B3'])
    nirs = np.array(df['B4'])
    swir1s = np.array(df['B5'])
    swir2s = np.array(df['B7'])
    thermals = np.array(df['thermal'])
    qas = np.array(df['cfmask'])
    results = ccd.detect(dates, blues, greens, reds, nirs, swir1s, swir2s, thermals, qas, params=params)
    #Get the observed values
    observed_values, after_indices = get_observed(df, date, count)
    model, location = get_model(results, date)
    print('Found model {a} date specified'.format(a=location))
    # The 'during' case returns a one-element list; unwrap it.
    if len(model) == 1:
        model = model[0]
    # Bug fix: the original test was ``location == 'during' or 'before'``,
    # which is always true.  Residuals only make sense for models that
    # cover or precede the requested date.
    if location in ('during', 'before'):
        residuals = get_during(model, observed_values)
        normalized_residuals = norm_res(residuals, model)
        # Bug fix: everything below touches ``output``/``expand`` and is now
        # guarded by ``saveout`` so runs without --output do not crash.
        if saveout:
            out = output + '/' + output
            np.save(out, np.array(residuals))
            image_count = df['id_x'].iloc[after_indices][0:count]
            iter = 0
            #Save image of absolute residuals
            outname = output + '/' 'absolute_residuals.png'
            save_barplot(residuals, outname, 'Absolute Residuals')
            #Save image of normalized residuals
            outname = output + '/' 'normalized_residuals.png'
            save_barplot(normalized_residuals, outname, 'Normalized Residuals')
            #Save thumbnails
            for img in image_count.values:
                save_thumbnail(img, point, output, expand, iter)
                iter += 1
def norm_res(residuals, model):
    """Normalize per-band residuals by each band model's RMSE.

    Returns a new dict with the same band keys, each value divided by
    the corresponding band model's ``rmse``.
    """
    band_names = ('blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'thermal')
    return {band: residuals[band] / getattr(model, band).rmse
            for band in band_names}
#plot_results(results, df, band, plotband, dates, yl, plotlabel)
def save_thumbnail(img, point, outdir, expand, iter):
    """Download an RGB JPEG thumbnail of Landsat scene *img* around *point*.

    The thumbnail covers a square of *expand* metres around the point and is
    written to ``<outdir>/<iter>_<img>.jpg``.
    """
    sensor_prefix = img[0:3]
    asset_id = 'LANDSAT/' + sensor_prefix + '_SR/' + img
    rgb = ee.Image(asset_id).select(['B3','B2','B1'])
    center = ee.Geometry.Point(point['coordinates'])
    region = center.buffer(expand).bounds().getInfo()['coordinates']
    url = rgb.getThumbURL({'min': 0, 'max': 1200, 'region': region, 'format': 'jpg'})
    destination = outdir + '/' + str(iter) + '_' + img + '.jpg'
    wget.download(url, destination)
def save_barplot(residuals, output, ylabel):
    """Save a bar chart of per-band residual means with std-dev error bars."""
    band_names = residuals.keys()
    means = [residuals[band].mean() for band in band_names]
    stds = [residuals[band].std() for band in band_names]
    positions = np.arange(len(band_names))
    bar_width = 0.35
    fig, ax = plt.subplots()
    ax.bar(positions, means, bar_width, color='r', yerr=stds)
    ax.set_ylabel(ylabel)
    ax.set_xticks(positions)
    ax.set_xticklabels(band_names)
    plt.savefig(output)
def get_during(result, observed_values):
    """Compute per-band residuals of observed values against a pyCCD model.

    Parameters
    ----------
    result : pyCCD change model (namedtuple with per-band ``intercept`` /
        ``coefficients`` sub-models: blue, green, red, nir, swir1, swir2,
        thermal).
    observed_values : DataFrame of clear observations with a 'time' column
        and the band columns B1-B5, B7 and thermal.

    Returns
    -------
    dict mapping band name to a numpy array of (observed - predicted)
    residuals, one entry per observation.
    """
    # Predict over the model span plus a margin so that observations shortly
    # after the model's end day can still be evaluated.
    # TODO: parameterize the margin instead of the hard-coded 5000 days.
    days = np.arange(result.start_day, result.end_day + 5000)
    # (model attribute, observed-values column) pairs, in the original order.
    bands = [('blue', 'B1'), ('green', 'B2'), ('red', 'B3'), ('nir', 'B4'),
             ('swir1', 'B5'), ('swir2', 'B7'), ('thermal', 'thermal')]
    residuals = {}
    for band_attr, column in bands:
        band_model = getattr(result, band_attr)
        predicted = predict_band(band_model.intercept, band_model.coefficients,
                                 days, observed_values)
        residuals[band_attr] = observed_values[column].values - predicted[:, 0]
    return residuals
def predict_band(intercept, coef, days, observed_values):
    """Evaluate the pyCCD harmonic model and sample it at the observed times.

    Parameters
    ----------
    intercept : float
        Model intercept.
    coef : sequence of 7 floats
        Slope plus three annual-harmonic cos/sin coefficient pairs.
    days : 1-D array
        Ordinal days the model is evaluated on.
    observed_values : DataFrame
        Must have a 'time' column of ordinal days; each must occur in *days*.

    Returns
    -------
    numpy array with one row per observation holding the model value(s)
    at that day.
    """
    # Annual angular frequency for ordinal-day input.
    w = 2 * np.pi / 365.25
    curve = (intercept + coef[0] * days +
             coef[1] * np.cos(days * w) + coef[2] * np.sin(days * w) +
             coef[3] * np.cos(days * 2 * w) + coef[4] * np.sin(days * 2 * w) +
             coef[5] * np.cos(days * 3 * w) + coef[6] * np.sin(days * 3 * w))
    # Sample the curve at each observed time (removed the unused
    # predict_indices list and the one-element list wrapper).
    samples = [curve[np.where(days == t)[0]] for t in observed_values['time'].values]
    return np.array(samples)
def get_model(results, date):
    """Select the change model relevant to *date*.

    Returns ``(model, position)`` where ``position`` is one of ``'during'``,
    ``'before'`` or ``'after'``.  Note: the ``'during'`` case returns a
    one-element list (the caller unwraps it), the other cases return the
    model itself.  Exits the process when no suitable model exists.
    """
    earlier = []
    spanning = []
    later = []
    for segment in results['change_models']:
        if segment.end_day < date:
            earlier.append(segment)
        elif segment.end_day > date and segment.start_day < date:
            spanning.append(segment)
        elif segment.end_day > date and segment.start_day > date:
            later.append(segment)
    if len(spanning) == 1:
        return spanning, 'during'
    if not spanning and earlier:
        return earlier[-1], 'before'
    if not spanning and not earlier:
        return later[0], 'after'
    print('no models calculated')
    sys.exit()
def get_observed(df, date, count):
    """Return the first *count* clear observations strictly after *date*.

    Also returns the positional indices (into *df*) of all clear
    observations after *date*, not just the first *count*.
    """
    # Positions of every observation after the target date.
    later = np.where(df['time'] > date)[0]
    later_rows = df.iloc[later, :]
    # cfmask < 3 means clear / water / shadow-free observation.
    clear_mask = later_rows['cfmask'] < 3
    clear_rows = later_rows[clear_mask]
    later = later[clear_mask]
    return clear_rows.iloc[0:count, :], later
def make_db(collection, point, band_list, rename_list):
    """Sample an Earth Engine image collection at *point* into a DataFrame.

    Parameters
    ----------
    collection : ee.ImageCollection (anything exposing ``getRegion``).
    point : GeoJSON point geometry to sample.
    band_list : band names to extract from the collection header.
    rename_list : column names to use in the returned frame.

    Returns
    -------
    DataFrame with one row per image: the renamed band values, a 'time'
    column of ordinal days and an 'id' column with the scene id.
    """
    info = collection.getRegion(point, 1).getInfo()
    header = info[0]
    data = np.array(info[1:])
    iTime = header.index('time')
    # Earth Engine timestamps are in milliseconds since the epoch.
    time = [datetime.datetime.fromtimestamp(i / 1000) for i in data[0:, iTime].astype(int)]
    time_new = [t.toordinal() for t in time]
    iBands = [header.index(b) for b in band_list]
    # ``np.float`` (removed in numpy >= 1.24) replaced by the builtin float.
    yData = data[0:, iBands].astype(float)
    df = pd.DataFrame(data=yData, index=list(range(yData.shape[0])), columns=rename_list)
    df['time'] = time_new
    df['id'] = data[0:, 0]
    return df
def update_df(df, df2):
    """Append the rows of *df2* to *df* and return the combined frame.

    Uses ``pd.concat`` because ``DataFrame.append`` was deprecated in
    pandas 1.4 and removed in pandas 2.0; the result (index preserved,
    no sorting) is identical.
    """
    return pd.concat([df, df2])
def plot_results(results, df, band, plotband, dates, yl, ylabel):
    """Plot observed values and fitted pyCCD model curves for one band.

    Parameters
    ----------
    results : dict returned by ``ccd.detect`` ('processing_mask',
        'change_models').
    df : observation table (unused here beyond the arguments already split
        out; kept for interface compatibility).
    band : int offset of the band sub-model inside each change-model tuple
        (indexed as ``result[6+band]`` — assumes the ccd result namedtuple
        layout where band models start at position 6; TODO confirm against
        the installed pyccd version).
    plotband : array of observed values for the band, aligned with *dates*.
    dates : ordinal dates of the observations.
    yl : optional y-axis limits (falsy to autoscale).
    ylabel : y-axis label text.
    """
    mask = results['processing_mask']
    predicted_values = []
    prediction_dates = []
    break_dates = []
    start_dates = []
    # Print diagnostics and evaluate the harmonic curve for every model segment.
    for num, result in enumerate(results['change_models']):
        print('Result: {}'.format(num))
        print('Start Date: {}'.format(datetime.datetime.fromordinal(result.start_day)))
        print('End Date: {}'.format(datetime.datetime.fromordinal(result.end_day)))
        print(result.break_day)
        print('Break Date: {}'.format(datetime.datetime.fromordinal(result.break_day)))
        print('QA: {}'.format(result.curve_qa))
        print('Norm: {}\n'.format(np.linalg.norm([result.green.magnitude,
                                  result.red.magnitude,
                                  result.nir.magnitude,
                                  result.swir1.magnitude,
                                  result.swir2.magnitude])))
        print('Change prob: {}'.format(result.change_probability))
        days = np.arange(result.start_day, result.end_day + 1)
        prediction_dates.append(days)
        break_dates.append(result.break_day)
        start_dates.append(result.start_day)
        intercept = result[6+band].intercept
        coef = result[6+band].coefficients
        # Intercept + slope + three annual-harmonic cos/sin pairs.
        predicted_values.append(intercept + coef[0] * days +
                                coef[1]*np.cos(days*1*2*np.pi/365.25) + coef[2]*np.sin(days*1*2*np.pi/365.25) +
                                coef[3]*np.cos(days*2*2*np.pi/365.25) + coef[4]*np.sin(days*2*2*np.pi/365.25) +
                                coef[5]*np.cos(days*3*2*np.pi/365.25) + coef[6]*np.sin(days*3*2*np.pi/365.25))
    plt.style.use('ggplot')
    fg = plt.figure(figsize=(16,9), dpi=300)
    a1 = fg.add_subplot(2, 1, 1, xlim=(min(dates), max(dates)))
    plot_dates = np.array([datetime.datetime.fromordinal(i) for i in (dates)])
    a1.plot(plot_dates[mask], plotband[mask], 'k*', ms=2, label='Clear observation') # Observed values
    a1.plot(plot_dates[~mask], plotband[~mask], 'r+', ms=1, label='Masked observation') # Observed values masked out
    # Predicted curves
    iter = 0
    for _preddate, _predvalue in zip(prediction_dates, predicted_values):
        if iter == 0:
            # Label only the first segment so the legend has a single entry.
            a1.plot(_preddate, _predvalue, 'orange', linewidth=1, label='PyCCD Model')
            iter += 1
        else:
            a1.plot(_preddate, _predvalue, 'orange', linewidth=1)
    # Vertical markers: blue default lines at breaks, red at segment starts.
    for b in break_dates: a1.axvline(b)
    for s in start_dates: a1.axvline(s, color='r')
    if yl:
        a1.set_ylim(yl)
    plt.ylabel(ylabel)
    plt.xlabel('Date')
    a1.legend(loc=2, fontsize=5)
    plt.show()
if __name__ == '__main__':
    # Entry point: parse the CLI options documented in the module docstring
    # and process the requested pixel.
    args = docopt(__doc__, version='0.6.2')
    pixel(args)
| [
"wget.download",
"ee.Image",
"ee.ImageCollection",
"numpy.array",
"sys.exit",
"numpy.sin",
"numpy.linalg.norm",
"docopt.docopt",
"numpy.arange",
"numpy.where",
"datetime.datetime.fromordinal",
"os.path.isdir",
"os.mkdir",
"pandas.DataFrame",
"ccd.detect",
"numpy.abs",
"ee.Filter.eq",... | [((787, 802), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (800, 802), False, 'import ee\n'), ((5173, 5211), 'pandas.merge', 'pd.merge', (['df_sr', 'df_thermal'], {'on': '"""time"""'}), "(df_sr, df_thermal, on='time')\n", (5181, 5211), True, 'import pandas as pd\n'), ((5687, 5707), 'numpy.array', 'np.array', (["df['time']"], {}), "(df['time'])\n", (5695, 5707), True, 'import numpy as np\n'), ((5720, 5738), 'numpy.array', 'np.array', (["df['B1']"], {}), "(df['B1'])\n", (5728, 5738), True, 'import numpy as np\n'), ((5752, 5770), 'numpy.array', 'np.array', (["df['B2']"], {}), "(df['B2'])\n", (5760, 5770), True, 'import numpy as np\n'), ((5782, 5800), 'numpy.array', 'np.array', (["df['B3']"], {}), "(df['B3'])\n", (5790, 5800), True, 'import numpy as np\n'), ((5812, 5830), 'numpy.array', 'np.array', (["df['B4']"], {}), "(df['B4'])\n", (5820, 5830), True, 'import numpy as np\n'), ((5844, 5862), 'numpy.array', 'np.array', (["df['B5']"], {}), "(df['B5'])\n", (5852, 5862), True, 'import numpy as np\n'), ((5876, 5894), 'numpy.array', 'np.array', (["df['B7']"], {}), "(df['B7'])\n", (5884, 5894), True, 'import numpy as np\n'), ((5910, 5933), 'numpy.array', 'np.array', (["df['thermal']"], {}), "(df['thermal'])\n", (5918, 5933), True, 'import numpy as np\n'), ((5944, 5966), 'numpy.array', 'np.array', (["df['cfmask']"], {}), "(df['cfmask'])\n", (5952, 5966), True, 'import numpy as np\n'), ((5981, 6075), 'ccd.detect', 'ccd.detect', (['dates', 'blues', 'greens', 'reds', 'nirs', 'swir1s', 'swir2s', 'thermals', 'qas'], {'params': 'params'}), '(dates, blues, greens, reds, nirs, swir1s, swir2s, thermals, qas,\n params=params)\n', (5991, 6075), False, 'import ccd\n'), ((8015, 8054), 'ee.Geometry.Point', 'ee.Geometry.Point', (["point['coordinates']"], {}), "(point['coordinates'])\n", (8032, 8054), False, 'import ee\n'), ((8295, 8325), 'wget.download', 'wget.download', (['thumb', 'out_save'], {}), '(thumb, out_save)\n', (8308, 8325), False, 'import wget\n'), 
((9006, 9056), 'numpy.arange', 'np.arange', (['result.start_day', '(result.end_day + 5000)'], {}), '(result.start_day, result.end_day + 5000)\n', (9015, 9056), True, 'import numpy as np\n'), ((10908, 10931), 'pandas.DataFrame', 'pd.DataFrame', (['residuals'], {}), '(residuals)\n', (10920, 10931), True, 'import pandas as pd\n'), ((11607, 11624), 'numpy.array', 'np.array', (['predict'], {}), '(predict)\n', (11615, 11624), True, 'import numpy as np\n'), ((16473, 16505), 'docopt.docopt', 'docopt', (['__doc__'], {'version': '"""0.6.2"""'}), "(__doc__, version='0.6.2')\n", (16479, 16505), False, 'from docopt import docopt\n'), ((1163, 1174), 'numpy.abs', 'np.abs', (['lon'], {}), '(lon)\n', (1169, 1174), True, 'import numpy as np\n'), ((1232, 1242), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1240, 1242), False, 'import os, sys\n'), ((1283, 1294), 'numpy.abs', 'np.abs', (['lat'], {}), '(lat)\n', (1289, 1294), True, 'import numpy as np\n'), ((1350, 1360), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1358, 1360), False, 'import os, sys\n'), ((1429, 1470), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['_date', '"""%Y%j"""'], {}), "(_date, '%Y%j')\n", (1455, 1470), False, 'import datetime\n'), ((1556, 1566), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1564, 1566), False, 'import os, sys\n'), ((2365, 2433), 'ee.FeatureCollection', 'ee.FeatureCollection', (['"""ft:1_RZgjlcqixp-L9hyS6NYGqLaKOlnhSC35AB5M5Ll"""'], {}), "('ft:1_RZgjlcqixp-L9hyS6NYGqLaKOlnhSC35AB5M5Ll')\n", (2385, 2433), False, 'import ee\n'), ((2472, 2501), 'ee.Geometry.Point', 'ee.Geometry.Point', (['[lon, lat]'], {}), '([lon, lat])\n', (2489, 2501), False, 'import ee\n'), ((12590, 12617), 'numpy.where', 'np.where', (["(df['time'] > date)"], {}), "(df['time'] > date)\n", (12598, 12617), True, 'import numpy as np\n'), ((13222, 13263), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['(i / 1000)'], {}), '(i / 1000)\n', (13253, 13263), False, 'import datetime\n'), ((14774, 14821), 
'numpy.arange', 'np.arange', (['result.start_day', '(result.end_day + 1)'], {}), '(result.start_day, result.end_day + 1)\n', (14783, 14821), True, 'import numpy as np\n'), ((1901, 1922), 'os.path.isdir', 'os.path.isdir', (['output'], {}), '(output)\n', (1914, 1922), False, 'import os, sys\n'), ((1936, 1952), 'os.mkdir', 'os.mkdir', (['output'], {}), '(output)\n', (1944, 1952), False, 'import os, sys\n'), ((6558, 6577), 'numpy.array', 'np.array', (['residuals'], {}), '(residuals)\n', (6566, 6577), True, 'import numpy as np\n'), ((7952, 7970), 'ee.Image', 'ee.Image', (['filename'], {}), '(filename)\n', (7960, 7970), False, 'import ee\n'), ((11523, 11542), 'numpy.where', 'np.where', (['(days == i)'], {}), '(days == i)\n', (11531, 11542), True, 'import numpy as np\n'), ((15612, 15644), 'datetime.datetime.fromordinal', 'datetime.datetime.fromordinal', (['i'], {}), '(i)\n', (15641, 15644), False, 'import datetime\n'), ((2971, 2999), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_ROW"""', 'row'], {}), "('WRS_ROW', row)\n", (2983, 2999), False, 'import ee\n'), ((3184, 3212), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_ROW"""', 'row'], {}), "('WRS_ROW', row)\n", (3196, 3212), False, 'import ee\n'), ((3401, 3429), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_ROW"""', 'row'], {}), "('WRS_ROW', row)\n", (3413, 3429), False, 'import ee\n'), ((11432, 11469), 'numpy.sin', 'np.sin', (['(days * 3 * 2 * np.pi / 365.25)'], {}), '(days * 3 * 2 * np.pi / 365.25)\n', (11438, 11469), True, 'import numpy as np\n'), ((12415, 12425), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12423, 12425), False, 'import os, sys\n'), ((14020, 14067), 'datetime.datetime.fromordinal', 'datetime.datetime.fromordinal', (['result.start_day'], {}), '(result.start_day)\n', (14049, 14067), False, 'import datetime\n'), ((14106, 14151), 'datetime.datetime.fromordinal', 'datetime.datetime.fromordinal', (['result.end_day'], {}), '(result.end_day)\n', (14135, 14151), False, 'import datetime\n'), ((14224, 14271), 
'datetime.datetime.fromordinal', 'datetime.datetime.fromordinal', (['result.break_day'], {}), '(result.break_day)\n', (14253, 14271), False, 'import datetime\n'), ((14356, 14493), 'numpy.linalg.norm', 'np.linalg.norm', (['[result.green.magnitude, result.red.magnitude, result.nir.magnitude, result\n .swir1.magnitude, result.swir2.magnitude]'], {}), '([result.green.magnitude, result.red.magnitude, result.nir.\n magnitude, result.swir1.magnitude, result.swir2.magnitude])\n', (14370, 14493), True, 'import numpy as np\n'), ((11392, 11429), 'numpy.cos', 'np.cos', (['(days * 3 * 2 * np.pi / 365.25)'], {}), '(days * 3 * 2 * np.pi / 365.25)\n', (11398, 11429), True, 'import numpy as np\n'), ((15409, 15446), 'numpy.sin', 'np.sin', (['(days * 3 * 2 * np.pi / 365.25)'], {}), '(days * 3 * 2 * np.pi / 365.25)\n', (15415, 15446), True, 'import numpy as np\n'), ((2919, 2949), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_PATH"""', 'path'], {}), "('WRS_PATH', path)\n", (2931, 2949), False, 'import ee\n'), ((3132, 3162), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_PATH"""', 'path'], {}), "('WRS_PATH', path)\n", (3144, 3162), False, 'import ee\n'), ((3349, 3379), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_PATH"""', 'path'], {}), "('WRS_PATH', path)\n", (3361, 3379), False, 'import ee\n'), ((3620, 3648), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_ROW"""', 'row'], {}), "('WRS_ROW', row)\n", (3632, 3648), False, 'import ee\n'), ((3861, 3889), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_ROW"""', 'row'], {}), "('WRS_ROW', row)\n", (3873, 3889), False, 'import ee\n'), ((4100, 4128), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_ROW"""', 'row'], {}), "('WRS_ROW', row)\n", (4112, 4128), False, 'import ee\n'), ((11324, 11361), 'numpy.sin', 'np.sin', (['(days * 2 * 2 * np.pi / 365.25)'], {}), '(days * 2 * 2 * np.pi / 365.25)\n', (11330, 11361), True, 'import numpy as np\n'), ((15369, 15406), 'numpy.cos', 'np.cos', (['(days * 3 * 2 * np.pi / 365.25)'], {}), '(days * 3 * 2 * np.pi / 365.25)\n', (15375, 
15406), True, 'import numpy as np\n'), ((2849, 2885), 'ee.ImageCollection', 'ee.ImageCollection', (['"""LANDSAT/LC8_SR"""'], {}), "('LANDSAT/LC8_SR')\n", (2867, 2885), False, 'import ee\n'), ((3062, 3098), 'ee.ImageCollection', 'ee.ImageCollection', (['"""LANDSAT/LE7_SR"""'], {}), "('LANDSAT/LE7_SR')\n", (3080, 3098), False, 'import ee\n'), ((3279, 3315), 'ee.ImageCollection', 'ee.ImageCollection', (['"""LANDSAT/LT5_SR"""'], {}), "('LANDSAT/LT5_SR')\n", (3297, 3315), False, 'import ee\n'), ((11284, 11321), 'numpy.cos', 'np.cos', (['(days * 2 * 2 * np.pi / 365.25)'], {}), '(days * 2 * 2 * np.pi / 365.25)\n', (11290, 11321), True, 'import numpy as np\n'), ((15297, 15334), 'numpy.sin', 'np.sin', (['(days * 2 * 2 * np.pi / 365.25)'], {}), '(days * 2 * 2 * np.pi / 365.25)\n', (15303, 15334), True, 'import numpy as np\n'), ((3568, 3598), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_PATH"""', 'path'], {}), "('WRS_PATH', path)\n", (3580, 3598), False, 'import ee\n'), ((3809, 3839), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_PATH"""', 'path'], {}), "('WRS_PATH', path)\n", (3821, 3839), False, 'import ee\n'), ((4048, 4078), 'ee.Filter.eq', 'ee.Filter.eq', (['"""WRS_PATH"""', 'path'], {}), "('WRS_PATH', path)\n", (4060, 4078), False, 'import ee\n'), ((11216, 11253), 'numpy.sin', 'np.sin', (['(days * 1 * 2 * np.pi / 365.25)'], {}), '(days * 1 * 2 * np.pi / 365.25)\n', (11222, 11253), True, 'import numpy as np\n'), ((15257, 15294), 'numpy.cos', 'np.cos', (['(days * 2 * 2 * np.pi / 365.25)'], {}), '(days * 2 * 2 * np.pi / 365.25)\n', (15263, 15294), True, 'import numpy as np\n'), ((3489, 3534), 'ee.ImageCollection', 'ee.ImageCollection', (['"""LANDSAT/LC08/C01/T1_TOA"""'], {}), "('LANDSAT/LC08/C01/T1_TOA')\n", (3507, 3534), False, 'import ee\n'), ((3730, 3775), 'ee.ImageCollection', 'ee.ImageCollection', (['"""LANDSAT/LE07/C01/T1_TOA"""'], {}), "('LANDSAT/LE07/C01/T1_TOA')\n", (3748, 3775), False, 'import ee\n'), ((3969, 4014), 'ee.ImageCollection', 'ee.ImageCollection', 
(['"""LANDSAT/LT05/C01/T1_TOA"""'], {}), "('LANDSAT/LT05/C01/T1_TOA')\n", (3987, 4014), False, 'import ee\n'), ((11176, 11213), 'numpy.cos', 'np.cos', (['(days * 1 * 2 * np.pi / 365.25)'], {}), '(days * 1 * 2 * np.pi / 365.25)\n', (11182, 11213), True, 'import numpy as np\n'), ((15185, 15222), 'numpy.sin', 'np.sin', (['(days * 1 * 2 * np.pi / 365.25)'], {}), '(days * 1 * 2 * np.pi / 365.25)\n', (15191, 15222), True, 'import numpy as np\n'), ((15145, 15182), 'numpy.cos', 'np.cos', (['(days * 1 * 2 * np.pi / 365.25)'], {}), '(days * 1 * 2 * np.pi / 365.25)\n', (15151, 15182), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Parse intermediary data files and produce single hdf5 file with collected information about each read.
All the intermediary txt files can be deleted after this step.
Usage:
python collect_data.py test output.hdf5 {..long list of input files..} [.. optional files with restriction annot..]
Example:
python bin/collect_data.py tmp.hdf5 \
results/table/test.001.fastq.txt results/table/test.001.ids.unique.txt results/table/test.001.trimtable.txt \
results/cout/test.001.1.bridge_forward.txt results/table/test.001.GA.txt results/cout/test.001.2.ggg.txt \
results/filtered_fastq/test.001.dna.info.txt results/filtered_fastq/test.001.rna1.info.txt results/filtered_fastq/test.001.rna2.info.txt \
results/sam/test.001.dna.extended.sam results/sam/test.001.dna.sam results/sam/test.001.rna1.sam results/sam/test.001.rna2.sam \
results/bed/test.001.dna_ext.bed results/bed/test.001.rna1.bed results/bed/test.001.rna2.bed \
results/table/test.001.rna1.mme-.distances.txt results/table/test.001.rna1.mme+.distances.txt \
results/table/test.001.rna1.nla.distances.txt results/table/test.001.rna2.mme-.distances.txt \
results/table/test.001.rna2.mme+.distances.txt results/table/test.001.rna2.nla.distances.txt \
results/table/test.001.dna.nla.distances.txt
"""
import numpy as np
import h5py
import re
from sys import argv
from utils import *
# Command line: output hdf5 path plus 16 fixed intermediary files (17 args),
# optionally followed by restriction-annotation files.
if len(argv) < 18:
    # Bug fix: the f-prefix was missing, so the message printed the
    # literal "{len(argv)-1}" instead of the actual count.
    raise Exception(f"Number of inputs is {len(argv)-1}, not 17 as expected. See usage in the file.")
output_filename = argv[1]
# Unpack the 16 per-chunk intermediary tables in their fixed order.
table_file, is_dup_file, trim_file, \
bridge_file, br_err, ggg_file, \
dna_len_file, rna1_len_file, rna2_len_file, \
dna_map_file_sam, dna_map_file_sam_nonextended, rna1_map_file_sam, rna2_map_file_sam, \
dna_map_file, rna1_map_file, rna2_map_file = argv[2:18]
# Optional trailing files with restriction-enzyme annotations.
renz_files = []
if len(argv) >= 19:
    renz_files = argv[18:]
# Collect all per-read columns from the intermediary text files into one
# hdf5 file.  ``raw_read_file``, ``update_hdf5`` and ``reconstruct_by_ids``
# come from the local ``utils`` module (not shown here): raw_read_file
# extracts the listed 1-based columns under the given names, applying the
# matching modifier to each; update_hdf5 writes the dict as datasets with
# the given dtypes; reconstruct_by_ids realigns a partial table to the full
# read-id order, filling missing reads from default_dct.
outfile = h5py.File(output_filename, "w")

# Read ids and raw sequences/lengths from the fastq table.
raw_file = raw_read_file(table_file,
                         [1, 3, 5, 3, 5],
                         ['id', 'seqR1_len', 'seqR2_len', 'seqR1', 'seqR2'],
                         modifiers=[lambda x: x[1:], lambda x: len(x), lambda x: len(x), str, str], header=0)
update_hdf5(outfile, raw_file, {'id': 'S100', 'seqR1': 'S250', 'seqR2': 'S250', 'seqR1_len': int, 'seqR2_len': int})

# Bridge-adapter hits (position and mismatch count) per read.
bridge_raw_file = raw_read_file(bridge_file,
                               [3, 4, 5, 6],
                               ['has_nobridge', 'bridge_nmm', 'bridge_start', 'bridge_end'],
                               modifiers=[lambda x: int(x) for y in range(4)], header=1, bottom=1)
update_hdf5(outfile, bridge_raw_file, {'has_nobridge':bool, 'bridge_nmm':int, 'bridge_start':int, 'bridge_end':int})
del bridge_raw_file

# GA dinucleotide presence flag at the bridge position.
br_err_raw_file = raw_read_file(br_err,
                               [2],
                               ['has_GA'],
                               modifiers=[lambda x: int(x)], header=0)
update_hdf5(outfile, br_err_raw_file, bool)
del br_err_raw_file

# Trimmomatic-style trimming positions for forward/reverse reads.
trim_raw_file = raw_read_file(trim_file,
                              [5, 7],
                              ['trimF', 'trimR'],
                              modifiers=[lambda x: int(x) for y in range(2)], header=0)
update_hdf5(outfile, trim_raw_file, int)
del trim_raw_file

# GGG-prefix detection (start of RNA part on R2).
ggg_raw_file = raw_read_file(ggg_file,
                             [3, 5, 6],
                             ['has_noggg', 'ggg_start', 'ggg_end'],
                             modifiers=[lambda x: int(x) for y in range(3)], header=1, bottom=1)
update_hdf5(outfile, ggg_raw_file, {'has_noggg':bool, 'ggg_start':int, 'ggg_end':int})
del ggg_raw_file

# DNA / RNA segment coordinates and lengths (with and without trimming).
dna_len_raw_file = raw_read_file(dna_len_file,
                                 [2, 3, 4, 5, 6],
                                 ['dna_R1_start', 'dna_R1_end', 'dna_R1_len_notrim', 'dna_R1_end_trim',
                                  'dna_R1_len_trim'],
                                 modifiers=[lambda x: int(x) for y in range(5)], header=0)
update_hdf5(outfile, dna_len_raw_file, int)
del dna_len_raw_file

rna2_len_raw_file = raw_read_file(rna2_len_file,
                                  [2, 3, 4, 5, 6],
                                  ['rna2_R2_start', 'rna2_R2_end', 'rna2_R2_len_notrim', 'rna2_R2_end_trim',
                                   'rna2_R2_len_trim'],
                                  modifiers=[lambda x: int(x) for y in range(5)], header=0)
update_hdf5(outfile, rna2_len_raw_file, int)
del rna2_len_raw_file

rna1_len_raw_file = raw_read_file(rna1_len_file,
                                  [2, 3, 4, 5, 6],
                                  ['rna1_R1_start', 'rna1_R1_end', 'rna1_R1_len_notrim', 'rna1_R1_end_trim',
                                   'rna1_R1_len_trim'],
                                  modifiers=[(lambda x: int(x)) for y in range(5)], header=0)
update_hdf5(outfile, rna1_len_raw_file, int)
del rna1_len_raw_file

# Mark reads that survived fastuniq deduplication (1 = unique / kept).
dct_ids = {k: 0 for k in raw_file['id']}
with open(is_dup_file, 'r') as inf:
    # NOTE(review): the first line is read here and never marked in dct_ids
    # (only subsequent reads inside the loop are) — confirm whether the file
    # has a header line; otherwise the first id is silently dropped.
    l = inf.readline().strip()
    while len(l) > 0:
        l = inf.readline().strip()
        # Skip if fastuniq output is for larger file and contains extra ids
        # (dedup is done on full library, not the chunks)
        # NOTE(review): assignment never raises KeyError, so extra ids are
        # actually added to dct_ids rather than skipped; harmless because the
        # comprehension below only looks up the original read ids.
        try:
            dct_ids[l] = 1
        except Exception as e:
            pass
fastuniq_dct = {'is_notPCRdup': [dct_ids[l] for l in raw_file['id']]}
update_hdf5(outfile, fastuniq_dct, int)
del fastuniq_dct, dct_ids

# DNA part alignment (extended reads): SAM flag 4 means unmapped,
# NH:i:1 means uniquely mapped.  Reads absent from the SAM get defaults.
dna_map_raw_file = raw_read_file(dna_map_file_sam,
                                 [1, 2, -1, 3, 6],
                                 ['id', 'dna_is_mapped', 'dna_is_not_multi', 'dna_chr', 'dna_cigar'],
                                 modifiers=[str, lambda x: 0 if x == '4' else 1, lambda x: 1 if x == 'NH:i:1' else 0,
                                            str, str], header=0, comment="@")
dna_map_raw_file_inferred = \
    reconstruct_by_ids(dna_map_raw_file, 'id', raw_file['id'],
                       default_dct={'dna_is_mapped': 0, 'dna_is_not_multi': 0, 'dna_chr': '-', 'dna_cigar': ''})
del dna_map_raw_file_inferred['id']
update_hdf5(outfile, dna_map_raw_file_inferred,
            {'dna_is_mapped': bool, 'dna_is_not_multi': bool, 'dna_chr': 'S8', 'dna_cigar': 'S20'})
# Total skipped-region (N) length parsed out of the CIGAR string.
outfile.create_dataset('dna_nlen', data=np.array( \
    [np.sum([int(m[0]) if m[1] == 'N' else 0 for m in re.findall(r'(\d+)([A-Z]{1})', x)]) for x in
     dna_map_raw_file_inferred['dna_cigar']]))
del dna_map_raw_file_inferred, dna_map_raw_file

# RNA2 part alignment (same scheme as DNA above).
rna2_map_raw_file = raw_read_file(rna2_map_file_sam,
                                  [1, 2, -1, 3, 6],
                                  ['id', 'rna2_is_mapped', 'rna2_is_not_multi', 'rna2_chr', 'rna2_cigar'],
                                  modifiers=[str, lambda x: 0 if x == '4' else 1, lambda x: 1 if x == 'NH:i:1' else 0,
                                             str, str], header=0, comment="@")
rna2_map_raw_file_inferred = \
    reconstruct_by_ids(rna2_map_raw_file, 'id', raw_file['id'],
                       default_dct={'rna2_is_mapped': 0, 'rna2_is_not_multi': 0, 'rna2_chr': '-', 'rna2_start': -1,
                                    'rna2_cigar': ''})
del rna2_map_raw_file_inferred['id']
update_hdf5(outfile, rna2_map_raw_file_inferred,
            {'rna2_is_mapped': bool, 'rna2_is_not_multi': bool, 'rna2_chr': 'S8', 'rna2_cigar': 'S20'})
outfile.create_dataset('rna2_nlen', data=np.array( \
    [np.sum([int(m[0]) if m[1] == 'N' else 0 for m in re.findall(r'(\d+)([A-Z]{1})', x)]) for x in
     rna2_map_raw_file_inferred['rna2_cigar']]))
del rna2_map_raw_file_inferred, rna2_map_raw_file

# RNA1 part alignment.
rna1_map_raw_file = raw_read_file(rna1_map_file_sam,
                                  [1, 2, -1, 3, 6],
                                  ['id', 'rna1_is_mapped', 'rna1_is_not_multi', 'rna1_chr', 'rna1_cigar'],
                                  modifiers=[str, lambda x: 0 if x == '4' else 1, lambda x: 1 if x == 'NH:i:1' else 0,
                                             str, str], header=0, comment="@")
rna1_map_raw_file_inferred = \
    reconstruct_by_ids(rna1_map_raw_file, 'id', raw_file['id'],
                       default_dct={'rna1_is_mapped': 0, 'rna1_is_not_multi': 0, 'rna1_chr': '-', 'rna1_cigar': ''})
del rna1_map_raw_file_inferred['id']
update_hdf5(outfile, rna1_map_raw_file_inferred,
            {'rna1_is_mapped': bool, 'rna1_is_not_multi': bool, 'rna1_chr': 'S8', 'rna1_cigar': 'S20'})
outfile.create_dataset('rna1_nlen', data=np.array(
    [np.sum([int(m[0]) if m[1] == 'N' else 0 for m in re.findall(r'(\d+)([A-Z]{1})', x)]) for x in
     rna1_map_raw_file_inferred['rna1_cigar']]))
del rna1_map_raw_file_inferred, rna1_map_raw_file

# DNA part alignment of the *non-extended* reads, for comparison.
dna_map_raw_file_nonextended = raw_read_file(dna_map_file_sam_nonextended,
                              [1, 2, -1, 3, 6],
                              ['id', 'dna_nonextended_is_mapped', 'dna_nonextended_is_not_multi', 'dna_nonextended_chr',
                               'dna_nonextended_cigar'],
                              modifiers=[str, lambda x: 0 if x == '4' else 1, lambda x: 1 if x == 'NH:i:1' else 0,
                                         str, str], header=0, comment="@")
dna_map_raw_file_inferred = \
    reconstruct_by_ids(dna_map_raw_file_nonextended, 'id', raw_file['id'],
                       default_dct={'dna_nonextended_is_mapped': 0, 'dna_nonextended_is_not_multi': 0, 'dna_nonextended_chr': '-',
                                    'dna_nonextended_cigar': ''})
del dna_map_raw_file_inferred['id']
update_hdf5(outfile, dna_map_raw_file_inferred,
            {'dna_nonextended_is_mapped': bool, 'dna_nonextended_is_not_multi': bool, 'dna_nonextended_chr': 'S8',
             'dna_nonextended_cigar': 'S20'})
del dna_map_raw_file_inferred, dna_map_raw_file_nonextended

# Reading extended bed file:
# Genomic coordinates and strand ('+' -> 1) from the bed files.
dna_map_raw_file = raw_read_file(dna_map_file,
                                 [2, 3, 4, 6],
                                 ['dna_start', 'dna_end', 'id', 'dna_strand'],
                                 modifiers=[int, int, str, lambda x: 1 if x == '+' else 0], header=0, comment="@")
dna_map_raw_file_inferred = \
    reconstruct_by_ids(dna_map_raw_file, 'id', raw_file['id'],
                       default_dct={'dna_start': 0, 'dna_end': 0, 'dna_strand': 0})
del dna_map_raw_file_inferred['id']
update_hdf5(outfile, dna_map_raw_file_inferred, {'dna_start': int, 'dna_end': int, 'dna_strand': bool})
del dna_map_raw_file_inferred, dna_map_raw_file

rna2_map_raw_file = raw_read_file(rna2_map_file,
                                  [2, 3, 4, 6],
                                  ['rna2_start', 'rna2_end', 'id', 'rna2_strand'],
                                  modifiers=[int, int, str, lambda x: 1 if x == '+' else 0], header=0, comment="@")
rna2_map_raw_file_inferred = \
    reconstruct_by_ids(rna2_map_raw_file, 'id', raw_file['id'],
                       default_dct={'rna2_start': 0, 'rna2_end': 0, 'rna2_strand': 0})
del rna2_map_raw_file_inferred['id']
update_hdf5(outfile, rna2_map_raw_file_inferred, {'rna2_start': int, 'rna2_end': int, 'rna2_strand': bool})
del rna2_map_raw_file_inferred, rna2_map_raw_file

rna1_map_raw_file = raw_read_file(rna1_map_file,
                                  [2, 3, 4, 6],
                                  ['rna1_start', 'rna1_end', 'id', 'rna1_strand'],
                                  modifiers=[int, int, str, lambda x: 1 if x == '+' else 0], header=0, comment="@")
rna1_map_raw_file_inferred = \
    reconstruct_by_ids(rna1_map_raw_file, 'id', raw_file['id'],
                       default_dct={'rna1_start': 0, 'rna1_end': 0, 'rna1_strand': 0})
del rna1_map_raw_file_inferred['id']
update_hdf5(outfile, rna1_map_raw_file_inferred, {'rna1_start': int, 'rna1_end': int, 'rna1_strand': bool})
del rna1_map_raw_file_inferred, rna1_map_raw_file

# Adjust the trimmed length by oligos found in read:
# Keep the smaller of trimmed/untrimmed lengths for each segment.
dna_len_trim = np.minimum(outfile['dna_R1_len_trim'][()], outfile['dna_R1_len_notrim'][()] )
rna2_len_trim = np.minimum( outfile['rna2_R2_len_trim'][()], outfile['rna2_R2_len_notrim'][()] )
rna1_len_trim = np.minimum( outfile['rna1_R1_len_trim'][()], outfile['rna1_R1_len_notrim'][()] )
outfile.create_dataset('dna_R1_len_trim_adjusted', data=np.array(dna_len_trim, dtype=int))
outfile.create_dataset('rna2_R2_len_trim_adjusted', data=np.array(rna2_len_trim, dtype=int))
outfile.create_dataset('rna1_R1_len_trim_adjusted', data=np.array(rna1_len_trim, dtype=int))
### Filtering by restiriction enzyme recognition site:
renz_keys = []
for renz_file in renz_files:
header = open(renz_file, 'r').readline().strip().split()
renz_keys += header[1:]
renz_raw_file = raw_read_file(renz_file,
[1, 2, 3, 4, 5],
header,
sep=' ',
modifiers=[str]+[int for x in range(4)],
header=1)
renz_raw_file_inferred = \
reconstruct_by_ids(renz_raw_file, 'id', raw_file['id'],
default_dct={x: 0 for x in header[1:]})
del renz_raw_file_inferred['id']
update_hdf5(outfile, renz_raw_file_inferred, {x: int for x in header[1:]})
del renz_raw_file_inferred, renz_raw_file
outfile.close()
| [
"numpy.array",
"re.findall",
"numpy.minimum",
"h5py.File"
] | [((1880, 1911), 'h5py.File', 'h5py.File', (['output_filename', '"""w"""'], {}), "(output_filename, 'w')\n", (1889, 1911), False, 'import h5py\n'), ((11826, 11902), 'numpy.minimum', 'np.minimum', (["outfile['dna_R1_len_trim'][()]", "outfile['dna_R1_len_notrim'][()]"], {}), "(outfile['dna_R1_len_trim'][()], outfile['dna_R1_len_notrim'][()])\n", (11836, 11902), True, 'import numpy as np\n'), ((11920, 11998), 'numpy.minimum', 'np.minimum', (["outfile['rna2_R2_len_trim'][()]", "outfile['rna2_R2_len_notrim'][()]"], {}), "(outfile['rna2_R2_len_trim'][()], outfile['rna2_R2_len_notrim'][()])\n", (11930, 11998), True, 'import numpy as np\n'), ((12017, 12095), 'numpy.minimum', 'np.minimum', (["outfile['rna1_R1_len_trim'][()]", "outfile['rna1_R1_len_notrim'][()]"], {}), "(outfile['rna1_R1_len_trim'][()], outfile['rna1_R1_len_notrim'][()])\n", (12027, 12095), True, 'import numpy as np\n'), ((12154, 12187), 'numpy.array', 'np.array', (['dna_len_trim'], {'dtype': 'int'}), '(dna_len_trim, dtype=int)\n', (12162, 12187), True, 'import numpy as np\n'), ((12246, 12280), 'numpy.array', 'np.array', (['rna2_len_trim'], {'dtype': 'int'}), '(rna2_len_trim, dtype=int)\n', (12254, 12280), True, 'import numpy as np\n'), ((12339, 12373), 'numpy.array', 'np.array', (['rna1_len_trim'], {'dtype': 'int'}), '(rna1_len_trim, dtype=int)\n', (12347, 12373), True, 'import numpy as np\n'), ((6287, 6320), 're.findall', 're.findall', (['"""(\\\\d+)([A-Z]{1})"""', 'x'], {}), "('(\\\\d+)([A-Z]{1})', x)\n", (6297, 6320), False, 'import re\n'), ((7398, 7431), 're.findall', 're.findall', (['"""(\\\\d+)([A-Z]{1})"""', 'x'], {}), "('(\\\\d+)([A-Z]{1})', x)\n", (7408, 7431), False, 'import re\n'), ((8462, 8495), 're.findall', 're.findall', (['"""(\\\\d+)([A-Z]{1})"""', 'x'], {}), "('(\\\\d+)([A-Z]{1})', x)\n", (8472, 8495), False, 'import re\n')] |
# Object Detector
# Developed by <NAME> : November 2018
#
# Developped on : Python 3.6.5.final.0 (Conda 4.5.11), OpenCV 3.4.1, Numpy 1.14.3
# The programs first extracts the circles (Hough Transform) on each frame,
# then compares each circle with the object using the SIFT detector.
# Execute as follows : detect.py -i positive.avi -o export.csv
import cv2
import numpy as np
import csv
import sys
# Extract circle array from an image
def image_circles(image):
    """Detect circles in a BGR frame with the Hough transform.

    The frame is grayscaled and median-blurred first.  Returns the raw
    cv2.HoughCircles output: an array of (x, y, radius) rows, or None
    when no circle is found.
    """
    blurred = cv2.medianBlur(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 5)
    # Minimum distance between circle centres: 1/8 of the image height.
    min_dist = blurred.shape[0] / 8
    return cv2.HoughCircles(
        blurred, cv2.HOUGH_GRADIENT, 1, min_dist,
        param1=70, param2=30, minRadius=1, maxRadius=100)
# draw circles from circle array on an image
def draw_circles(image, circles):
    """Overlay every detected circle (centre dot + outline) on *image*.

    *circles* is the raw cv2.HoughCircles result; None is a no-op.
    Drawing happens in place.
    """
    if circles is None:
        return
    rounded = np.uint16(np.around(circles))
    for x, y, radius in rounded[0, :]:
        # small dot at the centre, then the outline
        cv2.circle(image, (x, y), 1, (0, 100, 100), 3)
        cv2.circle(image, (x, y), radius, (255, 0, 255), 3)
# draw one circle from propreties array [x,y,radius] on an image
def draw_circle(image, circle):
    """Draw a single (x, y, radius) circle on *image* in place.

    A None *circle* is silently ignored.
    """
    if circle is None:
        return
    x, y, radius = np.uint16(np.around(circle))
    # centre marker followed by the circle outline
    cv2.circle(image, (x, y), 1, (0, 100, 100), 3)
    cv2.circle(image, (x, y), radius, (255, 0, 255), 3)
# draw a bounding box using the circle properties
def draw_box(image, circle):
    """Draw the circle's axis-aligned bounding box on *image* in place.

    *circle* is an (x, y, radius) triple; None is a no-op.
    """
    if circle is None:
        return
    # BUGFIX: round to plain Python ints.  The original np.uint16 cast made
    # ``xc - radius`` wrap around to ~65535 for circles overlapping the
    # left/top image edge, drawing the box at a bogus position.
    xc, yc, radius = (int(round(float(v))) for v in circle)
    cv2.rectangle(image, (xc - radius, yc - radius),
                  (xc + radius, yc + radius), (0, 255, 0), 3)
# crop image and avoid overpassing the limits
def image_crop(image, y, x, r):
    """Return image[x-r:x+r, y-r:y+r], clamped to the image bounds.

    Note the parameter order: *x* indexes rows (axis 0) and *y* indexes
    columns (axis 1) — the caller passes Hough (x, y) pixel coordinates
    swapped on purpose.
    """
    row_lo = max(x - r, 0)
    row_hi = min(x + r, image.shape[0])
    col_lo = max(y - r, 0)
    col_hi = min(y + r, image.shape[1])
    return image[row_lo:row_hi, col_lo:col_hi]
# return the number of matches between the keypoints of an image and the keypoints entered
def matches_number(sift, img, kp1, des1):
    """Count Lowe-ratio "good" SIFT matches between the template and *img*.

    kp1 / des1 are the keypoints and descriptors of the positive
    (template) image.  Returns the number of good matches, or None when
    either image has too few keypoints/descriptors for a k=2 kNN match.
    """
    kp2, des2 = sift.detectAndCompute(img, None)
    # Not enough features on either side: no meaningful match possible.
    if not kp1 or not kp2:
        return None
    if len(kp1) <= 2 or len(kp2) <= 2:
        return None
    # BUGFIX: the original tested ``len(des1 >= 2)`` — the length of a
    # boolean comparison array, not a count check — and left ``matches``
    # unbound (NameError) whenever the condition was false.
    if des1 is None or des2 is None or len(des1) < 2 or len(des2) < 2:
        return None
    flann_index_kdtree = 0
    index_params = dict(algorithm=flann_index_kdtree, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Lowe's ratio test: keep a match only when it is clearly better
    # than the second-best candidate.
    distance_min = 0.65
    good = [m for m, n in matches if m.distance < distance_min * n.distance]
    return len(good)
# initialize csv file and erase old content
def csv_initialize(file):
    """Create *file* (or truncate it if it exists) so the export starts empty.

    Opening in 'w' mode already truncates; the unused csv.writer the
    original constructed had no effect and is dropped.
    """
    with open(file, mode='w'):
        pass
# add a row at the end of the csv file
def csv_addrow(file, circle, frameid):
    """Append one detection row to the CSV: frameid, x, y, width, height.

    The box's top-left corner is (xc - r, yc - r) with size 2r x 2r.
    """
    # BUGFIX: round to plain Python ints.  The original np.uint16 cast made
    # ``xc - radius`` wrap to ~65535 for circles overlapping the left/top
    # image edge, writing garbage coordinates into the export file.
    xc, yc, radius = (int(round(float(v))) for v in circle)
    with open(file, mode='a') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',', lineterminator='\n',
                                quotechar='"', quoting=csv.QUOTE_MINIMAL)
        csv_writer.writerow([frameid, xc - radius, yc - radius,
                             2 * radius, 2 * radius])
# Interface management
def interface():
    """Parse ``-i <input> -o <output>`` from sys.argv.

    Returns (input_path, output_path); raises Exception for any other
    command-line shape.
    """
    args = sys.argv
    well_formed = len(args) == 5 and args[1] == "-i" and args[3] == "-o"
    if not well_formed:
        raise Exception("Interface Error ! Use the following format : detector.py -i positive.avi -o export.csv")
    return str(args[2]), str(args[4])
# ***********
# MAIN PROGRAM
# ***********
# ***********
video_file, export_file = interface()
cam = cv2.VideoCapture(video_file)
positive = cv2.imread('positive.png')
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(positive, None) # calculates the keypoints and the descriptors of the positive image
frameId = 0 # the current frame
csv_initialize(export_file)
# Parameters
threshold_value = 70
NB_matches_min = 7
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 30.0, (640, 480))
# Loop of the video frames
while True:
    frameId = frameId + 1
    ret, image = cam.read()
    # BUGFIX: stop cleanly at end of stream — cam.read() returns
    # (False, None) there, and the original crashed in cv2.threshold
    # on the None frame.
    if not ret:
        break
    # Thresholding image
    retval, image_r = cv2.threshold(image, threshold_value, 255, cv2.THRESH_BINARY)
    circles = image_circles(image_r) # Play on : image or image_r : to activate or disable thresholding
    # if we have circles in frame
    if np.count_nonzero(circles) != 0:
        # Loop on the different circles
        for circle in circles[0, :]:
            x, y, r = circle.astype(int)
            crop_img = image_crop(image, x, y, r)
            NB_matches = matches_number(sift, crop_img, kp1, des1)
            print("number of matches :", NB_matches)
            if NB_matches is not None:
                # if we have enough matches draw the box and add the coordinates to the export file
                if NB_matches > NB_matches_min:
                    draw_box(image, circle)
                    csv_addrow(export_file, circle, frameId)
    # write the annotated frame to the output video
    out.write(image)
    # draw_circles(image,circles) #to draw all circles given by hough transform
    cv2.imshow('result', image)
    if cv2.waitKey(10) == ord('q'):
        break
cam.release()
# BUGFIX: release the VideoWriter so output.avi is finalized properly.
out.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.threshold",
"csv.writer",
"cv2.medianBlur",
"cv2.HoughCircles",
"cv2.VideoWriter",
"cv2.imshow",
"numpy.count_nonzero",
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.xfeatures2d.SIFT_create",
"cv2.cvtColor",... | [((4477, 4505), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (4493, 4505), False, 'import cv2\n'), ((4518, 4544), 'cv2.imread', 'cv2.imread', (['"""positive.png"""'], {}), "('positive.png')\n", (4528, 4544), False, 'import cv2\n'), ((4579, 4608), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (4606, 4608), False, 'import cv2\n'), ((4861, 4892), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (4883, 4892), False, 'import cv2\n'), ((4900, 4955), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.avi"""', 'fourcc', '(30.0)', '(640, 480)'], {}), "('output.avi', fourcc, 30.0, (640, 480))\n", (4915, 4955), False, 'import cv2\n'), ((6175, 6198), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6196, 6198), False, 'import cv2\n'), ((492, 531), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (504, 531), False, 'import cv2\n'), ((544, 567), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (558, 567), False, 'import cv2\n'), ((609, 719), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray', 'cv2.HOUGH_GRADIENT', '(1)', '(rows / 8)'], {'param1': '(70)', 'param2': '(30)', 'minRadius': '(1)', 'maxRadius': '(100)'}), '(gray, cv2.HOUGH_GRADIENT, 1, rows / 8, param1=70, param2=\n 30, minRadius=1, maxRadius=100)\n', (625, 719), False, 'import cv2\n'), ((5102, 5163), 'cv2.threshold', 'cv2.threshold', (['image', 'threshold_value', '(255)', 'cv2.THRESH_BINARY'], {}), '(image, threshold_value, 255, cv2.THRESH_BINARY)\n', (5115, 5163), False, 'import cv2\n'), ((6079, 6106), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'image'], {}), "('result', image)\n", (6089, 6106), False, 'import cv2\n'), ((1504, 1550), 'cv2.circle', 'cv2.circle', (['image', 'center', '(1)', '(0, 100, 100)', '(3)'], {}), '(image, center, 1, (0, 100, 100), 3)\n', (1514, 1550), False, 'import 
cv2\n'), ((1609, 1660), 'cv2.circle', 'cv2.circle', (['image', 'center', 'radius', '(255, 0, 255)', '(3)'], {}), '(image, center, radius, (255, 0, 255), 3)\n', (1619, 1660), False, 'import cv2\n'), ((1924, 2020), 'cv2.rectangle', 'cv2.rectangle', (['image', '(xc - radius, yc - radius)', '(xc + radius, yc + radius)', '(0, 255, 0)', '(3)'], {}), '(image, (xc - radius, yc - radius), (xc + radius, yc + radius),\n (0, 255, 0), 3)\n', (1937, 2020), False, 'import cv2\n'), ((3070, 3120), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (3091, 3120), False, 'import cv2\n'), ((3529, 3631), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'lineterminator': '"""\n"""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', lineterminator=\'\\n\', quotechar=\'"\',\n quoting=csv.QUOTE_MINIMAL)\n', (3539, 3631), False, 'import csv\n'), ((3736, 3753), 'numpy.around', 'np.around', (['circle'], {}), '(circle)\n', (3745, 3753), True, 'import numpy as np\n'), ((3883, 3985), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'lineterminator': '"""\n"""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(csv_file, delimiter=\',\', lineterminator=\'\\n\', quotechar=\'"\',\n quoting=csv.QUOTE_MINIMAL)\n', (3893, 3985), False, 'import csv\n'), ((5315, 5340), 'numpy.count_nonzero', 'np.count_nonzero', (['circles'], {}), '(circles)\n', (5331, 5340), True, 'import numpy as np\n'), ((6115, 6130), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (6126, 6130), False, 'import cv2\n'), ((942, 960), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (951, 960), True, 'import numpy as np\n'), ((1072, 1118), 'cv2.circle', 'cv2.circle', (['image', 'center', '(1)', '(0, 100, 100)', '(3)'], {}), '(image, center, 1, (0, 100, 100), 3)\n', (1082, 1118), False, 'import cv2\n'), ((1189, 1240), 'cv2.circle', 'cv2.circle', (['image', 
'center', 'radius', '(255, 0, 255)', '(3)'], {}), '(image, center, radius, (255, 0, 255), 3)\n', (1199, 1240), False, 'import cv2\n'), ((1400, 1417), 'numpy.around', 'np.around', (['circle'], {}), '(circle)\n', (1409, 1417), True, 'import numpy as np\n'), ((1802, 1819), 'numpy.around', 'np.around', (['circle'], {}), '(circle)\n', (1811, 1819), True, 'import numpy as np\n')] |
# Piecewise-constant Neural ODEs
# <NAME>, <NAME>, <NAME>
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import matplotlib.collections as mcollections
from skimage.transform import resize
from moviepy.editor import ImageSequenceClip
##### GENERIC PLOTTING UTILITIES #####
def fig2image(fig):
    """Render a matplotlib figure and return it as an (H, W, 3) uint8 RGB array."""
    fig.canvas.draw()
    # np.fromstring on binary data is deprecated; np.frombuffer is the
    # supported (and copy-free) replacement for decoding the RGB buffer.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    # get_width_height() is (width, height); reverse for (rows, cols).
    return data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
##### CODE SPECIFIC TO 'BILLIARDS' DATASET #####
class UpdatablePatchCollection(mcollections.PatchCollection):
    # PatchCollection normally caches its patches' paths; this subclass
    # keeps a reference to the live patch list and re-reads it on every
    # draw, so that moving the underlying Circle patches is reflected in
    # the rendered collection.  (See matplotlib PatchCollection docs.)
    def __init__(self, patches, *args, **kwargs):
        self.patches = patches
        mcollections.PatchCollection.__init__(self, patches, *args, **kwargs)
    def get_paths(self):
        # Refresh the internal path cache from the current patch geometry.
        self.set_paths(self.patches)
        return self._paths
def process_images(raw_ims, image_every=1):
    """Crop, grayscale and downsample rendered frames to 28x28 arrays.

    Takes every *image_every*-th frame, crops a border proportional to
    the first frame's height (with a small x/y offset), averages the
    color channels, resizes to 28x28 and divides by 255.
    """
    side_len = raw_ims[0].shape[0]
    k = int(0.15 * side_len)
    dx = int(0.05 * side_len)
    dy = int(0.02 * side_len)
    processed = [
        resize(im[k + dy:-k + dy, k + dx:-k + dx].mean(-1), (28, 28)) / 255.
        for im in raw_ims[::image_every]
    ]
    return np.stack(processed)
def update_plot(fig, x, balls):
    """Move each Circle patch to its new position in *x* and redraw *fig*.

    *x* has one (x, y) row per ball.  Two-ball scenes are drawn black;
    larger scenes use the default matplotlib color cycle.
    """
    colors = (['#000000', '#000000'] if len(balls) == 2
              else plt.rcParams['axes.prop_cycle'].by_key()['color'])
    for j, ball in enumerate(balls):
        ball.set_center((x[j, 0], x[j, 1]))
        ball.set_fc(colors[j])
    fig.canvas.draw()
    fig.canvas.flush_events()
def coords2images(xs, r, fig=None, process=False, figsize=(2,2), dpi=50, **args):
    """Render ball-coordinate trajectories to a stack of RGB frames.

    Parameters
    ----------
    xs : array indexed as xs[frame, ball, coord]; xs[i, j, 0] / xs[i, j, 1]
        are ball j's x/y position at frame i, in axis coordinates [0, 1].
    r : ball radius in axis units.
    fig : optional matplotlib figure to draw into (a new one is created
        when None).
    process : when True, pass the frames through process_images(**args).
    """
    fig = plt.figure(figsize=figsize, dpi=dpi) if fig is None else fig
    plt.ion()
    # One Circle patch per ball; the updatable collection lets us move
    # them between frames without rebuilding the axes.
    balls = [Circle((0,0), r) for _ in range(xs.shape[-2])]
    collection = UpdatablePatchCollection(balls)
    ax = fig.gca()
    ax.add_artist(collection)
    [ax.add_artist(b) for b in balls]
    # Unit square, square pixels, no tick marks.
    ax.set_xlim(0, 1) ; ax.set_ylim(0, 1)
    ax.set_aspect('equal', adjustable='box')
    ax.get_xaxis().set_ticks([]) ; ax.get_yaxis().set_ticks([])
    images = []
    for i in range(xs.shape[0]):
        update_plot(fig, xs[i], balls)
        images.append( fig2image(fig) )
    plt.close()
    images = np.stack(images)
    return process_images(images, **args) if process else images
return process_images(images, **args) if process else images
def tensor2videoframes(x):
n, w, h = x.shape
frames = []
for x_i in x:
x_i = x_i[...,np.newaxis].repeat(3,-1)
x_i = resize(x_i, (6*w, 6*h), anti_aliasing=False) * 255
frames.append(x_i)
return frames | [
"matplotlib.pyplot.close",
"numpy.stack",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ion",
"matplotlib.patches.Circle",
"matplotlib.collections.PatchCollection.__init__",
"skimage.transform.resize"
] | [((1201, 1218), 'numpy.stack', 'np.stack', (['new_ims'], {}), '(new_ims)\n', (1209, 1218), True, 'import numpy as np\n'), ((1703, 1712), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1710, 1712), True, 'import matplotlib.pyplot as plt\n'), ((2164, 2175), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2173, 2175), True, 'import matplotlib.pyplot as plt\n'), ((2187, 2203), 'numpy.stack', 'np.stack', (['images'], {}), '(images)\n', (2195, 2203), True, 'import numpy as np\n'), ((718, 787), 'matplotlib.collections.PatchCollection.__init__', 'mcollections.PatchCollection.__init__', (['self', 'patches', '*args'], {}), '(self, patches, *args, **kwargs)\n', (755, 787), True, 'import matplotlib.collections as mcollections\n'), ((1639, 1675), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (1649, 1675), True, 'import matplotlib.pyplot as plt\n'), ((1724, 1741), 'matplotlib.patches.Circle', 'Circle', (['(0, 0)', 'r'], {}), '((0, 0), r)\n', (1730, 1741), False, 'from matplotlib.patches import Circle\n'), ((1141, 1161), 'skimage.transform.resize', 'resize', (['im', '(28, 28)'], {}), '(im, (28, 28))\n', (1147, 1161), False, 'from skimage.transform import resize\n'), ((2398, 2446), 'skimage.transform.resize', 'resize', (['x_i', '(6 * w, 6 * h)'], {'anti_aliasing': '(False)'}), '(x_i, (6 * w, 6 * h), anti_aliasing=False)\n', (2404, 2446), False, 'from skimage.transform import resize\n')] |
import os, sys
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from anndata import read_mtx
import pegasus as pg
from termcolor import cprint
from sklearn.metrics import silhouette_score
# Correction methods this benchmark script knows how to process.
method_list = ["baseline", "pegasus", "seurat", "mnn", "combat", "bbknn"]
# Figure S2 panel letter assigned to each method's UMAP plot.
figure_index = {'baseline' : 'B',
                'pegasus' : 'C',
                'combat' : 'D',
                'mnn' : 'E',
                'bbknn' : 'F',
                'seurat' : 'G'
               }
# Human-readable method names used in figures and the measure table.
method_print_name = {'baseline' : 'Baseline',
                     'pegasus' : 'Pegasus',
                     'combat' : 'ComBat',
                     'mnn' : 'MNN',
                     'bbknn' : 'BBKNN',
                     'seurat' : 'Seurat v3'
                    }
# (method name, kSIM accept rate, kBET accept rate) tuples appended by process_data().
measure_result = []
# CSV of precomputed measures, read by plot_scatter(precomputed=True).
measure_precomputed_file = "correction_benchmark.txt"
# Fixed per-cluster color palette handed to the `pegasus plot` CLI.
palettes_cluster = "#1d6cab,#ff7410,#279627,#d02324,#875bb2,#814c42,#df6cba,#b4b51f,#17b6c9,#a5c0e5,#ffb36d,#8dda7f,#777777,#bda7cf,#bc9189,#f6adcc,#d6d682,#94d5e1,#a34041"
def process_baseline():
    """Benchmark the uncorrected (baseline) pegasus embedding."""
    cprint("For pegasus with no batch correction:", "red")
    input_path = "./ground/ground.h5ad"
    if not os.path.exists(input_path):
        cprint("No processed data are found! Please first process data using pegasus without batch correction.", "red")
        sys.exit(1)
    cprint("Loading processed data...", "green")
    data = pg.read_input(input_path)
    # Embedding already exists on disk, so skip PCA/KNN/UMAP recomputation.
    process_data(data, method='baseline', processed=True)
def process_pegasus():
    """Benchmark the pegasus batch-corrected embedding.

    Expects ./pegasus/pegasus_corrected.h5ad to exist; exits with
    status 1 otherwise.
    """
    cprint("For pegasus:", "red")
    if not os.path.exists("./pegasus/pegasus_corrected.h5ad"):
        cprint("No corrected data are found! Please first correct data using pegasus.", "red")
        sys.exit(1)
    cprint("Loading corrected data...", "green")
    adata = pg.read_input('./pegasus/pegasus_corrected.h5ad')
    # processed=True: the stored file already carries the embedding.
    process_data(adata, method = 'pegasus', processed = True)
def process_mnn():
    """Benchmark the MNN-corrected data (scanpy output).

    Expects ./mnn/scanpy_mnn_corrected.h5ad; exits with status 1 otherwise.
    """
    cprint("For MNN:", "red")
    if not os.path.exists("./mnn/scanpy_mnn_corrected.h5ad"):
        cprint("No corrected data are found! Please first correct data using MNN.", "red")
        sys.exit(1)
    cprint("Loading corrected data...", "green")
    adata = pg.read_input('./mnn/scanpy_mnn_corrected.h5ad')
    cprint("Correcting cell names...", "green")
    # Strip the trailing "-<suffix>" (presumably added by the MNN
    # pipeline) so cell ids match the ground-truth table again.
    adata.obs['cell_id'] = adata.obs.index.map(lambda s: s[:s.rfind('-')]).values
    adata.obs.set_index('cell_id', inplace = True)
    process_data(adata, method = 'mnn', output = "./mnn/scanpy_mnn_result")
def process_seurat():
    """Benchmark the Seurat CCA-corrected data.

    Seurat results are expected as a 10x-style triple (matrix.mtx,
    genes.txt, barcodes.txt) under ./seurat; all three must be present.
    """
    cprint("For Seurat:", "red")
    f_list = [f for f in os.listdir("./seurat") if f in ["matrix.mtx", "genes.txt", "barcodes.txt"]]
    if len(f_list) != 3:
        cprint("No corrected data are found! Please first correct data using Seurat CCA.", "red")
        sys.exit(1)
    cprint("Loading gene expression...", "green")
    adata = read_mtx("./seurat/matrix.mtx")
    cprint("Loading gene names...", "green")
    df_genes = pd.read_csv("./seurat/genes.txt", header = None)
    adata.var['index'] = df_genes[0].values
    adata.var.set_index('index', inplace = True)
    cprint("Loading barcode names...", "green")
    df_barcodes = pd.read_csv("./seurat/barcodes.txt", header = None)
    adata.obs['index'] = df_barcodes[0].values
    adata.obs.set_index('index', inplace = True)
    # Channel = barcode prefix before the "-" suffix.
    adata.obs['Channel'] = pd.Categorical(adata.obs.index.map(lambda s: s.split('-')[0]).values)
    adata.uns['genome'] = 'GRCh38'
    process_data(adata, method = 'seurat', output = "./seurat/seurat_result")
def process_combat():
    """Benchmark the ComBat-corrected data (scanpy output).

    Expects ./combat/scanpy_combat_corrected.h5ad; exits with status 1
    otherwise.
    """
    cprint("For ComBat:", "red")
    if not os.path.exists("./combat/scanpy_combat_corrected.h5ad"):
        cprint("No corrected data are found! Please first correct data using ComBat.", "red")
        sys.exit(1)
    cprint("Loading corrected data...", "green")
    adata = pg.read_input('./combat/scanpy_combat_corrected.h5ad')
    process_data(adata, method = 'combat', output = "./combat/scanpy_combat_result")
def process_bbknn():
    """Benchmark the BBKNN-corrected data (scanpy output).

    Copies BBKNN's graph from adata.uns['neighbors'] into the pca_knn_*
    slots pegasus reads, computes UMAP on top of it, then benchmarks and
    writes the result.
    """
    cprint("For BBKNN:", "red")
    if not os.path.exists("./bbknn/scanpy_bbknn_corrected.h5ad"):
        # Typo fix: the original message read "corredted".
        cprint("No corrected data are found! Please first correct data using BBKNN.", "red")
        sys.exit(1)
    # Capitalized for consistency with the other process_* functions.
    cprint("Loading corrected data...", "green")
    adata = pg.read_input("./bbknn/scanpy_bbknn_corrected.h5ad")
    # Drop the first neighbor column (presumably each cell itself —
    # confirm against the BBKNN output format) when exposing the graph.
    adata.uns['pca_knn_indices'] = adata.uns['neighbors']['knn_indices'][:, 1:]
    adata.uns['pca_knn_distances'] = adata.uns['neighbors']['knn_distances'][:, 1:]
    cprint("Computing UMAP...", "green")
    pg.umap(adata)
    cprint("For UMAP coordinates:", "yellow")
    process_data(adata, method = 'bbknn', processed = True)
    pg.write_output(adata, "./bbknn/scanpy_bbknn_result")
def process_data(data, method, output = None, processed = False):
    """Embed (if needed) and benchmark one correction method's data.

    Computes kBET, mean silhouette and kSIM on the UMAP embedding,
    appends (pretty name, kSIM rate, kBET rate) to the global
    ``measure_result``, and renders the method's Figure S2 panel through
    the ``pegasus plot`` command line.

    Parameters
    ----------
    data : object loaded by pg.read_input (AnnData-like).
    method : key into figure_index / method_print_name.
    output : path prefix for pg.write_output when embedding is computed here.
    processed : when True, PCA/KNN/UMAP are assumed present already.
    """
    if not processed:
        cprint("Calculating PCA and KNN...", "green")
        # Restrict PCA to highly variable features only when the corrected
        # data still carry that column.
        pg.pca(data, features = "highly_variable_features" if "highly_variable_features" in data.var else None)
        pg.neighbors(data, n_jobs = 8)
        cprint("Computing UMAP...", "green")
        pg.umap(data)
        pg.write_output(data, output)
    cprint("Calculating kBET measures on UMAP coordinates...", "green")
    kbet_stat, kbet_pvalue, kbet_ac_rate = pg.calc_kBET(data, attr = 'Channel', rep = 'umap')
    cprint("Mean statistics is {stat:.4f}; Mean p-value is {pvalue:.4f}; Mean accept rate is {rate:.4f}.".format(stat = kbet_stat, pvalue = kbet_pvalue, rate = kbet_ac_rate), "yellow")
    cprint("Loading ground truth cell types...", "green")
    df_celltype = pd.read_csv("ground_cell_type.txt")
    # Ground-truth rows must line up one-to-one with the cells in `data`.
    assert np.sum(df_celltype['cell_id'] != data.obs.index.values) == 0
    data.obs['cell_types'] = pd.Categorical(df_celltype['cell_types'].values)
    data.obs['Cluster'] = pd.Categorical(df_celltype['louvain_labels'].values)
    # Set Individual: for MNN the channel is recovered from the barcode
    # index; other methods keep an explicit Channel column.
    if method == 'mnn':
        data.obs['Individual'] = pd.Categorical(data.obs.index.map(lambda s: s.split('_')[0][8:]))
    else:
        data.obs['Individual'] = pd.Categorical(data.obs['Channel'].apply(lambda s: s.split('_')[0][8:]))
    cprint("Calculating Mean Silhouette Score on UMAP coordinates...", "green")
    sil_score = silhouette_score(data.obsm['X_umap'], data.obs['cell_types'])
    cprint("Mean Silhouette Score on UMAP = {:.4f}.".format(sil_score), "yellow")
    cprint("Calculating kSIM on UMAP coordinates...", "green")
    ksim_mean, ksim_ac_rate = pg.calc_kSIM(data, attr = 'cell_types', rep = 'umap')
    cprint("Mean kSIM = {mean:.4f}, with accept rate {rate:.4f}.".format(mean = ksim_mean, rate = ksim_ac_rate), "yellow")
    measure_result.append((method_print_name[method], ksim_ac_rate, kbet_ac_rate))
    cprint("Plotting UMAP for cells with cell types...", "green")
    pg.write_output(data, "{}_compare".format(method))
    # os.system returns non-zero when the pegasus plot CLI call fails.
    if os.system('pegasus plot scatter --basis umap --attributes Cluster,Individual --set-palettes "{palettes}" {name}_compare.h5ad /output/Figure_S2{idx}.pdf'.format(name = method, idx = figure_index[method], palettes = palettes_cluster)):
        sys.exit(1)
def plot_scatter(precomputed = False):
    """Scatter-plot kSIM vs kBET accept rates per method and save Figure 2A.

    When *precomputed* is True the measures are read from
    ``measure_precomputed_file``; otherwise they come from the global
    ``measure_result`` accumulated by process_data().
    """
    if precomputed:
        df_measure = pd.read_csv(measure_precomputed_file)
    else:
        # Build the frame directly from the accumulated tuples instead of
        # the original empty-frame + three parallel append loops.
        df_measure = pd.DataFrame(measure_result, columns=['method', 'kSIM', 'kBET'])
    ax = sns.scatterplot(x='kSIM', y='kBET', hue='method', data=df_measure, legend=False)
    for line in range(df_measure.shape[0]):
        method = df_measure.method[line]
        # Baseline and ComBat get their labels on the left of the marker;
        # everything else on the right.
        left_side = method in ['Baseline', 'ComBat']
        x_pos = df_measure.kSIM[line] + (-0.003 if left_side else 0.003)
        alignment = 'right' if left_side else 'left'
        ax.text(x_pos, df_measure.kBET[line], method,
                horizontalalignment=alignment, size='medium', color='black')
    plt.xlabel('kSIM accept rate')
    plt.ylabel('kBET accept rate')
    plt.savefig("/output/Figure_2A.pdf")
    plt.close()
if __name__ == "__main__":
    # Usage: script.py <method>, where <method> is one of method_list,
    # 'all' (run every method, then plot), or 'plot' (plot precomputed
    # measures only).
    method = sys.argv[1]
    assert method in method_list or method == 'all' or method == 'plot'
    if method == 'baseline' or method == 'all':
        process_baseline()
    if method == 'pegasus' or method == 'all':
        process_pegasus()
    if method == 'seurat' or method == 'all':
        process_seurat()
    if method == 'mnn' or method == 'all':
        process_mnn()
    if method == 'combat' or method == 'all':
        process_combat()
    if method == 'bbknn' or method == 'all':
        process_bbknn()
    if method == 'all' or method == 'plot':
        # 'plot' reuses measures from measure_precomputed_file; 'all'
        # plots the measures just collected in measure_result.
        precomputed = True if method == 'plot' else False
        plot_scatter(precomputed)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"pegasus.neighbors",
"seaborn.scatterplot",
"sys.exit",
"pegasus.write_output",
"os.path.exists",
"os.listdir",
"pegasus.umap",
"matplotlib.pyplot.xlabel",
"pandas.Categorical",
"matplotlib.pyplot.close",
"pandas.DataFrame",
"pegasus.calc_kBET... | [((997, 1051), 'termcolor.cprint', 'cprint', (['"""For pegasus with no batch correction:"""', '"""red"""'], {}), "('For pegasus with no batch correction:', 'red')\n", (1003, 1051), False, 'from termcolor import cprint\n'), ((1230, 1274), 'termcolor.cprint', 'cprint', (['"""Loading processed data..."""', '"""green"""'], {}), "('Loading processed data...', 'green')\n", (1236, 1274), False, 'from termcolor import cprint\n'), ((1284, 1321), 'pegasus.read_input', 'pg.read_input', (['"""./ground/ground.h5ad"""'], {}), "('./ground/ground.h5ad')\n", (1297, 1321), True, 'import pegasus as pg\n'), ((1408, 1437), 'termcolor.cprint', 'cprint', (['"""For pegasus:"""', '"""red"""'], {}), "('For pegasus:', 'red')\n", (1414, 1437), False, 'from termcolor import cprint\n'), ((1603, 1647), 'termcolor.cprint', 'cprint', (['"""Loading corrected data..."""', '"""green"""'], {}), "('Loading corrected data...', 'green')\n", (1609, 1647), False, 'from termcolor import cprint\n'), ((1657, 1706), 'pegasus.read_input', 'pg.read_input', (['"""./pegasus/pegasus_corrected.h5ad"""'], {}), "('./pegasus/pegasus_corrected.h5ad')\n", (1670, 1706), True, 'import pegasus as pg\n'), ((1788, 1813), 'termcolor.cprint', 'cprint', (['"""For MNN:"""', '"""red"""'], {}), "('For MNN:', 'red')\n", (1794, 1813), False, 'from termcolor import cprint\n'), ((1974, 2018), 'termcolor.cprint', 'cprint', (['"""Loading corrected data..."""', '"""green"""'], {}), "('Loading corrected data...', 'green')\n", (1980, 2018), False, 'from termcolor import cprint\n'), ((2028, 2076), 'pegasus.read_input', 'pg.read_input', (['"""./mnn/scanpy_mnn_corrected.h5ad"""'], {}), "('./mnn/scanpy_mnn_corrected.h5ad')\n", (2041, 2076), True, 'import pegasus as pg\n'), ((2079, 2122), 'termcolor.cprint', 'cprint', (['"""Correcting cell names..."""', '"""green"""'], {}), "('Correcting cell names...', 'green')\n", (2085, 2122), False, 'from termcolor import cprint\n'), ((2348, 2376), 'termcolor.cprint', 'cprint', 
(['"""For Seurat:"""', '"""red"""'], {}), "('For Seurat:', 'red')\n", (2354, 2376), False, 'from termcolor import cprint\n'), ((2605, 2650), 'termcolor.cprint', 'cprint', (['"""Loading gene expression..."""', '"""green"""'], {}), "('Loading gene expression...', 'green')\n", (2611, 2650), False, 'from termcolor import cprint\n'), ((2660, 2691), 'anndata.read_mtx', 'read_mtx', (['"""./seurat/matrix.mtx"""'], {}), "('./seurat/matrix.mtx')\n", (2668, 2691), False, 'from anndata import read_mtx\n'), ((2694, 2734), 'termcolor.cprint', 'cprint', (['"""Loading gene names..."""', '"""green"""'], {}), "('Loading gene names...', 'green')\n", (2700, 2734), False, 'from termcolor import cprint\n'), ((2747, 2793), 'pandas.read_csv', 'pd.read_csv', (['"""./seurat/genes.txt"""'], {'header': 'None'}), "('./seurat/genes.txt', header=None)\n", (2758, 2793), True, 'import pandas as pd\n'), ((2885, 2928), 'termcolor.cprint', 'cprint', (['"""Loading barcode names..."""', '"""green"""'], {}), "('Loading barcode names...', 'green')\n", (2891, 2928), False, 'from termcolor import cprint\n'), ((2944, 2993), 'pandas.read_csv', 'pd.read_csv', (['"""./seurat/barcodes.txt"""'], {'header': 'None'}), "('./seurat/barcodes.txt', header=None)\n", (2955, 2993), True, 'import pandas as pd\n'), ((3312, 3340), 'termcolor.cprint', 'cprint', (['"""For ComBat:"""', '"""red"""'], {}), "('For ComBat:', 'red')\n", (3318, 3340), False, 'from termcolor import cprint\n'), ((3510, 3554), 'termcolor.cprint', 'cprint', (['"""Loading corrected data..."""', '"""green"""'], {}), "('Loading corrected data...', 'green')\n", (3516, 3554), False, 'from termcolor import cprint\n'), ((3564, 3618), 'pegasus.read_input', 'pg.read_input', (['"""./combat/scanpy_combat_corrected.h5ad"""'], {}), "('./combat/scanpy_combat_corrected.h5ad')\n", (3577, 3618), True, 'import pegasus as pg\n'), ((3725, 3752), 'termcolor.cprint', 'cprint', (['"""For BBKNN:"""', '"""red"""'], {}), "('For BBKNN:', 'red')\n", (3731, 3752), False, 'from 
termcolor import cprint\n'), ((3919, 3963), 'termcolor.cprint', 'cprint', (['"""loading corrected data..."""', '"""green"""'], {}), "('loading corrected data...', 'green')\n", (3925, 3963), False, 'from termcolor import cprint\n'), ((3973, 4025), 'pegasus.read_input', 'pg.read_input', (['"""./bbknn/scanpy_bbknn_corrected.h5ad"""'], {}), "('./bbknn/scanpy_bbknn_corrected.h5ad')\n", (3986, 4025), True, 'import pegasus as pg\n'), ((4186, 4222), 'termcolor.cprint', 'cprint', (['"""Computing UMAP..."""', '"""green"""'], {}), "('Computing UMAP...', 'green')\n", (4192, 4222), False, 'from termcolor import cprint\n'), ((4224, 4238), 'pegasus.umap', 'pg.umap', (['adata'], {}), '(adata)\n', (4231, 4238), True, 'import pegasus as pg\n'), ((4241, 4282), 'termcolor.cprint', 'cprint', (['"""For UMAP coordinates:"""', '"""yellow"""'], {}), "('For UMAP coordinates:', 'yellow')\n", (4247, 4282), False, 'from termcolor import cprint\n'), ((4342, 4395), 'pegasus.write_output', 'pg.write_output', (['adata', '"""./bbknn/scanpy_bbknn_result"""'], {}), "(adata, './bbknn/scanpy_bbknn_result')\n", (4357, 4395), True, 'import pegasus as pg\n'), ((4763, 4830), 'termcolor.cprint', 'cprint', (['"""Calculating kBET measures on UMAP coordinates..."""', '"""green"""'], {}), "('Calculating kBET measures on UMAP coordinates...', 'green')\n", (4769, 4830), False, 'from termcolor import cprint\n'), ((4871, 4917), 'pegasus.calc_kBET', 'pg.calc_kBET', (['data'], {'attr': '"""Channel"""', 'rep': '"""umap"""'}), "(data, attr='Channel', rep='umap')\n", (4883, 4917), True, 'import pegasus as pg\n'), ((5106, 5159), 'termcolor.cprint', 'cprint', (['"""Loading ground truth cell types..."""', '"""green"""'], {}), "('Loading ground truth cell types...', 'green')\n", (5112, 5159), False, 'from termcolor import cprint\n'), ((5175, 5210), 'pandas.read_csv', 'pd.read_csv', (['"""ground_cell_type.txt"""'], {}), "('ground_cell_type.txt')\n", (5186, 5210), True, 'import pandas as pd\n'), ((5306, 5354), 
'pandas.Categorical', 'pd.Categorical', (["df_celltype['cell_types'].values"], {}), "(df_celltype['cell_types'].values)\n", (5320, 5354), True, 'import pandas as pd\n'), ((5378, 5430), 'pandas.Categorical', 'pd.Categorical', (["df_celltype['louvain_labels'].values"], {}), "(df_celltype['louvain_labels'].values)\n", (5392, 5430), True, 'import pandas as pd\n'), ((5673, 5748), 'termcolor.cprint', 'cprint', (['"""Calculating Mean Silhouette Score on UMAP coordinates..."""', '"""green"""'], {}), "('Calculating Mean Silhouette Score on UMAP coordinates...', 'green')\n", (5679, 5748), False, 'from termcolor import cprint\n'), ((5762, 5823), 'sklearn.metrics.silhouette_score', 'silhouette_score', (["data.obsm['X_umap']", "data.obs['cell_types']"], {}), "(data.obsm['X_umap'], data.obs['cell_types'])\n", (5778, 5823), False, 'from sklearn.metrics import silhouette_score\n'), ((5905, 5963), 'termcolor.cprint', 'cprint', (['"""Calculating kSIM on UMAP coordinates..."""', '"""green"""'], {}), "('Calculating kSIM on UMAP coordinates...', 'green')\n", (5911, 5963), False, 'from termcolor import cprint\n'), ((5991, 6040), 'pegasus.calc_kSIM', 'pg.calc_kSIM', (['data'], {'attr': '"""cell_types"""', 'rep': '"""umap"""'}), "(data, attr='cell_types', rep='umap')\n", (6003, 6040), True, 'import pegasus as pg\n'), ((6248, 6309), 'termcolor.cprint', 'cprint', (['"""Plotting UMAP for cells with cell types..."""', '"""green"""'], {}), "('Plotting UMAP for cells with cell types...', 'green')\n", (6254, 6309), False, 'from termcolor import cprint\n'), ((7062, 7147), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'x': '"""kSIM"""', 'y': '"""kBET"""', 'hue': '"""method"""', 'data': 'df_measure', 'legend': '(False)'}), "(x='kSIM', y='kBET', hue='method', data=df_measure, legend=False\n )\n", (7077, 7147), True, 'import seaborn as sns\n'), ((7570, 7600), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""kSIM accept rate"""'], {}), "('kSIM accept rate')\n", (7580, 7600), True, 'import 
matplotlib.pyplot as plt\n'), ((7602, 7632), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""kBET accept rate"""'], {}), "('kBET accept rate')\n", (7612, 7632), True, 'import matplotlib.pyplot as plt\n'), ((7636, 7672), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/output/Figure_2A.pdf"""'], {}), "('/output/Figure_2A.pdf')\n", (7647, 7672), True, 'import matplotlib.pyplot as plt\n'), ((7674, 7685), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7683, 7685), True, 'import matplotlib.pyplot as plt\n'), ((1060, 1098), 'os.path.exists', 'os.path.exists', (['"""./ground/ground.h5ad"""'], {}), "('./ground/ground.h5ad')\n", (1074, 1098), False, 'import os, sys\n'), ((1102, 1223), 'termcolor.cprint', 'cprint', (['"""No processed data are found! Please first process data using pegasus without batch correction."""', '"""red"""'], {}), "(\n 'No processed data are found! Please first process data using pegasus without batch correction.'\n , 'red')\n", (1108, 1223), False, 'from termcolor import cprint\n'), ((1216, 1227), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1224, 1227), False, 'import os, sys\n'), ((1446, 1496), 'os.path.exists', 'os.path.exists', (['"""./pegasus/pegasus_corrected.h5ad"""'], {}), "('./pegasus/pegasus_corrected.h5ad')\n", (1460, 1496), False, 'import os, sys\n'), ((1500, 1590), 'termcolor.cprint', 'cprint', (['"""No corrected data are found! Please first correct data using pegasus."""', '"""red"""'], {}), "('No corrected data are found! Please first correct data using pegasus.',\n 'red')\n", (1506, 1590), False, 'from termcolor import cprint\n'), ((1589, 1600), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1597, 1600), False, 'import os, sys\n'), ((1822, 1871), 'os.path.exists', 'os.path.exists', (['"""./mnn/scanpy_mnn_corrected.h5ad"""'], {}), "('./mnn/scanpy_mnn_corrected.h5ad')\n", (1836, 1871), False, 'import os, sys\n'), ((1875, 1961), 'termcolor.cprint', 'cprint', (['"""No corrected data are found! 
Please first correct data using MNN."""', '"""red"""'], {}), "('No corrected data are found! Please first correct data using MNN.',\n 'red')\n", (1881, 1961), False, 'from termcolor import cprint\n'), ((1960, 1971), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1968, 1971), False, 'import os, sys\n'), ((2499, 2597), 'termcolor.cprint', 'cprint', (['"""No corrected data are found! Please first correct data using Seurat CCA."""', '"""red"""'], {}), "(\n 'No corrected data are found! Please first correct data using Seurat CCA.',\n 'red')\n", (2505, 2597), False, 'from termcolor import cprint\n'), ((2591, 2602), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2599, 2602), False, 'import os, sys\n'), ((3349, 3404), 'os.path.exists', 'os.path.exists', (['"""./combat/scanpy_combat_corrected.h5ad"""'], {}), "('./combat/scanpy_combat_corrected.h5ad')\n", (3363, 3404), False, 'import os, sys\n'), ((3408, 3497), 'termcolor.cprint', 'cprint', (['"""No corrected data are found! Please first correct data using ComBat."""', '"""red"""'], {}), "('No corrected data are found! Please first correct data using ComBat.',\n 'red')\n", (3414, 3497), False, 'from termcolor import cprint\n'), ((3496, 3507), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3504, 3507), False, 'import os, sys\n'), ((3761, 3814), 'os.path.exists', 'os.path.exists', (['"""./bbknn/scanpy_bbknn_corrected.h5ad"""'], {}), "('./bbknn/scanpy_bbknn_corrected.h5ad')\n", (3775, 3814), False, 'import os, sys\n'), ((3818, 3906), 'termcolor.cprint', 'cprint', (['"""No corredted data are found! Please first correct data using BBKNN."""', '"""red"""'], {}), "('No corredted data are found! 
Please first correct data using BBKNN.',\n 'red')\n", (3824, 3906), False, 'from termcolor import cprint\n'), ((3905, 3916), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3913, 3916), False, 'import os, sys\n'), ((4487, 4532), 'termcolor.cprint', 'cprint', (['"""Calculating PCA and KNN..."""', '"""green"""'], {}), "('Calculating PCA and KNN...', 'green')\n", (4493, 4532), False, 'from termcolor import cprint\n'), ((4535, 4641), 'pegasus.pca', 'pg.pca', (['data'], {'features': "('highly_variable_features' if 'highly_variable_features' in data.var else None\n )"}), "(data, features='highly_variable_features' if \n 'highly_variable_features' in data.var else None)\n", (4541, 4641), True, 'import pegasus as pg\n'), ((4641, 4669), 'pegasus.neighbors', 'pg.neighbors', (['data'], {'n_jobs': '(8)'}), '(data, n_jobs=8)\n', (4653, 4669), True, 'import pegasus as pg\n'), ((4675, 4711), 'termcolor.cprint', 'cprint', (['"""Computing UMAP..."""', '"""green"""'], {}), "('Computing UMAP...', 'green')\n", (4681, 4711), False, 'from termcolor import cprint\n'), ((4714, 4727), 'pegasus.umap', 'pg.umap', (['data'], {}), '(data)\n', (4721, 4727), True, 'import pegasus as pg\n'), ((4731, 4760), 'pegasus.write_output', 'pg.write_output', (['data', 'output'], {}), '(data, output)\n', (4746, 4760), True, 'import pegasus as pg\n'), ((5219, 5274), 'numpy.sum', 'np.sum', (["(df_celltype['cell_id'] != data.obs.index.values)"], {}), "(df_celltype['cell_id'] != data.obs.index.values)\n", (5225, 5274), True, 'import numpy as np\n'), ((6602, 6613), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6610, 6613), False, 'import os, sys\n'), ((6686, 6723), 'pandas.read_csv', 'pd.read_csv', (['measure_precomputed_file'], {}), '(measure_precomputed_file)\n', (6697, 6723), True, 'import pandas as pd\n'), ((6790, 6842), 'pandas.DataFrame', 'pd.DataFrame', (["{'method': [], 'kSIM': [], 'kBET': []}"], {}), "({'method': [], 'kSIM': [], 'kBET': []})\n", (6802, 6842), True, 'import pandas as pd\n'), ((2399, 
2421), 'os.listdir', 'os.listdir', (['"""./seurat"""'], {}), "('./seurat')\n", (2409, 2421), False, 'import os, sys\n')] |
import numpy as np
def sigmoid(x):
    """Logistic sigmoid: maps any real input into the open interval (0, 1)."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def sigmoid_der(x):
    """Derivative of the sigmoid, expressed in terms of its output.

    Given x = sigmoid(z), the derivative is x * (1 - x).
    Bug fix: the original returned x / (1 - x), which is wrong
    (e.g. at x = 0.5 it gave 1.0 instead of the correct 0.25).
    """
    return x * (1 - x)
def relu(x):
    """Rectified linear unit: x for positive inputs, 0 otherwise."""
    return x if x > 0 else 0
def relu_der(x):
    """Derivative of ReLU: 1 on the positive side, 0 elsewhere."""
    return 1 if x > 0 else 0
def leaky_relu(x):
    """Leaky ReLU: identity for non-negative x, slope 0.01 below zero."""
    return 0.01 * x if x < 0 else x
def leaky_relu_der(x):
    """Derivative of leaky ReLU: 0.01 for negative x, 1 otherwise."""
    return 0.01 if x < 0 else 1
def swish(x):
    """Swish activation: x * sigmoid(x), written as x / (1 + e^-x)."""
    denom = 1 + np.exp(-x)
    return x / denom
def swish_der(x):
    """Derivative of swish: e^x (e^x + x + 1) / (e^x + 1)^2."""
    e = np.exp(x)
    return e * (e + x + 1) / ((e + 1) ** 2)
def identity(x):
    """Identity activation: returns the input unchanged."""
    return x
"numpy.exp"
] | [((56, 66), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (62, 66), True, 'import numpy as np\n'), ((437, 447), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (443, 447), True, 'import numpy as np\n'), ((479, 488), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (485, 488), True, 'import numpy as np\n'), ((515, 524), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (521, 524), True, 'import numpy as np\n'), ((492, 501), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (498, 501), True, 'import numpy as np\n')] |
#! /usr/bin/env python

from distutils.core import Extension, setup
from distutils import sysconfig
import numpy

# Locate NumPy's C headers. Modern NumPy exposes get_include();
# very old releases used get_numpy_include() instead. Catch only
# AttributeError so unrelated failures are not silently swallowed
# (the original used a bare `except:`).
try:
    numpy_include = numpy.get_include()
except AttributeError:
    numpy_include = numpy.get_numpy_include()

# inplace extension module: SWIG interface file plus the C implementation.
_inplace = Extension("_inplace",
                   ["inplace.i", "inplace.c"],
                   include_dirs=[numpy_include],
                   )

setup( name="inplace function",
       description="inplace takes a double array and doubles each of its elements in-place.",
       author="<NAME>",
       version="1.0",
       ext_modules=[_inplace])
| [
"distutils.core.Extension",
"numpy.get_include",
"numpy.get_numpy_include",
"distutils.core.setup"
] | [((253, 332), 'distutils.core.Extension', 'Extension', (['"""_inplace"""', "['inplace.i', 'inplace.c']"], {'include_dirs': '[numpy_include]'}), "('_inplace', ['inplace.i', 'inplace.c'], include_dirs=[numpy_include])\n", (262, 332), False, 'from distutils.core import Extension, setup\n'), ((348, 530), 'distutils.core.setup', 'setup', ([], {'name': '"""inplace function"""', 'description': '"""inplace takes a double array and doubles each of its elements in-place."""', 'author': '"""<NAME>"""', 'version': '"""1.0"""', 'ext_modules': '[_inplace]'}), "(name='inplace function', description=\n 'inplace takes a double array and doubles each of its elements in-place.',\n author='<NAME>', version='1.0', ext_modules=[_inplace])\n", (353, 530), False, 'from distutils.core import Extension, setup\n'), ((140, 159), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (157, 159), False, 'import numpy\n'), ((188, 213), 'numpy.get_numpy_include', 'numpy.get_numpy_include', ([], {}), '()\n', (211, 213), False, 'import numpy\n')] |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Window Transformers '''
from __future__ import print_function, division, absolute_import, unicode_literals
import io
import numpy as np
import pandas as pd
import re
import six
from PIL import Image
def _bgr2rgb(pil_image):
    # Reverse the channel axis (BGR -> RGB) and rebuild a PIL image.
    pixels = np.asarray(pil_image)
    return Image.fromarray(pixels[:, :, ::-1])
def bgr2rgb(data, columns=None):
    '''
    Convert BGR images to RGB

    Parameters
    ----------
    data : PIL.Image or DataFrame
        The image data
    columns : string or list-of-strings, optional
        If `data` is a DataFrame, this is the list of columns that
        contain image data.

    Returns
    -------
    :class:`PIL.Image`
        If `data` is a :class:`PIL.Image`
    :class:`pandas.DataFrame`
        If `data` is a :class:`pandas.DataFrame`

    '''
    if hasattr(data, 'columns'):
        if len(data):
            # Default to every column; normalize a single name to a list.
            if not columns:
                targets = list(data.columns)
            elif isinstance(columns, six.string_types):
                targets = [columns]
            else:
                targets = columns
            for name in targets:
                # Only convert columns whose first entry is a PIL image.
                if Image.isImageType(data[name].iloc[0]):
                    data[name] = data[name].apply(_bgr2rgb)
        return data
    if Image.isImageType(data):
        return _bgr2rgb(data)
    return data
def rgb2bgr(data, columns=None):
    '''
    Convert RGB images to BGR

    Channel reversal is symmetric, so this simply delegates
    to :func:`bgr2rgb`.

    Parameters
    ----------
    data : PIL.Image or DataFrame
        The image data
    columns : string or list-of-strings, optional
        If `data` is a DataFrame, this is the list of columns that
        contain image data.

    Returns
    -------
    :class:`PIL.Image`
        If `data` is a :class:`PIL.Image`
    :class:`pandas.DataFrame`
        If `data` is a :class:`pandas.DataFrame`

    '''
    return bgr2rgb(data, columns=columns)
def _bytes2image(data):
    # Wrap the raw bytes in a seekable buffer so PIL can parse them.
    buffer = io.BytesIO(data)
    return Image.open(buffer)
def bytes2image(data, columns=None):
    '''
    Convert bytes to PIL.Image objects

    Parameters
    ----------
    data : PIL.Image or DataFrame
        The image data
    columns : string or list-of-strings, optional
        If `data` is a DataFrame, this is the list of columns that
        contain image data.

    Returns
    -------
    :class:`PIL.Image`
        If `data` is a :class:`PIL.Image`
    :class:`pandas.DataFrame`
        If `data` is a :class:`pandas.DataFrame`

    '''
    if hasattr(data, 'columns'):
        if len(data):
            # Default to every column; normalize a single name to a list.
            if not columns:
                targets = list(data.columns)
            elif isinstance(columns, six.string_types):
                targets = [columns]
            else:
                targets = columns
            for name in targets:
                # Only decode columns whose first entry is raw bytes.
                if isinstance(data[name].iloc[0], bytes):
                    data[name] = data[name].apply(_bytes2image)
        return data
    if isinstance(data, six.binary_type):
        return _bytes2image(data)
    return data
| [
"numpy.asarray",
"io.BytesIO",
"PIL.Image.isImageType"
] | [((1799, 1822), 'PIL.Image.isImageType', 'Image.isImageType', (['data'], {}), '(data)\n', (1816, 1822), False, 'from PIL import Image\n'), ((2445, 2461), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (2455, 2461), False, 'import io\n'), ((883, 904), 'numpy.asarray', 'np.asarray', (['pil_image'], {}), '(pil_image)\n', (893, 904), True, 'import numpy as np\n'), ((1673, 1709), 'PIL.Image.isImageType', 'Image.isImageType', (['data[col].iloc[0]'], {}), '(data[col].iloc[0])\n', (1690, 1709), False, 'from PIL import Image\n')] |
from __future__ import division
import numpy as np
import math
def vectorAdd(v1, v2):
    """Component-wise sum of two 3-vectors, returned as a tuple."""
    x1, y1, z1 = v1[0], v1[1], v1[2]
    x2, y2, z2 = v2[0], v2[1], v2[2]
    return (x1 + x2, y1 + y2, z1 + z2)
def vectorSum(vList):
    """Sum a sequence of 3-vectors, starting from the zero vector."""
    total = (0, 0, 0)
    for vec in vList:
        total = vectorAdd(total, vec)
    return total
def vectorCross(v1, v2):
    """Cross product of two 3-vectors via numpy, returned as a tuple."""
    product = np.cross(list(v1), list(v2))
    return tuple(product)
def vectorDot(v1, v2):
    """Dot product of two 3-vectors."""
    total = v1[0] * v2[0]
    total += v1[1] * v2[1]
    total += v1[2] * v2[2]
    return total
def vectorMultiplyC(v1, C):
    """Scale a 3-vector by the constant C."""
    return tuple(v1[i] * C for i in range(3))
def vectorDividedC(v1, C):
    """Divide each component of a 3-vector by the constant C (true division)."""
    return tuple(float(v1[i]) / C for i in range(3))
def pointsMean(pList):
    """Centroid of a list of points (component-wise average)."""
    return vectorDividedC(vectorSum(pList), len(pList))
def pointsDistance(p1, p2):
    """Euclidean distance between two 3D points."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    dz = p2[2] - p1[2]
    return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2) + math.pow(dz, 2))
def pointsDirection(p1, p2):
    """Unit vector pointing from p1 toward p2.

    Returns the raw (zero) difference when the points coincide,
    to avoid dividing by zero.
    """
    vec = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]
    length = float(np.linalg.norm(vec))
    if length == 0:
        return (vec[0], vec[1], vec[2])
    return (vec[0] / length, vec[1] / length, vec[2] / length)
def pointsDirectionPow(p1, p2, pow_):
    """Component-wise difference (p2 - p1) raised to the given power."""
    return tuple(math.pow(p2[i] - p1[i], pow_) for i in range(3))
def pointsNormal(c, p1, p2):
    """Normal of the plane spanned by the directions c->p1 and c->p2."""
    return vectorCross(pointsDirection(c, p1), pointsDirection(c, p2))
def pointsSample(p1, p2, rate):
    """Return `rate + 1` evenly spaced points from p1 to p2, inclusive."""
    samples = [p1]
    # One step is 1/rate of the full p1 -> p2 displacement.
    step = vectorDividedC(pointsDirectionPow(p1, p2, 1), rate)
    for k in range(1, rate):
        samples.append(vectorAdd(p1, vectorMultiplyC(step, k)))
    samples.append(p2)
    return samples
def planeEquation(normal, p):
    """Plane coefficients (a, b, c, d) with ax + by + cz + d = 0 through p."""
    offset = -vectorDot(normal, p)
    return normal + (offset,)
def vectorPlaneHit(vec, plane):
    """Intersection of the ray t * vec (t >= 0) with a plane, or None.

    Returns None when the ray is parallel to the plane or when the
    intersection would lie behind the origin (t < 0).
    """
    normal = (plane[0], plane[1], plane[2])
    denom = vectorDot(normal, vec)
    if denom == 0:
        return None
    t = -plane[3] / denom
    if t < 0:
        return None
    return vectorMultiplyC(vec, t)
def normal2color(normal):
    """Map a unit normal to an RGB color in [0, 1] (normal-map style)."""
    scaled = vectorMultiplyC(normal, -0.5)
    return vectorAdd(scaled, (0.5, 0.5, 0.5))
| [
"math.pow",
"numpy.cross",
"numpy.linalg.norm"
] | [((363, 379), 'numpy.cross', 'np.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (371, 379), True, 'import numpy as np\n'), ((1151, 1170), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (1165, 1170), True, 'import numpy as np\n'), ((1427, 1449), 'math.pow', 'math.pow', (['vec[0]', 'pow_'], {}), '(vec[0], pow_)\n', (1435, 1449), False, 'import math\n'), ((1450, 1472), 'math.pow', 'math.pow', (['vec[1]', 'pow_'], {}), '(vec[1], pow_)\n', (1458, 1472), False, 'import math\n'), ((1487, 1509), 'math.pow', 'math.pow', (['vec[2]', 'pow_'], {}), '(vec[2], pow_)\n', (1495, 1509), False, 'import math\n'), ((1008, 1027), 'math.pow', 'math.pow', (['vec[2]', '(2)'], {}), '(vec[2], 2)\n', (1016, 1027), False, 'import math\n'), ((920, 939), 'math.pow', 'math.pow', (['vec[0]', '(2)'], {}), '(vec[0], 2)\n', (928, 939), False, 'import math\n'), ((964, 983), 'math.pow', 'math.pow', (['vec[1]', '(2)'], {}), '(vec[1], 2)\n', (972, 983), False, 'import math\n')] |
import requests as rt
import pandas as pd
import numpy as np
import PyPDF2 as p2
from pathlib import Path
def trans_csv(path):
    """Read the school-data CSV at `path` and return its COD_ESC column
    as a list of strings, left-padded with zeros to at least 6 chars."""
    table = pd.read_csv(path)
    codes = np.array(table['COD_ESC'])
    padded = []
    for code in codes:
        text = str(code)
        if len(text) < 6:
            padded.append('0' * (6 - len(text)) + text)
        else:
            padded.append(text)
    return padded
def download_pdf(ano, COD_ESC):
    """Download a school's IDESP report PDF.

    `ano` is the year (int); `COD_ESC` the 6-char school code (str).
    Saves the file as '<ano>_<COD_ESC>.pdf' in the current directory,
    prints the URL, and returns the saved file name.
    """
    year = str(ano)
    url = 'http://idesp.edunet.sp.gov.br/arquivos' + year + '/' + COD_ESC + '.pdf'
    end_pdf = year + '_' + COD_ESC + '.pdf'
    response = rt.get(url)
    Path(end_pdf).write_bytes(response.content)
    print(url)
    return end_pdf
def le_pdf(path):
    """Read a PDF and return a dict mapping page number (1-based) to a
    one-element list holding that page's extracted text.

    Uses a context manager so the file handle is closed even if
    PyPDF2 raises while parsing (the original leaked the handle
    on any exception between open() and close()).
    """
    with open(path, "rb") as arq:
        pdf = p2.PdfFileReader(arq)
        pg_pdf = {}
        for i in range(0, pdf.getNumPages()):
            pg_pdf[i + 1] = [pdf.getPage(i).extractText()]
    return pg_pdf
def PD_2007(pg_pdf, pg):
    """Scan page `pg` of `pg_pdf` for decimal-comma patterns 'd,dd'
    and return the values as floats (2007 report layout)."""
    text = pg_pdf[pg][0]
    values = []
    for pos, ch in enumerate(text):
        if ch == ',':
            # One digit before the comma, two after: d,dd -> d.dd
            values.append(float(text[pos - 1] + '.' + text[pos + 1] + text[pos + 2]))
    return values
def PD_novo(pg_pdf, pg):
    """Parse a newline-separated page of decimal-comma numbers into a
    list of floats (layout used from 2013 onward)."""
    text = pg_pdf[pg][0]
    chunks = []
    start = 0
    for pos, ch in enumerate(text):
        if ch == '\n':
            chunks.append(text[start:pos])
            start = pos + 1
    # Convert Brazilian decimal commas to dots before parsing.
    return [float(chunk.replace(',', '.')) for chunk in chunks]
| [
"pandas.read_csv",
"pathlib.Path",
"requests.get",
"numpy.array",
"PyPDF2.PdfFileReader"
] | [((321, 338), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (332, 338), True, 'import pandas as pd\n'), ((351, 376), 'numpy.array', 'np.array', (["data['COD_ESC']"], {}), "(data['COD_ESC'])\n", (359, 376), True, 'import numpy as np\n'), ((907, 918), 'requests.get', 'rt.get', (['url'], {}), '(url)\n', (913, 918), True, 'import requests as rt\n'), ((934, 947), 'pathlib.Path', 'Path', (['end_pdf'], {}), '(end_pdf)\n', (938, 947), False, 'from pathlib import Path\n'), ((1168, 1189), 'PyPDF2.PdfFileReader', 'p2.PdfFileReader', (['arq'], {}), '(arq)\n', (1184, 1189), True, 'import PyPDF2 as p2\n')] |
import numpy as np
from .detector import LISADetector
class NoiseCorrelationBase(object):
    """ Noise correlation objects have methods to compute
    noise PSD correlation matrices.
    Do not use the bare class.

    Args:
        ndet: number of detectors in the network.
    """
    def __init__(self, ndet):
        # Bug fix: the original body was the bare expression
        # `self.ndet`, which raised AttributeError and never
        # stored the detector count.
        self.ndet = ndet

    def _get_corrmat(self, f):
        # Hook for subclasses; the base class is abstract.
        raise NotImplementedError("Don't use the NoiseCorrelationBase class")

    def get_corrmat(self, f):
        """ Return covariance matrix as a function
        of frequency.

        Args:
            f: array of `N_f` frequencies.

        Returns:
            array_like: array of shape `[N_f, N_d, N_d]`, \
                where `N_d` is the number of detectors in \
                the network, containing the correlation \
                matrix for each input frequency.
        """
        return self._get_corrmat(f)
class NoiseCorrelationConstant(NoiseCorrelationBase):
    """ Frequency-independent noise correlation matrices.

    Args:
        corrmat: 2D array providing the constant covariance
            matrix.
    """
    def __init__(self, corrmat):
        # Validate entries first (|r_ij| <= 1), then dimensionality,
        # preserving the original order of the checks.
        if not np.all(np.fabs(corrmat) <= 1):
            raise ValueError("The input correlation matrix "
                             "has elements larger than 1")
        if np.ndim(corrmat) != 2:
            raise ValueError("Correlation matrices should be 2D")
        self.ndet = len(corrmat)
        self.mat = corrmat

    def _get_corrmat(self, f):
        freqs = np.atleast_1d(f)
        nf = len(freqs)
        # Repeat the constant matrix once per frequency.
        stacked = np.tile(self.mat, (nf, 1))
        return stacked.reshape([nf, self.ndet, self.ndet])
class NoiseCorrelationConstantIdentity(NoiseCorrelationConstant):
    """ Perfectly uncorrelated detectors: the correlation matrix
    is the identity at every frequency.

    Args:
        ndet: number of detectors in the network.
    """
    def __init__(self, ndet):
        # Skip the parent's validation; an identity matrix is
        # trivially a valid correlation matrix.
        self.ndet = ndet
        self.mat = np.identity(ndet)
class NoiseCorrelationConstantR(NoiseCorrelationConstant):
    """ Constant correlation matrices with the same cross-correlation
    coefficient `r` for every pair of distinct detectors.

    Args:
        ndet: number of detectors in the network.
        r: pairwise correlation coefficient.
    """
    def __init__(self, ndet, r):
        self.ndet = ndet
        # Ones on the diagonal, r everywhere else:
        # (1 - r) * I + r * J, where J is the all-ones matrix.
        self.mat = ((1-r)*np.identity(ndet) +
                    np.full([ndet, ndet], r))
class NoiseCorrelationFromFunctions(NoiseCorrelationBase):
    """ Correlation matrix built from two PSD functions: one shared
    auto-correlation for every detector, and one shared
    cross-correlation for every pair of distinct detectors.

    Args:
        ndet: number of detectors in the network.
        psd_auto: function of frequency returning the
            detector noise auto-correlation.
        psd_cross: function of frequency returning the
            detector noise cross-correlation.
    """
    def __init__(self, ndet, psd_auto, psd_cross):
        self.ndet = ndet
        self.psda = psd_auto
        self.psdx = psd_cross

    def _rho(self, f):
        # Frequency-dependent correlation coefficient: cross / auto.
        auto = self.psda(f)
        cross = self.psdx(f)
        return cross / auto

    def _get_corrmat(self, f):
        freqs = np.atleast_1d(f)
        rho = self._rho(freqs)
        mat = np.zeros([len(freqs), self.ndet, self.ndet])
        for i in range(self.ndet):
            mat[:, i, i] = 1
            for j in range(i + 1, self.ndet):
                # Fill both symmetric off-diagonal entries.
                mat[:, i, j] = rho
                mat[:, j, i] = rho
        return mat
class NoiseCorrelationLISA(NoiseCorrelationFromFunctions):
    """ Noise correlation matrix for the three LISA channels.

    Args:
        det: :class:`~schnell.LISADetector` object.
    """
    def __init__(self, det):
        if not isinstance(det, LISADetector):
            raise ValueError("`det` must be of type LISADetector")
        # LISA has exactly three channels; reuse the detector's
        # A and X PSD methods for auto/cross correlations.
        self.ndet = 3
        self.psda = det.psd_A
        self.psdx = det.psd_X
| [
"numpy.tile",
"numpy.eye",
"numpy.fabs",
"numpy.ndim",
"numpy.full",
"numpy.atleast_1d"
] | [((1459, 1475), 'numpy.atleast_1d', 'np.atleast_1d', (['f'], {}), '(f)\n', (1472, 1475), True, 'import numpy as np\n'), ((1945, 1962), 'numpy.eye', 'np.eye', (['self.ndet'], {}), '(self.ndet)\n', (1951, 1962), True, 'import numpy as np\n'), ((3277, 3293), 'numpy.atleast_1d', 'np.atleast_1d', (['f'], {}), '(f)\n', (3290, 3293), True, 'import numpy as np\n'), ((2448, 2482), 'numpy.full', 'np.full', (['[self.ndet, self.ndet]', 'r'], {}), '([self.ndet, self.ndet], r)\n', (2455, 2482), True, 'import numpy as np\n'), ((1262, 1278), 'numpy.ndim', 'np.ndim', (['corrmat'], {}), '(corrmat)\n', (1269, 1278), True, 'import numpy as np\n'), ((1515, 1541), 'numpy.tile', 'np.tile', (['self.mat', '(nf, 1)'], {}), '(self.mat, (nf, 1))\n', (1522, 1541), True, 'import numpy as np\n'), ((2408, 2425), 'numpy.eye', 'np.eye', (['self.ndet'], {}), '(self.ndet)\n', (2414, 2425), True, 'import numpy as np\n'), ((1103, 1119), 'numpy.fabs', 'np.fabs', (['corrmat'], {}), '(corrmat)\n', (1110, 1119), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from matplotlib import cm
# NOTE(review): `cm` is imported but never used below.
# Marker size shared by all scatter plots.
s=20
# Load iris and keep only the two petal features (columns 2 and 3).
X, y = load_iris(return_X_y=True)
X = X[:, [2, 3]]
# Fixed axis limits and labels so every saved frame lines up.
f, ax = plt.subplots(figsize=(4, 2.2))
ax.set_xlim(0, 7)
ax.set_ylim(0, 2.7)
x_ = ax.set_xlabel('Petal length')
y_ = ax.set_ylabel('Petal width')
# Frame 1: empty labeled axes.
plt.savefig('images/iris_1.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Frame 2: add the first sample in black.
plt.scatter([X[0, 0]], [X[0, 1]], c='k', s=s)
plt.savefig('images/iris_2.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Frame 3: add sample 51.
plt.scatter([X[51, 0]], [X[51, 1]], c='k', s=s)
plt.savefig('images/iris_3.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Frame 4: all samples, still uncolored.
plt.scatter(X[:, 0], X[:, 1], c='k', s=s)
plt.savefig('images/iris_4.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Frame 5: recolor the samples by class and add a legend.
for i, name in enumerate(['Setosa', 'Versicolor', 'Virginica']):
    loc = np.where(y == i)[0]
    plt.scatter(X[loc, 0], X[loc, 1], s=s, label=name)
plt.legend()
plt.savefig('images/iris_5.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Fit a random forest on the two petal features.
rf = RandomForestClassifier().fit(X, y)
# Frames 6-14: place query points ('x' markers), first in black and then
# (for some of them) recolored. NOTE(review): `x` is built at each step,
# presumably for rf.predict, but is never used; the recoloring colors
# appear hard-coded rather than taken from the classifier.
xc = [1, .5]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_6.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
plt.scatter([xc[0]], [xc[1]], c='blue', marker='x', s=4*s)
plt.savefig('images/iris_7.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
xc = [4, 1.2]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_8.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
plt.scatter([xc[0]], [xc[1]], c='orange', marker='x', s=4*s)
plt.savefig('images/iris_9.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
xc = [5, 2.2]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_10.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
plt.scatter([xc[0]], [xc[1]], c='green', marker='x', s=4*s)
plt.savefig('images/iris_11.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Frames 12-14: three more query points, left black (no recolor step).
xc = [2.5, .8]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_12.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
xc = [4.9, 1.6]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_13.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
xc = [6, .2]
x = np.array([[xc[0], xc[1]]])
plt.scatter([xc[0]], [xc[1]], c='k', marker='x', s=4*s)
plt.savefig('images/iris_14.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
# Frame 15: decision-surface overlay. Refit the forest and predict on a
# 50x50 grid (np.linspace default) spanning the axis limits.
rf = RandomForestClassifier().fit(X, y)
xx, yy = np.meshgrid(np.linspace(0, 7),
                     np.linspace(0, 2.7))
Z = rf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
colors = ['b', 'orange', 'green']
plt.contourf(xx, yy, Z, levels=2, alpha=0.3, colors=colors)
plt.savefig('images/iris_15.png', bbox_extra_artists=[x_, y_],
            bbox_inches='tight', dpi=200)
| [
"sklearn.datasets.load_iris",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.savefig",
"numpy.where",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((181, 207), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (190, 207), False, 'from sklearn.datasets import load_iris\n'), ((234, 264), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 2.2)'}), '(figsize=(4, 2.2))\n', (246, 264), True, 'import matplotlib.pyplot as plt\n'), ((374, 470), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_1.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_1.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (385, 470), True, 'import matplotlib.pyplot as plt\n'), ((479, 524), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[X[0, 0]]', '[X[0, 1]]'], {'c': '"""k"""', 's': 's'}), "([X[0, 0]], [X[0, 1]], c='k', s=s)\n", (490, 524), True, 'import matplotlib.pyplot as plt\n'), ((525, 621), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_2.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_2.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (536, 621), True, 'import matplotlib.pyplot as plt\n'), ((630, 677), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[X[51, 0]]', '[X[51, 1]]'], {'c': '"""k"""', 's': 's'}), "([X[51, 0]], [X[51, 1]], c='k', s=s)\n", (641, 677), True, 'import matplotlib.pyplot as plt\n'), ((678, 774), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_3.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_3.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (689, 774), True, 'import matplotlib.pyplot as plt\n'), ((784, 825), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[:, 0]', 'X[:, 1]'], {'c': '"""k"""', 's': 's'}), "(X[:, 0], X[:, 1], c='k', s=s)\n", (795, 825), True, 'import matplotlib.pyplot as plt\n'), ((826, 922), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""images/iris_4.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_4.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (837, 922), True, 'import matplotlib.pyplot as plt\n'), ((1081, 1093), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1091, 1093), True, 'import matplotlib.pyplot as plt\n'), ((1094, 1190), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_5.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_5.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (1105, 1190), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1284), 'numpy.array', 'np.array', (['[[xc[0], xc[1]]]'], {}), '([[xc[0], xc[1]]])\n', (1266, 1284), True, 'import numpy as np\n'), ((1287, 1344), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""k"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='k', marker='x', s=4 * s)\n", (1298, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1343, 1439), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_6.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_6.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (1354, 1439), True, 'import matplotlib.pyplot as plt\n'), ((1448, 1508), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""blue"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='blue', marker='x', s=4 * s)\n", (1459, 1508), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1603), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_7.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_7.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (1518, 1603), True, 'import matplotlib.pyplot as plt\n'), 
((1631, 1657), 'numpy.array', 'np.array', (['[[xc[0], xc[1]]]'], {}), '([[xc[0], xc[1]]])\n', (1639, 1657), True, 'import numpy as np\n'), ((1660, 1717), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""k"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='k', marker='x', s=4 * s)\n", (1671, 1717), True, 'import matplotlib.pyplot as plt\n'), ((1716, 1812), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_8.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_8.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (1727, 1812), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1883), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""orange"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='orange', marker='x', s=4 * s)\n", (1832, 1883), True, 'import matplotlib.pyplot as plt\n'), ((1882, 1978), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_9.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_9.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (1893, 1978), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2033), 'numpy.array', 'np.array', (['[[xc[0], xc[1]]]'], {}), '([[xc[0], xc[1]]])\n', (2015, 2033), True, 'import numpy as np\n'), ((2036, 2093), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""k"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='k', marker='x', s=4 * s)\n", (2047, 2093), True, 'import matplotlib.pyplot as plt\n'), ((2092, 2189), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_10.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_10.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (2103, 2189), True, 'import matplotlib.pyplot as plt\n'), 
((2198, 2259), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""green"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='green', marker='x', s=4 * s)\n", (2209, 2259), True, 'import matplotlib.pyplot as plt\n'), ((2258, 2355), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_11.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_11.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (2269, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2410), 'numpy.array', 'np.array', (['[[xc[0], xc[1]]]'], {}), '([[xc[0], xc[1]]])\n', (2392, 2410), True, 'import numpy as np\n'), ((2413, 2470), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""k"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='k', marker='x', s=4 * s)\n", (2424, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2566), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_12.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_12.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (2480, 2566), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2622), 'numpy.array', 'np.array', (['[[xc[0], xc[1]]]'], {}), '([[xc[0], xc[1]]])\n', (2604, 2622), True, 'import numpy as np\n'), ((2625, 2682), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""k"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='k', marker='x', s=4 * s)\n", (2636, 2682), True, 'import matplotlib.pyplot as plt\n'), ((2681, 2778), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_13.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_13.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (2692, 2778), True, 'import matplotlib.pyplot as plt\n'), 
((2805, 2831), 'numpy.array', 'np.array', (['[[xc[0], xc[1]]]'], {}), '([[xc[0], xc[1]]])\n', (2813, 2831), True, 'import numpy as np\n'), ((2834, 2891), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[xc[0]]', '[xc[1]]'], {'c': '"""k"""', 'marker': '"""x"""', 's': '(4 * s)'}), "([xc[0]], [xc[1]], c='k', marker='x', s=4 * s)\n", (2845, 2891), True, 'import matplotlib.pyplot as plt\n'), ((2890, 2987), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_14.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_14.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (2901, 2987), True, 'import matplotlib.pyplot as plt\n'), ((3223, 3282), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'levels': '(2)', 'alpha': '(0.3)', 'colors': 'colors'}), '(xx, yy, Z, levels=2, alpha=0.3, colors=colors)\n', (3235, 3282), True, 'import matplotlib.pyplot as plt\n'), ((3283, 3380), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/iris_15.png"""'], {'bbox_extra_artists': '[x_, y_]', 'bbox_inches': '"""tight"""', 'dpi': '(200)'}), "('images/iris_15.png', bbox_extra_artists=[x_, y_], bbox_inches=\n 'tight', dpi=200)\n", (3294, 3380), True, 'import matplotlib.pyplot as plt\n'), ((1030, 1080), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[loc, 0]', 'X[loc, 1]'], {'s': 's', 'label': 'name'}), '(X[loc, 0], X[loc, 1], s=s, label=name)\n', (1041, 1080), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3075), 'numpy.linspace', 'np.linspace', (['(0)', '(7)'], {}), '(0, 7)\n', (3069, 3075), True, 'import numpy as np\n'), ((3098, 3117), 'numpy.linspace', 'np.linspace', (['(0)', '(2.7)'], {}), '(0, 2.7)\n', (3109, 3117), True, 'import numpy as np\n'), ((1006, 1022), 'numpy.where', 'np.where', (['(y == i)'], {}), '(y == i)\n', (1014, 1022), True, 'import numpy as np\n'), ((1205, 1229), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (1227, 1229), 
False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3002, 3026), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (3024, 3026), False, 'from sklearn.ensemble import RandomForestClassifier\n')] |
# stdlib
from abc import ABCMeta, abstractmethod
from typing import Any, List, Tuple
# third party
import numpy as np
class Params(metaclass=ABCMeta):
    """Abstract base class for a named hyperparameter with value bounds."""

    def __init__(self, name: str, bounds: Tuple[Any, Any]) -> None:
        self.name = name
        self.bounds = bounds

    @abstractmethod
    def get(self) -> List[Any]:
        """Return a serializable description of this parameter."""

    @abstractmethod
    def sample(self) -> Any:
        """Draw a single value for this parameter."""
class Categorical(Params):
    """Hyperparameter restricted to an explicit list of allowed values."""

    def __init__(self, name: str, choices: List[Any]) -> None:
        bounds = (min(choices), max(choices))
        super().__init__(name, bounds)
        self.name = name
        self.choices = choices

    def get(self) -> List[Any]:
        """Return the parameter description as [name, choices]."""
        return [self.name, self.choices]

    def sample(self) -> Any:
        """Pick one of the allowed values uniformly at random."""
        return np.random.choice(self.choices, 1)[0]
class Float(Params):
    """Continuous hyperparameter sampled uniformly from [low, high)."""

    def __init__(self, name: str, low: float, high: float) -> None:
        low, high = float(low), float(high)
        super().__init__(name, (low, high))
        self.name = name
        self.low = low
        self.high = high

    def get(self) -> List[Any]:
        """Return the parameter description as [name, low, high]."""
        return [self.name, self.low, self.high]

    def sample(self) -> Any:
        """Draw a value uniformly from [low, high)."""
        return np.random.uniform(self.low, self.high)
class Integer(Params):
    """Integer hyperparameter drawn uniformly from a stepped grid [low, high]."""

    def __init__(self, name: str, low: int, high: int, step: int = 1) -> None:
        # Fix: low/high/step were assigned twice (before and after
        # super().__init__); a single assignment after the super call suffices.
        super().__init__(name, (low, high))
        self.name = name
        self.low = low
        self.high = high
        self.step = step
        # Pre-computed candidate values; sampling draws uniformly from these.
        self.choices = list(range(low, high + 1, step))

    def get(self) -> List[Any]:
        """Return the parameter description as [name, low, high, step]."""
        return [self.name, self.low, self.high, self.step]

    def sample(self) -> Any:
        """Draw one value uniformly from the candidate grid."""
        return np.random.choice(self.choices, 1)[0]
| [
"numpy.random.choice",
"numpy.random.uniform"
] | [((1155, 1193), 'numpy.random.uniform', 'np.random.uniform', (['self.low', 'self.high'], {}), '(self.low, self.high)\n', (1172, 1193), True, 'import numpy as np\n'), ((731, 764), 'numpy.random.choice', 'np.random.choice', (['self.choices', '(1)'], {}), '(self.choices, 1)\n', (747, 764), True, 'import numpy as np\n'), ((1718, 1751), 'numpy.random.choice', 'np.random.choice', (['self.choices', '(1)'], {}), '(self.choices, 1)\n', (1734, 1751), True, 'import numpy as np\n')] |
import math
import numpy as np
from scipy.spatial.transform import Rotation
"""
The rotations can of two types:
1. In a global frame of reference (also known as rotation w.r.t. fixed or extrinsic frame)
2. In a body-centred frame of reference (also known as rotation with respect to current frame of reference.
It is also referred as rotation w.r.t. intrinsic frame).
For more details on intrinsic and extrinsic frames refer: https://en.wikipedia.org/wiki/Euler_angles#Definition_by_intrinsic_rotations
Euler angles as ROLL-PITCH-YAW refer the following links:
* [TaitโBryan angles](https://en.wikipedia.org/wiki/Euler_angles#TaitโBryan_angles#Conventions)
* [Euler angls as YAW-PITCH-ROLL](https://en.wikipedia.org/wiki/Euler_angles#Conventions_2)
* [Rotation using Euler Angles](https://adipandas.github.io/posts/2020/02/euler-rotation/)
* [scipy: ``from_euler``](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.from_euler.html#scipy.spatial.transform.Rotation.from_euler)
* [scipy: ``as_euler``](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.as_euler.html#scipy.spatial.transform.Rotation.as_euler)
To get the angles as yaw-pitch-roll we calculate rotation with intrinsic frame of reference.
1. In intrinsic frame we start with `yaw` to go from inertial frame `0` to frame `1`.
2. Than do `pitch` in frame `1` to go from frame `1` to frame `2`.
3. Than do `roll` in frame `2` to go from frame `2` to body frame `3`.
"""
# Axis-order strings for scipy's Rotation: uppercase letters denote intrinsic
# (body-fixed) axes, lowercase letters denote extrinsic (fixed-frame) axes.
INTRINSIC_ROTATION = "ZYX"
EXTRINSIC_ROTATION = "xyz"
def add_gaussian_noise(vector, noise_mag):
    """
    Add zero-mean gaussian noise to the input vector.

    :param vector: vector of n-dimensions
    :type vector: numpy.ndarray
    :param noise_mag: magnitude (standard deviation scale) of the added noise
    :type noise_mag: float
    :return: noisy vector with the same shape as the input
    :rtype: numpy.ndarray
    """
    noise = np.random.randn(*vector.shape) * float(noise_mag)
    return vector + noise
def euler2quat_raw(rpy):
    """
    Convert euler angles (roll, pitch, yaw in radians) to a quaternion.

    :param rpy: vector of (roll, pitch, yaw) with shape (3,)
    :type rpy: numpy.ndarray
    :return: quaternion as (w, x, y, z) with shape (4,)
    :rtype: numpy.ndarray
    """
    roll, pitch, yaw = rpy
    # Half-angle sines/cosines used by the standard conversion formula
    cr, sr = math.cos(roll * 0.5), math.sin(roll * 0.5)
    cp, sp = math.cos(pitch * 0.5), math.sin(pitch * 0.5)
    cy, sy = math.cos(yaw * 0.5), math.sin(yaw * 0.5)
    return np.array([
        cr * cp * cy + sr * sp * sy,  # w
        sr * cp * cy - cr * sp * sy,  # x
        cr * sp * cy + sr * cp * sy,  # y
        cr * cp * sy - sr * sp * cy,  # z
    ])
def quat2euler_raw(quat):
    """
    Convert a quaternion orientation to euler angles.

    :param quat: quaternion as (w, x, y, z) with shape (4,)
    :type quat: numpy.ndarray
    :return: vector of (roll, pitch, yaw) with shape (3,)
    :rtype: numpy.ndarray
    """
    w, x, y, z = quat
    # Roll: rotation about the x axis
    roll = np.arctan2(2.0 * (w * x + y * z), 1.0 - 2.0 * (x * x + y * y))
    # Pitch: rotation about the y axis, clamped to +/-90 deg at the singularity
    sinp = 2.0 * (w * y - z * x)
    pitch = np.copysign(math.pi * 0.5, sinp) if abs(sinp) >= 1.0 else np.arcsin(sinp)
    # Yaw: rotation about the z axis
    yaw = np.arctan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))
    return np.array([roll, pitch, yaw])
def quat2euler(quat, noise_mag=0):
    """
    Convert a scalar-first quaternion to euler angles via scipy.

    :param quat: quaternion in scalar first format
    :type quat: numpy.ndarray
    :param noise_mag: magnitude of gaussian noise added to orientation along each axis in radians
    :type noise_mag: float
    :return: euler angles as roll, pitch, yaw (x, y, z) in radians
    :rtype: numpy.ndarray
    """
    scalar_last = np.roll(quat, -1)  # scipy expects scalar-last quaternions
    ypr = Rotation.from_quat(scalar_last).as_euler(INTRINSIC_ROTATION)  # yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    return ypr[::-1]  # reverse to roll-pitch-yaw
def euler2quat(euler, noise_mag=0):
    """
    Convert euler angles to a scalar-first quaternion via scipy.

    :param euler: vector of euler angles with shape (3,) as roll-pitch-yaw in radians
    :type euler: numpy.ndarray
    :param noise_mag: magnitude of gaussian noise added to orientation along each axis in radians
    :type noise_mag: float
    :return: quaternion vector in scalar first format with shape (4,)
    :rtype: numpy.ndarray
    """
    ypr = np.array([euler[2], euler[1], euler[0]])  # reorder to yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    scalar_last = Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_quat()
    return np.roll(scalar_last, 1)  # back to scalar-first
def quat2rot(quat, noise_mag=0):
    """
    Convert a scalar-first quaternion to a 3x3 direction cosine matrix.

    :param quat: quaternion (in scalar first format)
    :type quat: numpy.ndarray
    :param noise_mag: magnitude of gaussian noise added to orientation along each axis in radians
    :type noise_mag: float
    :return: rotation matrix SO(3)
    :rtype: numpy.ndarray
    """
    scalar_last = np.roll(quat, -1)  # scipy expects scalar-last quaternions
    ypr = Rotation.from_quat(scalar_last).as_euler(INTRINSIC_ROTATION)  # yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    # Rebuild the (possibly perturbed) rotation and return its matrix form
    return Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_matrix()
def rot2quat(rot_mat, noise_mag=0):
    """
    Convert a rotation matrix (SO3) to a scalar-first quaternion.

    :param rot_mat: direction cosine matrix of 3x3 dimensions
    :type rot_mat: numpy.ndarray
    :param noise_mag: magnitude of gaussian noise added to orientation along each axis in radians.
    :type noise_mag: float
    :return quat: quaternion (in scalar first format) with a shape (4,).
    :rtype: numpy.ndarray
    """
    ypr = Rotation.from_matrix(rot_mat).as_euler(INTRINSIC_ROTATION)  # yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    scalar_last = Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_quat()
    return np.roll(scalar_last, 1)  # back to scalar-first
def euler2rot(euler, noise_mag=0):
    """
    Convert euler angles to a rotation (direction cosine) matrix.

    :param euler: vector with shape (3,) of euler angles (roll, pitch, yaw) in radians
    :type euler: numpy.ndarray
    :param noise_mag: magnitude of gaussian noise included in euler angle
    :type noise_mag: float
    :return: rotation matrix of shape (3, 3)
    :rtype: numpy.ndarray
    """
    ypr = np.array([euler[2], euler[1], euler[0]])  # reorder roll-pitch-yaw -> yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    return Rotation.from_euler(INTRINSIC_ROTATION, ypr).as_matrix()
def rot2euler(rot_mat, noise_mag=0):
    """
    Convert a rotation matrix (SO3) to euler angles.

    :param rot_mat: rotation matrix of shape (3, 3)
    :type rot_mat: numpy.ndarray
    :param noise_mag: magnitude of gaussian noise included in euler angle
    :type noise_mag: float
    :return: euler angles as (roll, pitch, yaw) with shape (3,)
    :rtype: numpy.ndarray
    """
    ypr = Rotation.from_matrix(rot_mat).as_euler(INTRINSIC_ROTATION)  # yaw-pitch-roll
    if noise_mag:
        ypr = add_gaussian_noise(ypr, noise_mag)
    return np.array([ypr[2], ypr[1], ypr[0]])  # back to roll-pitch-yaw
def quat2euler_scipy(quat):
    """Convert a scalar-first quaternion to extrinsic xyz euler angles via scipy."""
    scalar_last = np.roll(quat, shift=-1)  # scipy expects scalar-last
    return Rotation.from_quat(scalar_last).as_euler('xyz')
def euler2quat_scipy(rpy):
    """Convert extrinsic xyz euler angles to a scalar-first quaternion via scipy."""
    scalar_last = Rotation.from_euler('xyz', rpy).as_quat()
    return np.roll(scalar_last, shift=1)  # reorder to scalar-first
def rotmat_world2body_scipy(rpy):
    """Return the rotation matrix for extrinsic xyz euler angles via scipy."""
    return Rotation.from_euler('xyz', rpy).as_matrix()
def rotmat_pqr2euler_rate(rpy):
    """
    Matrix mapping body angular rates (p, q, r) to euler angle rates
    (roll_dot, pitch_dot, yaw_dot).

    :param rpy: euler angles (roll, pitch, yaw) in radians
    :type rpy: numpy.ndarray
    :return: 3x3 transformation matrix
    :rtype: numpy.ndarray
    """
    roll, pitch = rpy[0], rpy[1]
    sr, cr = np.sin(roll), np.cos(roll)
    tp, cp = np.tan(pitch), np.cos(pitch)
    # Standard kinematic relation; singular at pitch = +/-90 deg (cos(pitch)=0).
    # Bug fix: the (1, 2) entry must be -sin(roll), not -sin(pitch).
    rotmat = np.array([
        [1, sr * tp, cr * tp],
        [0, cr, -sr],
        [0, sr / cp, cr / cp]
    ])
    return rotmat
def cross(a, b):
    """
    Cross product a x b computed via the skew-symmetric matrix of a.

    :param a: 3-vector
    :param b: 3-vector
    :return: cross product vector with shape (3,)
    :rtype: numpy.ndarray
    """
    # Bug fix: the three rows must be wrapped in a single outer list; the
    # original passed them as separate positional arguments to np.array,
    # where the second positional argument is `dtype` (a TypeError at runtime).
    a_skew = np.array([
        [0, -a[2], a[1]],
        [a[2], 0, -a[0]],
        [-a[1], a[0], 0]
    ])
    return np.dot(a_skew, b)
| [
"numpy.roll",
"scipy.spatial.transform.Rotation.from_euler",
"scipy.spatial.transform.Rotation.from_matrix",
"numpy.tan",
"scipy.spatial.transform.Rotation.from_quat",
"numpy.arcsin",
"math.cos",
"numpy.array",
"numpy.dot",
"numpy.arctan2",
"numpy.cos",
"numpy.sin",
"math.sin",
"numpy.rand... | [((2362, 2381), 'math.cos', 'math.cos', (['(yaw * 0.5)'], {}), '(yaw * 0.5)\n', (2370, 2381), False, 'import math\n'), ((2391, 2410), 'math.sin', 'math.sin', (['(yaw * 0.5)'], {}), '(yaw * 0.5)\n', (2399, 2410), False, 'import math\n'), ((2420, 2441), 'math.cos', 'math.cos', (['(pitch * 0.5)'], {}), '(pitch * 0.5)\n', (2428, 2441), False, 'import math\n'), ((2451, 2472), 'math.sin', 'math.sin', (['(pitch * 0.5)'], {}), '(pitch * 0.5)\n', (2459, 2472), False, 'import math\n'), ((2482, 2502), 'math.cos', 'math.cos', (['(roll * 0.5)'], {}), '(roll * 0.5)\n', (2490, 2502), False, 'import math\n'), ((2512, 2532), 'math.sin', 'math.sin', (['(roll * 0.5)'], {}), '(roll * 0.5)\n', (2520, 2532), False, 'import math\n'), ((2689, 2711), 'numpy.array', 'np.array', (['[w, x, y, z]'], {}), '([w, x, y, z])\n', (2697, 2711), True, 'import numpy as np\n'), ((3100, 3132), 'numpy.arctan2', 'np.arctan2', (['sinr_cosp', 'cosr_cosp'], {}), '(sinr_cosp, cosr_cosp)\n', (3110, 3132), True, 'import numpy as np\n'), ((3408, 3440), 'numpy.arctan2', 'np.arctan2', (['siny_cosp', 'cosy_cosp'], {}), '(siny_cosp, cosy_cosp)\n', (3418, 3440), True, 'import numpy as np\n'), ((3453, 3481), 'numpy.array', 'np.array', (['[roll, pitch, yaw]'], {}), '([roll, pitch, yaw])\n', (3461, 3481), True, 'import numpy as np\n'), ((3895, 3912), 'numpy.roll', 'np.roll', (['quat', '(-1)'], {}), '(quat, -1)\n', (3902, 3912), True, 'import numpy as np\n'), ((3971, 3995), 'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (['quat'], {}), '(quat)\n', (3989, 3995), False, 'from scipy.spatial.transform import Rotation\n'), ((4703, 4743), 'numpy.array', 'np.array', (['[euler[2], euler[1], euler[0]]'], {}), '([euler[2], euler[1], euler[0]])\n', (4711, 4743), True, 'import numpy as np\n'), ((4870, 4916), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['INTRINSIC_ROTATION', 'euler'], {}), '(INTRINSIC_ROTATION, euler)\n', (4889, 4916), False, 'from 
scipy.spatial.transform import Rotation\n'), ((4965, 4993), 'numpy.roll', 'np.roll', (['quat_scalar_last', '(1)'], {}), '(quat_scalar_last, 1)\n', (4972, 4993), True, 'import numpy as np\n'), ((5424, 5441), 'numpy.roll', 'np.roll', (['quat', '(-1)'], {}), '(quat, -1)\n', (5431, 5441), True, 'import numpy as np\n'), ((5526, 5550), 'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (['quat'], {}), '(quat)\n', (5544, 5550), False, 'from scipy.spatial.transform import Rotation\n'), ((5785, 5838), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['INTRINSIC_ROTATION', 'euler_angles'], {}), '(INTRINSIC_ROTATION, euler_angles)\n', (5804, 5838), False, 'from scipy.spatial.transform import Rotation\n'), ((6424, 6453), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['rot_mat'], {}), '(rot_mat)\n', (6444, 6453), False, 'from scipy.spatial.transform import Rotation\n'), ((6653, 6706), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['INTRINSIC_ROTATION', 'euler_angles'], {}), '(INTRINSIC_ROTATION, euler_angles)\n', (6672, 6706), False, 'from scipy.spatial.transform import Rotation\n'), ((6793, 6821), 'numpy.roll', 'np.roll', (['quat_scalar_last', '(1)'], {}), '(quat_scalar_last, 1)\n', (6800, 6821), True, 'import numpy as np\n'), ((7269, 7309), 'numpy.array', 'np.array', (['[euler[2], euler[1], euler[0]]'], {}), '([euler[2], euler[1], euler[0]])\n', (7277, 7309), True, 'import numpy as np\n'), ((7439, 7485), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['INTRINSIC_ROTATION', 'euler'], {}), '(INTRINSIC_ROTATION, euler)\n', (7458, 7485), False, 'from scipy.spatial.transform import Rotation\n'), ((7929, 7958), 'scipy.spatial.transform.Rotation.from_matrix', 'Rotation.from_matrix', (['rot_mat'], {}), '(rot_mat)\n', (7949, 7958), False, 'from scipy.spatial.transform import Rotation\n'), ((8126, 8187), 'numpy.array', 'np.array', (['[euler_angles[2], euler_angles[1], 
euler_angles[0]]'], {}), '([euler_angles[2], euler_angles[1], euler_angles[0]])\n', (8134, 8187), True, 'import numpy as np\n'), ((8244, 8267), 'numpy.roll', 'np.roll', (['quat'], {'shift': '(-1)'}), '(quat, shift=-1)\n', (8251, 8267), True, 'import numpy as np\n'), ((8442, 8464), 'numpy.roll', 'np.roll', (['quat'], {'shift': '(1)'}), '(quat, shift=1)\n', (8449, 8464), True, 'import numpy as np\n'), ((8918, 8980), 'numpy.array', 'np.array', (['[0, -a[2], a[1]]', '[a[2], 0, -a[0]]', '[-a[1], a[0], 0]'], {}), '([0, -a[2], a[1]], [a[2], 0, -a[0]], [-a[1], a[0], 0])\n', (8926, 8980), True, 'import numpy as np\n'), ((9022, 9039), 'numpy.dot', 'np.dot', (['a_skew', 'b'], {}), '(a_skew, b)\n', (9028, 9039), True, 'import numpy as np\n'), ((3208, 3240), 'numpy.copysign', 'np.copysign', (['(math.pi * 0.5)', 'sinp'], {}), '(math.pi * 0.5, sinp)\n', (3219, 3240), True, 'import numpy as np\n'), ((3302, 3317), 'numpy.arcsin', 'np.arcsin', (['sinp'], {}), '(sinp)\n', (3311, 3317), True, 'import numpy as np\n'), ((1948, 1978), 'numpy.random.randn', 'np.random.randn', (['*vector.shape'], {}), '(*vector.shape)\n', (1963, 1978), True, 'import numpy as np\n'), ((8293, 8317), 'scipy.spatial.transform.Rotation.from_quat', 'Rotation.from_quat', (['quat'], {}), '(quat)\n', (8311, 8317), False, 'from scipy.spatial.transform import Rotation\n'), ((8389, 8420), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', 'rpy'], {}), "('xyz', rpy)\n", (8408, 8420), False, 'from scipy.spatial.transform import Rotation\n'), ((8546, 8577), 'scipy.spatial.transform.Rotation.from_euler', 'Rotation.from_euler', (['"""xyz"""', 'rpy'], {}), "('xyz', rpy)\n", (8565, 8577), False, 'from scipy.spatial.transform import Rotation\n'), ((8753, 8767), 'numpy.cos', 'np.cos', (['rpy[0]'], {}), '(rpy[0])\n', (8759, 8767), True, 'import numpy as np\n'), ((8678, 8692), 'numpy.sin', 'np.sin', (['rpy[0]'], {}), '(rpy[0])\n', (8684, 8692), True, 'import numpy as np\n'), ((8693, 8707), 
'numpy.tan', 'np.tan', (['rpy[1]'], {}), '(rpy[1])\n', (8699, 8707), True, 'import numpy as np\n'), ((8709, 8723), 'numpy.cos', 'np.cos', (['rpy[0]'], {}), '(rpy[0])\n', (8715, 8723), True, 'import numpy as np\n'), ((8724, 8738), 'numpy.tan', 'np.tan', (['rpy[1]'], {}), '(rpy[1])\n', (8730, 8738), True, 'import numpy as np\n'), ((8770, 8784), 'numpy.sin', 'np.sin', (['rpy[1]'], {}), '(rpy[1])\n', (8776, 8784), True, 'import numpy as np\n'), ((8799, 8813), 'numpy.sin', 'np.sin', (['rpy[0]'], {}), '(rpy[0])\n', (8805, 8813), True, 'import numpy as np\n'), ((8814, 8828), 'numpy.cos', 'np.cos', (['rpy[1]'], {}), '(rpy[1])\n', (8820, 8828), True, 'import numpy as np\n'), ((8830, 8844), 'numpy.cos', 'np.cos', (['rpy[0]'], {}), '(rpy[0])\n', (8836, 8844), True, 'import numpy as np\n'), ((8845, 8859), 'numpy.cos', 'np.cos', (['rpy[1]'], {}), '(rpy[1])\n', (8851, 8859), True, 'import numpy as np\n')] |
"""
Provide some basic statistics on the get_valid_cpgs_dataset file before converting it to a data for the nn
"""
import argparse
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Global plotting style for every figure produced by this script
plt.style.use('seaborn')
sns.set_style('whitegrid')
# Change this when moving between different datasets
SC_PATIENTS = ["CRC01", "CRC10", "CRC11", "CRC13"]
BULK_PATIENTS = [""]
# Patient identifiers the rest of the script iterates over
PATIENTS = SC_PATIENTS
def parse_input():
    """Parse command-line arguments: the input cpg file and the output folder."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpg_file', help='Path to the cpg file', required=True)
    parser.add_argument('--output_folder', help='Path of the output folder', required=False,
                        default=os.path.dirname(sys.argv[0]))
    return parser.parse_args()
def print_basic_information(df):
    """
    Print basic statistics about the CpGs in the dataframe.

    :param df: The df to work on
    """
    total_cpg = df.shape[0]
    num_of_pmd = df.groupby(["chromosome", "pmd_index"]).ngroups
    num_of_unique_seq = len(df["sequence"].unique())
    print("##########\nprinting information\n#######")
    print("%s CpG passed PMD filter (%s PMDs). %s are unique seq" % (total_cpg, num_of_pmd,
                                                                     num_of_unique_seq))
    # Solo CpGs contain exactly one CG in their surrounding sequence
    solo = df[df["sequence"].str.count("CG") == 1]
    solo_ss = solo[solo["seq4"].str.contains("[CG]CG[CG]", regex=True)]
    solo_ww = solo[solo["seq4"].str.contains("[AT]CG[AT]", regex=True)]
    num_solo = solo.shape[0]
    print("We have %s(%.2f%%) solo CpG" % (num_solo, num_solo / total_cpg * 100))
    print("solo WCGW: %s(%.2f%%) and solo SCGS: %s(%.2f%%)" %
          (solo_ww.shape[0], solo_ww.shape[0] / num_solo * 100,
           solo_ss.shape[0], solo_ss.shape[0] / num_solo * 100))
    # NOTE(review): patient names already carry the "CRC" prefix (e.g. "CRC01"),
    # so this prints "CRCCRC01" -- confirm whether the prefix should be dropped.
    for patient in PATIENTS:
        missing = np.sum(pd.isnull(df["var%s" % patient]))
        print("CRC%s has %s(%s) cpg without data" % (patient, missing, missing / total_cpg * 100))
def plot_variance_histogram(df, output_folder):
    """
    Plot a per-patient histogram of the variance values in PMDs.

    :param df: The df
    :param output_folder: Output folder
    """
    for patient in PATIENTS:
        df["var%s" % patient].hist()
        plt.style.use('ggplot')
        plt.title("Hist of variance in PMD for crc%s" % patient)
        plt.xlabel("Variance value")
        plt.ylabel("Amount")
        out_path = os.path.join(output_folder, "hist_variance_in_pmd_crc%s.png" % patient)
        plt.savefig(out_path)
        plt.close()
def plot_variance_histogram_vs_ww_ss(df, output_folder):
    """
    Plot overlaid variance histograms: strong-solo (SCGS) vs weak-solo (WCGW) CpGs.

    :param df: The df
    :param output_folder: Output folder
    """
    # Solo CpGs contain exactly one CG; split them by their flanking bases
    solo = df[df["sequence"].str.count("CG") == 1]
    solo_ss = solo[solo["seq4"].str.contains("[CG]CG[CG]", regex=True)]
    solo_ww = solo[solo["seq4"].str.contains("[AT]CG[AT]", regex=True)]
    for patient in PATIENTS:
        var_col = "var%s" % patient
        solo_ss[var_col].hist(label="SCGS")
        solo_ww[var_col].hist(label="WCGW")
        plt.style.use('ggplot')
        plt.title("Hist of variance in PMD for crc%s SCGS vs WCGW" % patient)
        plt.xlabel("Variance value")
        plt.ylabel("Amount")
        plt.legend()
        out_path = os.path.join(output_folder, "hist_variance_in_pmd_crc%s_ss_ww.png" % patient)
        plt.savefig(out_path)
        plt.close()
def plot_meth_density_violin(df, output_folder):
    """
    Plot the methylation value as a violin plot for different density

    :param df: The data frame; NOTE: a "#CpG" column is added to it in place
    :param output_folder: Output folder
    """
    for patient in PATIENTS:
        meth_label = "meth%s" % patient
        new_df = pd.DataFrame()
        # Bucket each CpG by how many CGs its sequence contains: "1".."4" or ">5".
        # Rows whose CG count is 0 are never matched and keep NaN in "#CpG".
        for i in range(1, 6):
            if i != 5:
                df.loc[df["sequence"].str.count("CG") == i, "#CpG"] = "%s" % i
            else:
                df.loc[df["sequence"].str.count("CG") >= i, "#CpG"] = ">%s" % i
        new_df["Methylation level"] = df[meth_label]
        new_df["#CpG"] = df["#CpG"]
        sns.violinplot(y="Methylation level", x="#CpG", data=new_df, palette="muted", order=["1", "2", "3",
                                                                                              "4", ">5"])
        # NOTE(review): the title contains a doubled "for for", and the output
        # filename clashes with plot_meth_density() -- likely unintended.
        plt.title("Methylation density for for CRC%s" % patient)
        plt.savefig(os.path.join(output_folder, "density_meth_crc%s.png" % patient))
        plt.close()
def plot_meth_density(df, output_folder):
    """
    Plot the methylation density as a factor of the CpG density.

    :param df: The data frame
    :param output_folder: Output folder
    """
    for patient in PATIENTS:
        meth_label = "meth%s" % patient
        cg_count = df["sequence"].str.count("CG")
        # One KDE curve per CpG-density bucket: exactly 1..4 CGs, or >=5 CGs
        for i in range(1, 6):
            if i == 1:
                mask = cg_count == i
                label = "#CpG=%s (solo)" % (i - 1)
            elif i != 5:
                mask = cg_count == i
                label = "#CpG=%s" % (i - 1)
            else:
                mask = cg_count >= i
                label = "#CpG>=%s" % (i - 1)
            sns.distplot(df[mask][meth_label], hist=False, kde=True,
                         kde_kws={'linewidth': 3}, label=label)
        plt.title("Methylation density for CRC%s" % patient, fontsize=20)
        plt.xlabel("Methylation Level", fontsize=16)
        plt.grid(False)
        plt.ylabel("Distribution", fontsize=16)
        plt.legend()
        plt.savefig(os.path.join(output_folder, "density_meth_crc%s.png" % patient))
        plt.close()
def plot_meth_vs_var_jointplot(df, output_folder):
    """
    Plot methylation vs variance for each patient as a seaborn jointplot.

    :param df: The df
    :param output_folder: path for output folder
    """
    for patient in PATIENTS:
        patient_df = df[df["patient"] == patient]
        plt.subplots_adjust(top=0.9)
        grid = sns.jointplot(x=patient_df["meth"], y=patient_df["var"], kind="kde")
        grid.fig.suptitle("Methylation vs Variance %s" % patient, fontsize=20)
        grid.set_axis_labels("Methylation", "Variance", fontsize=16)
        grid.savefig(os.path.join(output_folder, "dist_meth_vs_var_%s.png" % patient))
        plt.close()
def remove_by_nc_methylation_info(df, nc_filter=0.6):
    """
    Report how many CpGs would survive a normal-cell methylation filter.

    :param df: The df
    :param nc_filter: the nc filter value (minimal normal-cell average methylation)
    """
    nc_label = "nc_avg"  # normal-cell average methylation column, shared by all patients
    for patient in PATIENTS:
        var_label = "var%s" % patient
        # Only consider CpGs that actually have variance data for this patient
        patient_df = df[~pd.isnull(df[var_label])]
        amount_of_cpg = patient_df.shape[0]
        amount_of_cpg_with_nc = np.sum(patient_df[nc_label] >= nc_filter)
        # Bug fix: patient and nc_filter were passed in swapped order, printing
        # e.g. "CRC0.6 using methylation normal cell filter>=CRC01 ...".
        print("CRC%s using methylation normal cell filter>=%s will give %s%% cpg which is %s" %
              (patient, nc_filter, amount_of_cpg_with_nc / amount_of_cpg * 100, amount_of_cpg))
def main():
    """Load the CpG file, filter it, and generate the requested plots."""
    args = parse_input()
    df = pd.read_csv(args.cpg_file)
    # df = pd.read_pickle(args.cpg_file)
    # Remove empty cpg
    # methylation_columns = df[["meth%s" % i for i in PATIENTS]]
    # df = df[~pd.isnull(methylation_columns.min(axis=1))]
    # Keep only CpGs that were originally highly methylated
    df = df[df["orig_meth_avg"] >= 0.7]
    # assumes the CpG sits around positions 73-77 of the sequence -- TODO confirm
    df["small_seq"] = df["sequence"].str[73:77]
    # keep only solo
    # solo_rows = df[df["sequence"].str.count("CG") == 1]
    # just solo
    # print_basic_information(df)
    # print_basic_information(solo_after_nc_rows)
    plot_meth_vs_var_jointplot(df, args.output_folder)
    # plot_meth_density(df, args.output_folder)
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.subplots_adjust",
"pandas.isnull",
"matplotlib.pyplot.grid",
"argparse.ArgumentParser",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"matplotlib.pyplot.style.use",
"seaborn.set_style",
"matplotlib.pyplot.close",
"numpy.sum",
"o... | [((248, 272), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (261, 272), True, 'import matplotlib.pyplot as plt\n'), ((273, 299), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (286, 299), True, 'import seaborn as sns\n'), ((484, 509), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (507, 509), False, 'import argparse\n'), ((7149, 7175), 'pandas.read_csv', 'pd.read_csv', (['args.cpg_file'], {}), '(args.cpg_file)\n', (7160, 7175), True, 'import pandas as pd\n'), ((2313, 2336), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2326, 2336), True, 'import matplotlib.pyplot as plt\n'), ((2345, 2401), 'matplotlib.pyplot.title', 'plt.title', (["('Hist of variance in PMD for crc%s' % patient)"], {}), "('Hist of variance in PMD for crc%s' % patient)\n", (2354, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2438), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Variance value"""'], {}), "('Variance value')\n", (2420, 2438), True, 'import matplotlib.pyplot as plt\n'), ((2447, 2467), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amount"""'], {}), "('Amount')\n", (2457, 2467), True, 'import matplotlib.pyplot as plt\n'), ((2569, 2580), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2578, 2580), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3149), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (3139, 3149), True, 'import matplotlib.pyplot as plt\n'), ((3158, 3227), 'matplotlib.pyplot.title', 'plt.title', (["('Hist of variance in PMD for crc%s SCGS vs WCGW' % patient)"], {}), "('Hist of variance in PMD for crc%s SCGS vs WCGW' % patient)\n", (3167, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3236, 3264), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Variance value"""'], {}), "('Variance value')\n", (3246, 3264), True, 'import matplotlib.pyplot as 
plt\n'), ((3273, 3293), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amount"""'], {}), "('Amount')\n", (3283, 3293), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3314), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3312, 3314), True, 'import matplotlib.pyplot as plt\n'), ((3422, 3433), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3431, 3433), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3741), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3739, 3741), True, 'import pandas as pd\n'), ((4071, 4187), 'seaborn.violinplot', 'sns.violinplot', ([], {'y': '"""Methylation level"""', 'x': '"""#CpG"""', 'data': 'new_df', 'palette': '"""muted"""', 'order': "['1', '2', '3', '4', '>5']"}), "(y='Methylation level', x='#CpG', data=new_df, palette=\n 'muted', order=['1', '2', '3', '4', '>5'])\n", (4085, 4187), True, 'import seaborn as sns\n'), ((4284, 4340), 'matplotlib.pyplot.title', 'plt.title', (["('Methylation density for for CRC%s' % patient)"], {}), "('Methylation density for for CRC%s' % patient)\n", (4293, 4340), True, 'import matplotlib.pyplot as plt\n'), ((4434, 4445), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4443, 4445), True, 'import matplotlib.pyplot as plt\n'), ((5382, 5447), 'matplotlib.pyplot.title', 'plt.title', (["('Methylation density for CRC%s' % patient)"], {'fontsize': '(20)'}), "('Methylation density for CRC%s' % patient, fontsize=20)\n", (5391, 5447), True, 'import matplotlib.pyplot as plt\n'), ((5456, 5500), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Methylation Level"""'], {'fontsize': '(16)'}), "('Methylation Level', fontsize=16)\n", (5466, 5500), True, 'import matplotlib.pyplot as plt\n'), ((5509, 5524), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5517, 5524), True, 'import matplotlib.pyplot as plt\n'), ((5533, 5572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Distribution"""'], {'fontsize': '(16)'}), "('Distribution', fontsize=16)\n", (5543, 
5572), True, 'import matplotlib.pyplot as plt\n'), ((5581, 5593), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5591, 5593), True, 'import matplotlib.pyplot as plt\n'), ((5688, 5699), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5697, 5699), True, 'import matplotlib.pyplot as plt\n'), ((6046, 6074), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.9)'}), '(top=0.9)\n', (6065, 6074), True, 'import matplotlib.pyplot as plt\n'), ((6094, 6170), 'seaborn.jointplot', 'sns.jointplot', ([], {'x': 'patient_df[meth_label]', 'y': 'patient_df[var_label]', 'kind': '"""kde"""'}), "(x=patient_df[meth_label], y=patient_df[var_label], kind='kde')\n", (6107, 6170), True, 'import seaborn as sns\n'), ((6429, 6440), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6438, 6440), True, 'import matplotlib.pyplot as plt\n'), ((6866, 6907), 'numpy.sum', 'np.sum', (['(patient_df[nc_label] >= nc_filter)'], {}), '(patient_df[nc_label] >= nc_filter)\n', (6872, 6907), True, 'import numpy as np\n'), ((717, 745), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (732, 745), False, 'import os\n'), ((1924, 1944), 'pandas.isnull', 'pd.isnull', (['df[label]'], {}), '(df[label])\n', (1933, 1944), True, 'import pandas as pd\n'), ((2488, 2559), 'os.path.join', 'os.path.join', (['output_folder', "('hist_variance_in_pmd_crc%s.png' % patient)"], {}), "(output_folder, 'hist_variance_in_pmd_crc%s.png' % patient)\n", (2500, 2559), False, 'import os\n'), ((3335, 3412), 'os.path.join', 'os.path.join', (['output_folder', "('hist_variance_in_pmd_crc%s_ss_ww.png' % patient)"], {}), "(output_folder, 'hist_variance_in_pmd_crc%s_ss_ww.png' % patient)\n", (3347, 3412), False, 'import os\n'), ((4361, 4424), 'os.path.join', 'os.path.join', (['output_folder', "('density_meth_crc%s.png' % patient)"], {}), "(output_folder, 'density_meth_crc%s.png' % patient)\n", (4373, 4424), False, 'import os\n'), ((5615, 5678), 'os.path.join', 
'os.path.join', (['output_folder', "('density_meth_crc%s.png' % patient)"], {}), "(output_folder, 'density_meth_crc%s.png' % patient)\n", (5627, 5678), False, 'import os\n'), ((6353, 6417), 'os.path.join', 'os.path.join', (['output_folder', "('dist_meth_vs_var_%s.png' % patient)"], {}), "(output_folder, 'dist_meth_vs_var_%s.png' % patient)\n", (6365, 6417), False, 'import os\n'), ((6764, 6788), 'pandas.isnull', 'pd.isnull', (['df[var_label]'], {}), '(df[var_label])\n', (6773, 6788), True, 'import pandas as pd\n')] |
# map labels to shuffled images
import os
import yaml
import numpy as np
def div_labels(src_file, orig_batch_size, num_div):
    '''
    Split a shuffled 1D label array into num_div interleaved sub-batch streams.

    src_file, path to a .npy file holding a 1D label array; outputs are saved
        next to it with suffixes _0/_1 (num_div=2) or _00/_10/_01/_11
        (num_div=4)
    orig_batch_size, original batch size; the array is first truncated to a
        whole number of batches
    num_div, number of sub-batches to divide each batch into; only 2 or 4
        are supported

    Raises NotImplementedError for any other num_div.
    '''
    labels = np.load(src_file)
    # Truncate to a whole number of original batches.  Floor division (//)
    # keeps all sizes/indices integral on Python 3 as well (the old '/'
    # produced floats, which are invalid as slice bounds).
    labels = labels[:labels.size // orig_batch_size * orig_batch_size]
    assert labels.size % orig_batch_size == 0
    batch_size = orig_batch_size // num_div
    if num_div == 2:
        # Rows of the (n_sub_batches, batch_size) view alternate between
        # the two output streams.
        labels_0 = labels.reshape((-1, batch_size))[::num_div].reshape(-1)
        labels_1 = labels.reshape((-1, batch_size))[1::num_div].reshape(-1)
        # sanity check: every sub-batch lands in the right stream, in order
        for ind in range(labels.size // batch_size):
            assert np.all(labels.reshape((-1, batch_size))[ind] ==
                    labels[batch_size * ind: batch_size * (ind + 1)])
            labels_sub = labels_1 if ind % 2 else labels_0
            ind_sub = ind // 2
            assert np.all(labels[batch_size * ind: batch_size * (ind + 1)] ==
                    labels_sub[batch_size * ind_sub: batch_size * (ind_sub + 1)])
        # sanity check finished
        np.save(src_file[:-4] + '_0.npy', labels_0)
        np.save(src_file[:-4] + '_1.npy', labels_1)
    elif num_div == 4:
        labels_00 = labels.reshape((-1, batch_size))[::num_div].reshape(-1)
        labels_10 = labels.reshape((-1, batch_size))[1::num_div].reshape(-1)
        labels_01 = labels.reshape((-1, batch_size))[2::num_div].reshape(-1)
        labels_11 = labels.reshape((-1, batch_size))[3::num_div].reshape(-1)
        np.save(src_file[:-4] + '_00.npy', labels_00)
        np.save(src_file[:-4] + '_10.npy', labels_10)
        np.save(src_file[:-4] + '_01.npy', labels_01)
        np.save(src_file[:-4] + '_11.npy', labels_11)
        # sanity check: same invariant as above, with four streams
        dict_labels = {0: labels_00, 1: labels_10, 2: labels_01, 3: labels_11}
        for ind in range(labels.size // batch_size):
            assert np.all(labels.reshape((-1, batch_size))[ind] ==
                    labels[batch_size * ind: batch_size * (ind + 1)])
            labels_sub = dict_labels[ind % 4]
            ind_sub = ind // 4
            assert np.all(labels[batch_size * ind: batch_size * (ind + 1)] ==
                    labels_sub[batch_size * ind_sub: batch_size * (ind_sub + 1)])
    else:
        # BUGFIX: the exception used to be constructed but never raised.
        raise NotImplementedError("num_div has to be 2 or 4")
def save_train_labels(misc_dir, train_label_name):
    """Build the training label array, ordered by the shuffled filenames.

    Parses 'class_dir/filename label' rows from misc_dir/train.txt, then
    looks each shuffled filename up by its basename and saves the resulting
    integer labels to train_label_name as a .npy file.
    """
    ### TRAIN LABELS ###
    # read the labels from train.txt
    with open(os.path.join(misc_dir, 'train.txt'), 'r') as text_labels:
        rows = [row.split() for row in text_labels.readlines()]
    # key on the bare filename (second path component of each entry)
    label_dict = {path.split('/')[1]: int(lab) for path, lab in rows}
    # save the label npy file according to the shuffled filenames
    shuffled = np.load(os.path.join(misc_dir,
                                     'shuffled_train_filenames.npy'))
    ordered_labels = [label_dict[name.split('/')[-1]] for name in shuffled]
    np.save(train_label_name, ordered_labels)
def save_val_labels(misc_dir, val_label_name):
    """Extract the integer labels (second column) from misc_dir/val.txt and
    save them, in file order, to val_label_name as a .npy file."""
    ### VALIDATION LABELS ###
    with open(os.path.join(misc_dir, 'val.txt'), 'r') as text_labels:
        parsed = [int(row.split()[1]) for row in text_labels.readlines()]
    np.save(val_label_name, parsed)
if __name__ == '__main__':
    # Resolve the dataset root directory from the project-level config file.
    with open('paths.yaml', 'r') as f:
        paths = yaml.load(f)  # NOTE(review): no Loader argument -- deprecated/unsafe on untrusted YAML
    tar_root_dir = paths['tar_root_dir']
    misc_dir = os.path.join(tar_root_dir, 'misc')
    label_dir = os.path.join(tar_root_dir, 'labels')
    if not os.path.isdir(label_dir):
        os.makedirs(label_dir)
    train_label_name = os.path.join(label_dir, 'train_labels.npy')
    val_label_name = os.path.join(label_dir, 'val_labels.npy')
    # Batch size the label arrays were originally organized around.
    orig_batch_size = 256
    # Build the full label arrays, then split each into 2 interleaved sub-batch streams.
    save_val_labels(misc_dir, val_label_name)
    save_train_labels(misc_dir, train_label_name)
    num_div = 2
    div_labels(train_label_name, orig_batch_size, num_div)
    div_labels(val_label_name, orig_batch_size, num_div)
| [
"os.makedirs",
"os.path.join",
"yaml.load",
"os.path.isdir",
"numpy.all",
"numpy.load",
"numpy.save"
] | [((300, 317), 'numpy.load', 'np.load', (['src_file'], {}), '(src_file)\n', (307, 317), True, 'import numpy as np\n'), ((3350, 3389), 'numpy.save', 'np.save', (['train_label_name', 'final_labels'], {}), '(train_label_name, final_labels)\n', (3357, 3389), True, 'import numpy as np\n'), ((3666, 3697), 'numpy.save', 'np.save', (['val_label_name', 'labels'], {}), '(val_label_name, labels)\n', (3673, 3697), True, 'import numpy as np\n'), ((3852, 3886), 'os.path.join', 'os.path.join', (['tar_root_dir', '"""misc"""'], {}), "(tar_root_dir, 'misc')\n", (3864, 3886), False, 'import os\n'), ((3903, 3939), 'os.path.join', 'os.path.join', (['tar_root_dir', '"""labels"""'], {}), "(tar_root_dir, 'labels')\n", (3915, 3939), False, 'import os\n'), ((4031, 4074), 'os.path.join', 'os.path.join', (['label_dir', '"""train_labels.npy"""'], {}), "(label_dir, 'train_labels.npy')\n", (4043, 4074), False, 'import os\n'), ((4096, 4137), 'os.path.join', 'os.path.join', (['label_dir', '"""val_labels.npy"""'], {}), "(label_dir, 'val_labels.npy')\n", (4108, 4137), False, 'import os\n'), ((1214, 1241), 'numpy.save', 'np.save', (['tar_file', 'labels_0'], {}), '(tar_file, labels_0)\n', (1221, 1241), True, 'import numpy as np\n'), ((1295, 1322), 'numpy.save', 'np.save', (['tar_file', 'labels_1'], {}), '(tar_file, labels_1)\n', (1302, 1322), True, 'import numpy as np\n'), ((3092, 3146), 'os.path.join', 'os.path.join', (['misc_dir', '"""shuffled_train_filenames.npy"""'], {}), "(misc_dir, 'shuffled_train_filenames.npy')\n", (3104, 3146), False, 'import os\n'), ((3781, 3793), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (3790, 3793), False, 'import yaml\n'), ((3951, 3975), 'os.path.isdir', 'os.path.isdir', (['label_dir'], {}), '(label_dir)\n', (3964, 3975), False, 'import os\n'), ((3985, 4007), 'os.makedirs', 'os.makedirs', (['label_dir'], {}), '(label_dir)\n', (3996, 4007), False, 'import os\n'), ((982, 1105), 'numpy.all', 'np.all', (['(labels[batch_size * ind:batch_size * (ind + 1)] == 
labels_sub[batch_size *\n ind_sub:batch_size * (ind_sub + 1)])'], {}), '(labels[batch_size * ind:batch_size * (ind + 1)] == labels_sub[\n batch_size * ind_sub:batch_size * (ind_sub + 1)])\n', (988, 1105), True, 'import numpy as np\n'), ((1709, 1737), 'numpy.save', 'np.save', (['tar_file', 'labels_00'], {}), '(tar_file, labels_00)\n', (1716, 1737), True, 'import numpy as np\n'), ((1791, 1819), 'numpy.save', 'np.save', (['tar_file', 'labels_10'], {}), '(tar_file, labels_10)\n', (1798, 1819), True, 'import numpy as np\n'), ((1873, 1901), 'numpy.save', 'np.save', (['tar_file', 'labels_01'], {}), '(tar_file, labels_01)\n', (1880, 1901), True, 'import numpy as np\n'), ((1955, 1983), 'numpy.save', 'np.save', (['tar_file', 'labels_11'], {}), '(tar_file, labels_11)\n', (1962, 1983), True, 'import numpy as np\n'), ((2751, 2786), 'os.path.join', 'os.path.join', (['misc_dir', '"""train.txt"""'], {}), "(misc_dir, 'train.txt')\n", (2763, 2786), False, 'import os\n'), ((3483, 3516), 'os.path.join', 'os.path.join', (['misc_dir', '"""val.txt"""'], {}), "(misc_dir, 'val.txt')\n", (3495, 3516), False, 'import os\n'), ((2378, 2501), 'numpy.all', 'np.all', (['(labels[batch_size * ind:batch_size * (ind + 1)] == labels_sub[batch_size *\n ind_sub:batch_size * (ind_sub + 1)])'], {}), '(labels[batch_size * ind:batch_size * (ind + 1)] == labels_sub[\n batch_size * ind_sub:batch_size * (ind_sub + 1)])\n', (2384, 2501), True, 'import numpy as np\n')] |
"""
" License:
" -----------------------------------------------------------------------------
" Copyright (c) 2018, <NAME>.
" All rights reserved.
"
" Redistribution and use in source and binary forms, with or without
" modification, are permitted provided that the following conditions are met:
"
" 1. Redistributions of source code must retain the above copyright notice,
" this list of conditions and the following disclaimer.
"
" 2. Redistributions in binary form must reproduce the above copyright notice,
" this list of conditions and the following disclaimer in the documentation
" and/or other materials provided with the distribution.
"
" 3. Neither the name of the copyright holder nor the names of its contributors
" may be used to endorse or promote products derived from this software
" without specific prior written permission.
"
" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
" POSSIBILITY OF SUCH DAMAGE.
" -----------------------------------------------------------------------------
"
" Author: <NAME>, <EMAIL>
" Date: October 2018
"""
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from hdf5io.hdf5datasetwriter import HDF5DatasetWriter
class BuildTinyImageNetDataset:
    """Packs the TinyImageNet image folders into train/val/test HDF5 files.

    Expects the standard TinyImageNet layout under ``root_path``:
    ``train/``, ``val/images/``, ``val/val_annotations.txt``, ``wnids.txt``
    and ``words.txt``.
    """

    def __init__(self, root_path):
        """Stores the relevant paths derived from the dataset root."""
        self.global_train_path = os.path.join(root_path, 'train')
        self.global_val_path = os.path.join(root_path, 'val', 'images')
        self.global_output_path = os.path.join(root_path, 'hdf5Files')
        self.val_mappings = os.path.join(root_path, 'val', 'val_annotations.txt')
        # The wordnet IDs are used to search the words in the words txt file and thus join
        # and create the data labels
        self.global_wordnet_id = os.path.join(root_path, 'wnids.txt')
        self.global_words = os.path.join(root_path, 'words.txt')
        print("\n Starting to build TinyImageProject dataset for image classification...")

    def configDataSet(self):
        """Creates the HDF5 output directory if needed and returns the three
        output file paths as a (train, val, test) tuple."""
        if not os.path.exists(self.global_output_path):
            print('\n HDF5 output directory does not exist. Creating a new directory')
            os.makedirs(self.global_output_path)
        train_HDF5 = os.path.join(self.global_output_path, 'train.hdf5')
        val_HDF5 = os.path.join(self.global_output_path, 'val.hdf5')
        test_HDF5 = os.path.join(self.global_output_path, 'test.hdf5')
        return train_HDF5, val_HDF5, test_HDF5

    def buildDataSet(self):
        """Writes the train/val/test HDF5 datasets and returns the per-channel
        means of the training images as {'RMean', 'GMean', 'BMean'}."""
        # extract all the training paths from the subdirs
        train_paths = [os.path.join(root, filename)
                       for root, subdirs, files in os.walk(self.global_train_path)
                       for filename in files if filename.endswith(".JPEG")]
        # the wordnet ID is the 3rd-from-last path component of each image path
        train_labels = [filepath.split(os.path.sep)[-3] for filepath in train_paths]
        # convert training labels to unique integer values
        le = LabelEncoder()
        train_labels = le.fit_transform(train_labels)
        # In TinyImageNet project, we don't have access to test data. Therefore, we split train data -> 10% for test
        (train_paths, test_paths, train_labels, test_labels) = train_test_split(train_paths, train_labels,
                                                                                test_size=0.1, stratify=train_labels,
                                                                                random_state=20)
        # Next we handle the validation paths creating the validation labels
        val_contents = open(self.val_mappings).read().strip().split('\n')
        val_contents = [line.split('\t')[:2] for line in val_contents]
        val_paths = [os.path.join(self.global_val_path, line[0]) for line in val_contents]
        # BUGFIX: use transform (not fit_transform) so validation labels reuse
        # the encoding fitted on the training labels; re-fitting on the
        # validation subset could map the same class to a different integer.
        val_labels = le.transform([line[1] for line in val_contents])
        # Now we have train, val and test paths and labels. Next building the datasets
        (train_HDF5, val_HDF5, test_HDF5) = self.configDataSet()
        train_val_test_dataset = [('train', train_paths, train_labels, train_HDF5),
                                  ('val', val_paths, val_labels, val_HDF5),
                                  ('test', test_paths, test_labels, test_HDF5)]
        (RList, GList, BList) = ([], [], [])
        for (usage, paths, labels, output_path) in train_val_test_dataset:
            print('\n Building dataset {0}...'.format(output_path))
            dswriter = HDF5DatasetWriter((len(paths), 64, 64, 3), outputPath=output_path)
            for (i, path, label) in zip(tqdm(range(len(paths))), paths, labels):
                # cv2 reads images in BGR order, so channel 2 is red.
                # NOTE(review): cv2.imread returns None for unreadable files,
                # which would fail below -- confirm all .JPEG files are valid.
                img = cv2.imread(path)
                if usage == 'train':
                    RList.append(np.mean(np.ravel(img[:, :, 2])))
                    GList.append(np.mean(np.ravel(img[:, :, 1])))
                    BList.append(np.mean(np.ravel(img[:, :, 0])))
                dswriter.add([img], [label])
            dswriter.close()
            print('\n Finished building dataset {0}'.format(output_path))
        print('[PROGRESS INFO: ] Extracting training data mean.')
        rgb_mean = {'RMean': np.mean(RList), 'GMean': np.mean(GList), 'BMean': np.mean(BList)}
        return rgb_mean
if __name__ == '__main__':
    # Interactive entry point: prompt for the TinyImageNet root directory.
    root_path = input("\n Please enter the root path: ")
    buildTinyImageNet = BuildTinyImageNetDataset(root_path)
    # NOTE(review): the returned RGB means are not persisted or printed here.
    rgb_mean = buildTinyImageNet.buildDataSet()
| [
"os.path.exists",
"sklearn.preprocessing.LabelEncoder",
"numpy.mean",
"os.makedirs",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.ravel",
"cv2.imread",
"os.walk"
] | [((2127, 2159), 'os.path.join', 'os.path.join', (['root_path', '"""train"""'], {}), "(root_path, 'train')\n", (2139, 2159), False, 'import os\n'), ((2191, 2231), 'os.path.join', 'os.path.join', (['root_path', '"""val"""', '"""images"""'], {}), "(root_path, 'val', 'images')\n", (2203, 2231), False, 'import os\n'), ((2266, 2302), 'os.path.join', 'os.path.join', (['root_path', '"""hdf5Files"""'], {}), "(root_path, 'hdf5Files')\n", (2278, 2302), False, 'import os\n'), ((2331, 2384), 'os.path.join', 'os.path.join', (['root_path', '"""val"""', '"""val_annotations.txt"""'], {}), "(root_path, 'val', 'val_annotations.txt')\n", (2343, 2384), False, 'import os\n'), ((2547, 2583), 'os.path.join', 'os.path.join', (['root_path', '"""wnids.txt"""'], {}), "(root_path, 'wnids.txt')\n", (2559, 2583), False, 'import os\n'), ((2612, 2648), 'os.path.join', 'os.path.join', (['root_path', '"""words.txt"""'], {}), "(root_path, 'words.txt')\n", (2624, 2648), False, 'import os\n'), ((2983, 3034), 'os.path.join', 'os.path.join', (['self.global_output_path', '"""train.hdf5"""'], {}), "(self.global_output_path, 'train.hdf5')\n", (2995, 3034), False, 'import os\n'), ((3054, 3103), 'os.path.join', 'os.path.join', (['self.global_output_path', '"""val.hdf5"""'], {}), "(self.global_output_path, 'val.hdf5')\n", (3066, 3103), False, 'import os\n'), ((3124, 3174), 'os.path.join', 'os.path.join', (['self.global_output_path', '"""test.hdf5"""'], {}), "(self.global_output_path, 'test.hdf5')\n", (3136, 3174), False, 'import os\n'), ((3733, 3747), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3745, 3747), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3983, 4086), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_paths', 'train_labels'], {'test_size': '(0.1)', 'stratify': 'train_labels', 'random_state': '(20)'}), '(train_paths, train_labels, test_size=0.1, stratify=\n train_labels, random_state=20)\n', (3999, 4086), False, 'from 
sklearn.model_selection import train_test_split\n'), ((2785, 2824), 'os.path.exists', 'os.path.exists', (['self.global_output_path'], {}), '(self.global_output_path)\n', (2799, 2824), False, 'import os\n'), ((2925, 2961), 'os.makedirs', 'os.makedirs', (['self.global_output_path'], {}), '(self.global_output_path)\n', (2936, 2961), False, 'import os\n'), ((3409, 3437), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (3421, 3437), False, 'import os\n'), ((4486, 4529), 'os.path.join', 'os.path.join', (['self.global_val_path', 'line[0]'], {}), '(self.global_val_path, line[0])\n', (4498, 4529), False, 'import os\n'), ((5954, 5968), 'numpy.mean', 'np.mean', (['RList'], {}), '(RList)\n', (5961, 5968), True, 'import numpy as np\n'), ((5979, 5993), 'numpy.mean', 'np.mean', (['GList'], {}), '(GList)\n', (5986, 5993), True, 'import numpy as np\n'), ((6004, 6018), 'numpy.mean', 'np.mean', (['BList'], {}), '(BList)\n', (6011, 6018), True, 'import numpy as np\n'), ((3466, 3497), 'os.walk', 'os.walk', (['self.global_train_path'], {}), '(self.global_train_path)\n', (3473, 3497), False, 'import os\n'), ((5412, 5428), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (5422, 5428), False, 'import cv2\n'), ((5553, 5575), 'numpy.ravel', 'np.ravel', (['img[:, :, 2]'], {}), '(img[:, :, 2])\n', (5561, 5575), True, 'import numpy as np\n'), ((5619, 5641), 'numpy.ravel', 'np.ravel', (['img[:, :, 1]'], {}), '(img[:, :, 1])\n', (5627, 5641), True, 'import numpy as np\n'), ((5685, 5707), 'numpy.ravel', 'np.ravel', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (5693, 5707), True, 'import numpy as np\n')] |
"""
File: pylinex/loglikelihood/NonlinearTruncationLoglikelihood.py
Author: <NAME>
Date: 29 Sep 2018
Description: File containing a class which represents a DIC-like loglikelihood
which uses the number of coefficients to use in each of a number
of bases as the parameters of the likelihood.
"""
import numpy as np
from distpy import Expression
from ..util import int_types, real_numerical_types, sequence_types,\
create_hdf5_dataset, get_hdf5_value
from ..basis import Basis, BasisSet
from ..fitter import Fitter
from ..model import TruncatedBasisHyperModel, CompositeModel
from .LoglikelihoodWithData import LoglikelihoodWithData
from .LoglikelihoodWithModel import LoglikelihoodWithModel
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
class NonlinearTruncationLoglikelihood(LoglikelihoodWithModel):
    """
    Class which represents a DIC-like loglikelihood which uses the number of
    coefficients to use in each of a number of bases as the parameters of the
    likelihood.
    """
    def __init__(self, basis_set, data, error, expression,\
        parameter_penalty=1):
        """
        Initializes a new TruncationLoglikelihood with the given basis_sum,
        data, and error.

        basis_set: BasisSet objects containing basis with the largest number
                   of basis vectors allowed for each component
        data: 1D data vector to fit
        error: 1D vector of noise level estimates for data
        expression: Expression object which forms full model from submodels.
                    The ith submodel (with i starting at 0) should be
                    represented by {i} in the expression string
        parameter_penalty: the logL parameter penalty for adding a parameter in
                           any given model. Should be a non-negative constant.
                           It defaults to 1, which is the penalty used for the
                           Deviance Information Criterion (DIC)
        """
        self.basis_set = basis_set
        self.data = data
        self.error = error
        self.expression = expression
        self.parameter_penalty = parameter_penalty
        self.model =\
            CompositeModel(self.expression, self.basis_set.names, self.models)

    @property
    def basis_set(self):
        """
        Property storing the BasisSet object
        """
        if not hasattr(self, '_basis_set'):
            raise AttributeError("basis_set was referenced before it was set.")
        return self._basis_set

    @basis_set.setter
    def basis_set(self, value):
        """
        Setter for the basis_set object.

        value: a BasisSet object
        """
        if isinstance(value, BasisSet):
            self._basis_set = value
        else:
            raise TypeError("basis_set was set to a non-BasisSet object.")

    @property
    def error(self):
        """
        Property storing the error on the data given to this likelihood.
        """
        if not hasattr(self, '_error'):
            raise AttributeError("error referenced before it was set.")
        return self._error

    @error.setter
    def error(self, value):
        """
        Setter for the error used to define the likelihood.

        value: must be a numpy.ndarray, either 1D (noise levels, same shape as
               data) or 2D (a full covariance matrix)
        """
        value = np.array(value)
        if value.shape == self.data.shape:
            self._error = value
        elif value.shape == (self.data.shape * 2):
            # tuple repetition: a square covariance matrix of shape
            # (num_channels, num_channels)
            self._error = value
        else:
            raise ValueError("error given was not the same shape as the data.")

    @property
    def expression(self):
        """
        Property storing the Expression object which allows for the combination
        of all of the sets of basis vectors.
        """
        if not hasattr(self, '_expression'):
            raise AttributeError("expression was referenced before it was " +\
                "set.")
        return self._expression

    @expression.setter
    def expression(self, value):
        """
        Setter for the Expression object which allows for the combination of
        all of the sets of basis vectors.

        value: an Expression object which has as many arguments as the
               basis_set has names.
        """
        if isinstance(value, Expression):
            if value.num_arguments == len(self.basis_set.names):
                self._expression = value
            else:
                raise ValueError("expression had a different number of " +\
                    "arguments than the basis_set had sets of basis vectors.")
        else:
            raise TypeError("expression was set to a non-Expression object.")

    @property
    def parameter_penalty(self):
        """
        Property storing the penalty imposed on the log-likelihood when an
        extra parameter is included in any given model.
        """
        if not hasattr(self, '_parameter_penalty'):
            raise AttributeError("parameter_penalty was referenced before " +\
                "it was set.")
        return self._parameter_penalty

    @parameter_penalty.setter
    def parameter_penalty(self, value):
        """
        Setter for the penalty assessed when an extra parameter is included in
        any given model.

        value: a non-negative number
        """
        if type(value) in real_numerical_types:
            if value >= 0:
                self._parameter_penalty = value
            else:
                raise ValueError("parameter_penalty was set to a negative " +\
                    "number.")
        else:
            raise TypeError("parameter_penalty was set to a non-number.")

    @property
    def models(self):
        """
        Property storing the underlying models which are combined into the
        composite model.
        """
        if not hasattr(self, '_models'):
            self._models = [TruncatedBasisHyperModel(self.basis_set[name])\
                for name in self.basis_set.names]
        return self._models

    def save_error(self, group, error_link=None):
        """
        Saves the error of this Loglikelihood object.

        group: hdf5 file group where information about this object is being
               saved
        error_link: link to where error is already saved somewhere (if it
                    exists)
        """
        create_hdf5_dataset(group, 'error', data=self.error, link=error_link)

    def fill_hdf5_group(self, group, data_link=None, error_link=None):
        """
        Fills the given hdf5 group with information about this Loglikelihood.

        group: the group to fill with information about this Loglikelihood
        data_link: link like that returned by pylinex.h5py_extensions.HDF5Link
        error_link: link like that returned by pylinex.h5py_extensions.HDF5Link
        """
        group.attrs['class'] = 'NonlinearTruncationLoglikelihood'
        self.save_data(group, data_link=data_link)
        self.save_error(group, error_link=error_link)
        self.basis_set.fill_hdf5_group(group.create_group('basis_set'))
        self.expression.fill_hdf5_group(group.create_group('expression'))
        group.attrs['parameter_penalty'] = self.parameter_penalty

    @staticmethod
    def load_error(group):
        """
        Loads the error of a Loglikelihood object from the given group.

        group: hdf5 file group where loglikelihood.save_error(group)
               has previously been called

        returns: error, an array
        """
        return get_hdf5_value(group['error'])

    @staticmethod
    def load_from_hdf5_group(group):
        """
        Loads a Loglikelihood object from an hdf5 file group in which it was
        previously saved.

        group: the hdf5 file group from which to load a Loglikelihood object

        returns: the Loglikelihood object loaded from the given hdf5 file group
        """
        try:
            assert group.attrs['class'] == 'NonlinearTruncationLoglikelihood'
        except:
            raise ValueError("group doesn't appear to point to a " +\
                "NonlinearTruncationLoglikelihood object.")
        data = LoglikelihoodWithData.load_data(group)
        error = NonlinearTruncationLoglikelihood.load_error(group)
        basis_set = BasisSet.load_from_hdf5_group(group['basis_set'])
        expression = Expression.load_from_hdf5_group(group['expression'])
        parameter_penalty = group.attrs['parameter_penalty']
        return NonlinearTruncationLoglikelihood(basis_set, data, error,\
            expression, parameter_penalty=parameter_penalty)

    @property
    def weighting_matrix(self):
        """
        Property storing the matrix to use for weighting if error is given as
        2D array. This is the inverse square root of the covariance matrix.
        """
        if not hasattr(self, '_weighting_matrix'):
            if self.error.ndim == 1:
                raise AttributeError("The weighting_matrix property only " +\
                    "makes sense if the error given was a covariance matrix.")
            else:
                # BUGFIX: 'la' was never imported in this module, so la.eigh
                # raised a NameError. numpy.linalg.eigh returns the same
                # (eigenvalues, eigenvectors) pair for symmetric matrices.
                (eigenvalues, eigenvectors) = np.linalg.eigh(self.error)
                eigenvalues = np.power(eigenvalues, -0.5)
                self._weighting_matrix = np.dot(\
                    eigenvectors * eigenvalues[np.newaxis,:], eigenvectors.T)
        return self._weighting_matrix

    def weight(self, quantity):
        """
        Meant to generalize weighting by the inverse square root of the
        covariance matrix so that it is efficient when the error is 1D

        quantity: quantity whose 0th axis is channel space which should be
                  weighted

        returns: numpy.ndarray of same shape as quantity containing weighted
                 quantity
        """
        if self.error.ndim == 1:
            error_index =\
                ((slice(None),) + ((np.newaxis,) * (quantity.ndim - 1)))
            return quantity / self.error[error_index]
        elif quantity.ndim in [1, 2]:
            return np.dot(self.weighting_matrix, quantity)
        else:
            # collapse trailing axes so a single matrix product suffices
            quantity_shape = quantity.shape
            quantity = np.reshape(quantity, (quantity_shape[0], -1))
            quantity = np.dot(self.weighting_matrix, quantity)
            return np.reshape(quantity, quantity_shape)

    def weighted_bias(self, pars):
        """
        Computes the weighted difference between the data and the model
        evaluated at the given parameters.

        pars: array of parameter values at which to evaluate the weighted_bias

        returns: 1D numpy array of biases (same shape as data and error arrays)
        """
        return self.weight(self.data - self.model(pars))

    def __call__(self, pars, return_negative=False):
        """
        Gets the value of the loglikelihood at the given parameters.

        pars: the parameter values at which to evaluate the likelihood
        return_negative: if true the negative of the loglikelihood is returned
                         (this is useful for times when the loglikelihood must
                         be maximized since scipy optimization functions only
                         deal with minimization

        returns: the value of this Loglikelihood (or its negative if indicated)
        """
        self.check_parameter_dimension(pars)
        try:
            # chi-squared term minus the DIC-like penalty on the effective
            # number of parameters in use
            logL_value =\
                np.sum(np.abs(self.weighted_bias(pars)) ** 2) / (-2.) -\
                (self.parameter_penalty * self.num_used_parameters(pars))
        except (ValueError, ZeroDivisionError):
            logL_value = -np.inf
        if np.isnan(logL_value):
            logL_value = -np.inf
        if return_negative:
            return -logL_value
        else:
            return logL_value

    def chi_squared(self, parameters):
        """
        Computes the (non-reduced) chi squared statistic. It should follow a
        chi squared distribution with the correct number of degrees of freedom.

        parameters: the parameter values at which to evaluate chi squared

        returns: single number statistic equal to the negative of twice the
                 loglikelihood
        """
        return ((-2.) * self(parameters, return_negative=False))

    def num_used_parameters(self, parameters):
        """
        Finds effective number of parameters given the given parameter vector.

        parameters: parameter vector at which to find the number of effective
                    parameters

        returns: integer number of effective parameters
        """
        # only the 'nterms' hyperparameters count towards model complexity
        return sum([int(round(parameters[index])) for (index, name) in\
            enumerate(self.parameters) if ('nterms' in name)])

    def chi_squared_z_score(self, parameters):
        """
        Computes the z-score of the chi squared value computed at the given
        parameters.

        parameters: the parameter values at which to evaluate chi squared

        returns: single value which should be roughly Gaussian with mean 0 and
                 stdv 1 if degrees_of_freedom is very large.
        """
        degrees_of_freedom =\
            self.num_channels - self.num_used_parameters(parameters)
        return (self.chi_squared(parameters) - degrees_of_freedom) /\
            np.sqrt(2 * degrees_of_freedom)

    def reduced_chi_squared(self, parameters):
        """
        Computes the reduced chi squared statistic. It should follow a
        chi2_reduced distribution with the correct number of degrees of
        freedom.

        pars: the parameter values at which to evaluate the likelihood

        returns: single number statistic proportional to the value of this
                 GaussianLoglikelihood object (since additive constant
                 corresponding to normalization constant is not included)
        """
        degrees_of_freedom =\
            self.num_channels - self.num_used_parameters(parameters)
        return self.chi_squared(parameters) / degrees_of_freedom

    @property
    def gradient_computable(self):
        """
        Returns False because NonlinearTruncationLoglikelihood has some
        discrete and some continuous parameters.
        """
        return False

    def auto_gradient(self, *args, **kwargs):
        """
        Cannot compute the gradient of NonlinearTruncationLoglikelihood objects
        because they have some discrete parameters.
        """
        raise NotImplementedError("gradient cannot be computed for " +\
            "NonlinearTruncationLoglikelihood because some parameters are " +\
            "discrete.")

    @property
    def hessian_computable(self):
        """
        Returns False because NonlinearTruncationLoglikelihood has some
        discrete and some continuous parameters.
        """
        return False

    def auto_hessian(self, *args, **kwargs):
        """
        Cannot compute the hessian of NonlinearTruncationLoglikelihood objects
        because they have some discrete parameters.
        """
        raise NotImplementedError("hessian cannot be computed for " +\
            "NonlinearTruncationLoglikelihood because some parameters are " +\
            "discrete.")

    def __eq__(self, other):
        """
        Checks if self is equal to other.

        other: a Loglikelihood object to check for equality

        returns: True if other and self have the same properties
        """
        if not isinstance(other, NonlinearTruncationLoglikelihood):
            return False
        if self.basis_set != other.basis_set:
            return False
        if not np.allclose(self.data, other.data):
            return False
        if not np.allclose(self.error, other.error):
            return False
        if self.expression != other.expression:
            return False
        return (self.parameter_penalty == other.parameter_penalty)

    def change_data(self, new_data):
        """
        Finds the NonlinearTruncationLoglikelihood with a different data vector
        with everything else kept constant.

        new_data: data to use for new NonlinearTruncationLoglikelihood object

        returns: a new NonlinearTruncationLoglikelihood with the given data
                 property
        """
        return NonlinearTruncationLoglikelihood(self.basis_set, new_data,\
            self.error, self.expression,\
            parameter_penalty=self.parameter_penalty)

    def change_model(self, new_model):
        """
        This function is not implemented for the
        NonlinearTruncationLoglikelihood class.
        """
        raise NotImplementedError("The NonlinearTruncationLoglikelihood " +\
            "class does not implement the change_model class because the " +\
            "model is internally defined.")
| [
"distpy.Expression.load_from_hdf5_group",
"numpy.allclose",
"numpy.sqrt",
"numpy.reshape",
"numpy.power",
"numpy.array",
"numpy.dot",
"numpy.isnan"
] | [((3533, 3548), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (3541, 3548), True, 'import numpy as np\n'), ((8652, 8704), 'distpy.Expression.load_from_hdf5_group', 'Expression.load_from_hdf5_group', (["group['expression']"], {}), "(group['expression'])\n", (8683, 8704), False, 'from distpy import Expression\n'), ((11921, 11941), 'numpy.isnan', 'np.isnan', (['logL_value'], {}), '(logL_value)\n', (11929, 11941), True, 'import numpy as np\n'), ((13626, 13657), 'numpy.sqrt', 'np.sqrt', (['(2 * degrees_of_freedom)'], {}), '(2 * degrees_of_freedom)\n', (13633, 13657), True, 'import numpy as np\n'), ((15991, 16025), 'numpy.allclose', 'np.allclose', (['self.data', 'other.data'], {}), '(self.data, other.data)\n', (16002, 16025), True, 'import numpy as np\n'), ((16067, 16103), 'numpy.allclose', 'np.allclose', (['self.error', 'other.error'], {}), '(self.error, other.error)\n', (16078, 16103), True, 'import numpy as np\n'), ((9430, 9457), 'numpy.power', 'np.power', (['eigenvalues', '(-0.5)'], {}), '(eigenvalues, -0.5)\n', (9438, 9457), True, 'import numpy as np\n'), ((9499, 9564), 'numpy.dot', 'np.dot', (['(eigenvectors * eigenvalues[np.newaxis, :])', 'eigenvectors.T'], {}), '(eigenvectors * eigenvalues[np.newaxis, :], eigenvectors.T)\n', (9505, 9564), True, 'import numpy as np\n'), ((10295, 10334), 'numpy.dot', 'np.dot', (['self.weighting_matrix', 'quantity'], {}), '(self.weighting_matrix, quantity)\n', (10301, 10334), True, 'import numpy as np\n'), ((10416, 10461), 'numpy.reshape', 'np.reshape', (['quantity', '(quantity_shape[0], -1)'], {}), '(quantity, (quantity_shape[0], -1))\n', (10426, 10461), True, 'import numpy as np\n'), ((10485, 10524), 'numpy.dot', 'np.dot', (['self.weighting_matrix', 'quantity'], {}), '(self.weighting_matrix, quantity)\n', (10491, 10524), True, 'import numpy as np\n'), ((10544, 10580), 'numpy.reshape', 'np.reshape', (['quantity', 'quantity_shape'], {}), '(quantity, quantity_shape)\n', (10554, 10580), True, 'import numpy as np\n')] |
import os
import glob
import argparse
import numpy as np
from deepSM import SMData
from deepSM import bpm_estimator
def evaluate_bpm(raw_data_path, gen_path):
    """Compare BPM estimates between two step-file datasets.

    For every song directory found under *gen_path*, loads one SM file from
    *raw_data_path* and one from *gen_path*, estimates the BPM of each with
    ``bpm_estimator.true_bpm`` and prints MSE, MAE, exact-match accuracy and
    the off-by-one error rate over all songs that could be processed.
    """
    songs = os.listdir(gen_path)

    true_bpms = []
    est_bpms = []
    for song_name in songs:
        # NOTE(review): `est_sm` is loaded from raw_data_path while `true_sm`
        # comes from gen_path (the generated data) -- the naming looks
        # swapped; confirm the intended ground-truth source with callers.
        est_sm = SMData.SMFile(
            song_name, raw_data_path=raw_data_path + '/' + song_name)
        true_sm = SMData.SMFile(
            song_name, raw_data_path=gen_path + '/' + song_name)

        try:
            true_bpm = bpm_estimator.true_bpm(true_sm)
            est_bpm = bpm_estimator.true_bpm(est_sm)
            true_bpms.append(true_bpm)
            est_bpms.append(est_bpm)
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Songs whose BPM cannot be
            # estimated are skipped (best-effort evaluation).
            continue

    true_bpms = np.array(true_bpms)
    est_bpms = np.array(est_bpms)
    # Hoisted: the absolute error vector was recomputed three times.
    abs_err = np.abs(true_bpms - est_bpms)

    print(true_bpms)
    print(est_bpms)
    print("N songs:", len(true_bpms))
    print("MSE:", np.mean(abs_err ** 2))
    print("MAE:", np.mean(abs_err))
    print("Accuracy:", np.mean(true_bpms == est_bpms))
    print("Off by one errors:", np.mean(abs_err == 1))
    print(abs_err)
if __name__ == '__main__':
    # Command-line entry point: evaluate BPM estimates for a dataset pair.
    cli = argparse.ArgumentParser()
    for positional in ('raw_data_path', 'gen_path'):
        cli.add_argument(positional, type=str)
    cli_args = cli.parse_args()
    evaluate_bpm(cli_args.raw_data_path, cli_args.gen_path)
| [
"numpy.mean",
"numpy.abs",
"os.listdir",
"argparse.ArgumentParser",
"deepSM.bpm_estimator.true_bpm",
"numpy.array",
"deepSM.SMData.SMFile"
] | [((174, 194), 'os.listdir', 'os.listdir', (['gen_path'], {}), '(gen_path)\n', (184, 194), False, 'import os\n'), ((723, 742), 'numpy.array', 'np.array', (['true_bpms'], {}), '(true_bpms)\n', (731, 742), True, 'import numpy as np\n'), ((758, 776), 'numpy.array', 'np.array', (['est_bpms'], {}), '(est_bpms)\n', (766, 776), True, 'import numpy as np\n'), ((1181, 1206), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1204, 1206), False, 'import argparse\n'), ((278, 349), 'deepSM.SMData.SMFile', 'SMData.SMFile', (['song_name'], {'raw_data_path': "(raw_data_path + '/' + song_name)"}), "(song_name, raw_data_path=raw_data_path + '/' + song_name)\n", (291, 349), False, 'from deepSM import SMData\n'), ((385, 451), 'deepSM.SMData.SMFile', 'SMData.SMFile', (['song_name'], {'raw_data_path': "(gen_path + '/' + song_name)"}), "(song_name, raw_data_path=gen_path + '/' + song_name)\n", (398, 451), False, 'from deepSM import SMData\n'), ((875, 911), 'numpy.mean', 'np.mean', (['((true_bpms - est_bpms) ** 2)'], {}), '((true_bpms - est_bpms) ** 2)\n', (882, 911), True, 'import numpy as np\n'), ((991, 1021), 'numpy.mean', 'np.mean', (['(true_bpms == est_bpms)'], {}), '(true_bpms == est_bpms)\n', (998, 1021), True, 'import numpy as np\n'), ((1109, 1137), 'numpy.abs', 'np.abs', (['(true_bpms - est_bpms)'], {}), '(true_bpms - est_bpms)\n', (1115, 1137), True, 'import numpy as np\n'), ((507, 538), 'deepSM.bpm_estimator.true_bpm', 'bpm_estimator.true_bpm', (['true_sm'], {}), '(true_sm)\n', (529, 538), False, 'from deepSM import bpm_estimator\n'), ((561, 591), 'deepSM.bpm_estimator.true_bpm', 'bpm_estimator.true_bpm', (['est_sm'], {}), '(est_sm)\n', (583, 591), False, 'from deepSM import bpm_estimator\n'), ((937, 965), 'numpy.abs', 'np.abs', (['(true_bpms - est_bpms)'], {}), '(true_bpms - est_bpms)\n', (943, 965), True, 'import numpy as np\n'), ((1063, 1091), 'numpy.abs', 'np.abs', (['(true_bpms - est_bpms)'], {}), '(true_bpms - est_bpms)\n', (1069, 1091), True, 
'import numpy as np\n')] |
# coding=utf-8
# Copyright 2016-2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "23/09/18"
import logging
import os
import json
import sys
import pandas as pd
import numpy as np
import random
import math
import itertools
import scipy.stats
from sklearn import linear_model
from math import exp, sqrt
import ai4materials.utils.unit_conversion as uc
logger = logging.getLogger('ai4materials')
def choose_atomic_features(selected_feature_list=None,
                           atomic_data_file=None, binary_data_file=None):
    """Choose primary features for the extended lasso procedure.

    Reads the atomic and binary CSV tables, merges them on 'Mat', adds the
    derived features r_sigma and r_pi [Phys. Rev. Lett. 33, 1095 (1974)] and
    E{s,p}/sqrt(Zval) [Phys. Rev. B 85, 104104 (2012)], then returns a
    dataframe restricted to *selected_feature_list* (plus the 'Mat' column).

    Exits the program if no features are selected.
    """
    # Fixed: this validation used to run *after* `df[selected_feature_list]`,
    # so passing None crashed before the error message was ever reached.
    if not selected_feature_list:
        logger.error("No selected features.")
        sys.exit(1)

    df1 = pd.read_csv(atomic_data_file, index_col=False)
    df2 = pd.read_csv(binary_data_file, index_col=False)
    # merge the two dataframes on the material column
    df = pd.merge(df1, df2, on='Mat')

    # calculate r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
    radii_s_p = ['rp(A)', 'rs(A)', 'rp(B)', 'rs(B)']
    df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
    df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)

    # calculate Es/sqrt(Zval) and Ep/sqrt(Zval) for both species
    for orbital in ('s', 'p'):
        for element in ('A', 'B'):
            e_val_z = ['E{0}({1})'.format(orbital, element), 'val({0})'.format(element)]
            new_col = 'E{0}({1})/sqrt(Zval({1}))'.format(orbital, element)
            df[new_col] = df[e_val_z].apply(e_sqrt_z, axis=1)

    feature_list = df.columns.tolist()
    # 'Mat' and 'Edim' are identifiers/targets, not features
    for non_feature in ('Mat', 'Edim'):
        if non_feature in feature_list:
            feature_list.remove(non_feature)
    logger.debug("Available features: \n {}".format(feature_list))

    # .copy() avoids pandas chained-assignment warnings on the insert below
    df_selected = df[selected_feature_list].copy()
    df_selected.insert(0, 'Mat', df['Mat'])
    logger.info("Primary features selected: \n {}".format(selected_feature_list))
    return df_selected
def classify_rs_zb(structure):
    """Classify if a structure is rocksalt or zincblende from a NoMaD structure.

    (one json file). Supports multiple frames (TO DO: check that). Hard-coded.

    rocksalt:
    atom_frac1 0.0 0.0 0.0
    atom_frac2 0.5 0.5 0.5

    zincblende:
    atom_frac1 0.0 0.0 0.0
    atom_frac2 0.25 0.25 0.25

    zincblende --> label=0
    rocksalt --> label=1

    Returns (chemical_formula, energy, label), each a dict keyed by
    (gIndexRun, gIndexDesc). Only the first non-empty frame is processed
    (see the ``break`` below).
    """
    energy = {}
    chemical_formula = {}
    label = {}
    # Fixed: `.iteritems()` is Python-2 only and was removed in Python 3;
    # the sibling functions in this module already use `.items()`.
    for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
        if atoms is not None:
            energy[gIndexRun, gIndexDesc] = structure.energy_eV[(gIndexRun, gIndexDesc)]
            chemical_formula[gIndexRun, gIndexDesc] = structure.chemical_formula[(gIndexRun, gIndexDesc)]
            # get labels, works only for the RS/ZB dataset: read the
            # fractional position of the second atom
            pos_atom_2 = np.asarray(list(structure.scaled_positions.values())).reshape(2, 3)[1, :]
            if all(i < 0.375 for i in pos_atom_2):
                # 0.375 is the midpoint between 0.25 (ZB) and 0.5 (RS)
                label[gIndexRun, gIndexDesc] = 0
            else:
                label[gIndexRun, gIndexDesc] = 1
            # only the first non-empty frame is classified
            break

    return chemical_formula, energy, label
def get_energy_diff(chemical_formula_list, energy_list, label_list):
    """Obtain the energy difference (eV) between rocksalt and zincblende structures.

    Each element of the input lists corresponds to one json file and maps
    (gIndexRun, gIndexDesc) frame keys to values. Returns a dictionary
    ``{material: (delta_e, e_label_0, e_label_1)}`` where ``delta_e`` is the
    per-atom difference between the lowest label-1 energy and the lowest
    label-0 energy, grouped by material -- exactly the quantity reported in
    PRL 114, 105503 (2015).

    Exits the program if either structure type is missing for a material.

    .. todo:: Check if it works for multiple frames.
    """
    # flatten the per-file dictionaries into three parallel flat lists
    formulas = [value for entry in chemical_formula_list for value in entry.values()]
    energies = [value for entry in energy_list for value in entry.values()]
    labels = [value for entry in label_list for value in entry.values()]

    frame = pd.DataFrame({'Mat': formulas, 'Energy': energies, 'Label': labels})

    # keep only the lowest-energy entry for every (material, label) pair
    # zincblende --> label=0, rocksalt --> label=1
    lowest = frame.sort_values(by='Energy').groupby(['Mat', 'Label'], as_index=False).first()

    dict_delta_e = {}
    for material, group in lowest.groupby('Mat'):
        energy_label_1 = group.loc[group['Label'] == 1].Energy.values
        energy_label_0 = group.loc[group['Label'] == 0].Energy.values
        # if delta_e > 0 --> rocksalt; if delta_e < 0 --> zincblende
        if (energy_label_0 and energy_label_1):
            # both are single-element arrays; halve because delta_e is per atom
            delta_e = (energy_label_1 - energy_label_0).item(0) / 2.0
        else:
            logger.error(
                "Could not find all the energies needed to calculate required property for material '{0}'".format(material))
            sys.exit(1)
        dict_delta_e[material] = (delta_e, energy_label_0, energy_label_1)

    return dict_delta_e
def get_lowest_energy_structures(structure, dict_delta_e):
    """Flag, per frame, whether this structure is the lowest-energy one for its material.

    For every non-empty frame (gIndexRun, gIndexDesc) of *structure*, the frame
    energy is compared with the lower of the two label-resolved minima stored
    in *dict_delta_e* (entries are tuples ``(delta_e, e_label_0, e_label_1)``).
    Works only with two possible labels for a given material.

    .. todo:: Check if it works for multiple frames.
    """
    is_lowest_energy = {}
    for frame_key, atoms in structure.atoms.items():
        if atoms is None:
            continue
        frame_energy = structure.energy_eV[frame_key]
        delta_entry = dict_delta_e.get(structure.chemical_formula[frame_key])
        energy_label_0 = delta_entry[1]
        energy_label_1 = delta_entry[2]
        # the reference is the lower of the two label minima
        if energy_label_0 > energy_label_1:
            reference_energy = energy_label_1
        else:
            reference_energy = energy_label_0
        is_lowest_energy[frame_key] = frame_energy == reference_energy
    return is_lowest_energy
def write_atomic_features(structure, selected_feature_list, df, dict_delta_e=None,
                          path=None, filename_suffix='.json', json_file=None):
    """Given the chemical composition, build the descriptor made of atomic features only.

    Includes all the frames in the same json file.

    Writes one json file per structure under *path* and returns its absolute
    filename.

    NOTE(review): this function appears to be Python-2-era dead code with
    several unresolved defects -- see the inline notes below. It should be
    repaired (or removed) before being called from Python 3.

    .. todo:: Check if it works for multiple frames.
    """
    # make dictionary {primary_feature: value} for each structure
    # dictionary of a dictionary, key: Mat, value: atomic_features
    dict_features = df.set_index('chemical_formula').T.to_dict()

    # label=0: rocksalt, label=1: zincblend
    #chemical_formula_, energy_, label_ = classify_rs_zb(structure)

    #is_lowest_energy_ = get_lowest_energy_structures(structure, dict_delta_e)

    if structure.isPeriodic == True:
        for (gIndexRun, gIndexDesc), atoms in structure.atoms.items():
            if atoms is not None:
                # filename is the normalized absolute path
                filename = os.path.abspath(os.path.normpath(os.path.join(path,
                                                                              '{0}{1}'.format(structure.name, filename_suffix))))
                # NOTE(review): `file()` is the Python 2 builtin; on Python 3
                # this raises NameError (use `open(...)`/a context manager).
                # The handle is also never closed (only flushed).
                outF = file(filename, 'w')
                outF.write("""
{
    "data":[""")

                cell = structure.atoms[gIndexRun, gIndexDesc].get_cell()
                cell = np.transpose(cell)
                atoms = structure.atoms[gIndexRun, gIndexDesc]
                # NOTE(review): `structure.chemical_formula_` and `label_` are
                # undefined here -- they came from the classify_rs_zb call
                # that is commented out above; calling this function raises
                # NameError/AttributeError as written.
                chemical_formula = structure.chemical_formula_[gIndexRun, gIndexDesc]
                energy = structure.energy_eV[gIndexRun, gIndexDesc]
                label = label_[gIndexRun, gIndexDesc]
                #target = dict_delta_e.get(chemical_formula_[gIndexRun, gIndexDesc])[0]
                target = dict_delta_e.get(chemical_formula)
                atomic_features = dict_features[structure.chemical_formula[gIndexRun, gIndexDesc]]
                #is_lowest_energy = is_lowest_energy_[gIndexRun,gIndexDesc]
                # NOTE(review): on Python 3 `map(...)` returns a lazy map
                # object, which json.dump cannot serialize -- these two
                # entries need to be materialized as lists.
                res = {
                    "checksum": structure.name,
                    "label": label,
                    "energy": energy,
                    #"is_lowest_energy": is_lowest_energy,
                    "delta_e_rs_zb": target,
                    "chemical_formula": chemical_formula,
                    "gIndexRun": gIndexRun,
                    "gIndexDesc": gIndexDesc,
                    "cell": cell.tolist(),
                    "particle_atom_number": map(lambda x: x.number, atoms),
                    "particle_position": map(lambda x: [x.x, x.y, x.z], atoms),
                    "atomic_features": atomic_features,
                    "main_json_file_name": json_file,
                }

                json.dump(res, outF, indent=2)
                outF.write("""
            ] }""")
                outF.flush()

    return filename
def r_sigma(row):
    """Calculate r_sigma, John-Bloch's indicator1: |rp(A) + rs(A) - rp(B) - rs(B)|.

    From Phys. Rev. Lett. 33, 1095 (1974).
    Input rp(A), rs(A), rp(B), rs(B) -- they need to be given in this order.

    Fixed: the last term is now subtracted, matching the published formula
    and this docstring (the previous code added ``row[3]`` instead).
    """
    return abs(row[0] + row[1] - row[2] - row[3])
def r_pi(row):
    """Calculate r_pi, John-Bloch's indicator2: |rp(A) - rs(A)| + |rp(B) - rs(B)|.

    From Phys. Rev. Lett. 33, 1095 (1974).
    Input rp(A), rs(A), rp(B), rs(B) -- they need to be given in this order.
    """
    rp_a, rs_a, rp_b, rs_b = row[0], row[1], row[2], row[3]
    return abs(rp_a - rs_a) + abs(rp_b - rs_b)
def e_sqrt_z(row):
    """Calculate E/sqrt(val_Z).

    Es/sqrt(Zval) and Ep/sqrt(Zval) from Phys. Rev. B 85, 104104 (2012).
    Input Es(A) or Ep(A), val(A) (A-->B) -- they need to be given in this order.
    """
    orbital_energy, valence_z = row[0], row[1]
    return orbital_energy / math.sqrt(valence_z)
def _get_scaling_factors(columns, metadata_info, energy_unit, length_unit):
"""Calculates characteristic energy and length, given an atomic metadata"""
scaling_factor = []
if columns is not None:
for col in columns:
try:
col_unit = metadata_info[col.split('(', 1)[0]]['units']
# check allowed values, to avoid problem with substance - NOT IDEAD
if col_unit == 'J':
scaling_factor.append(uc.convert_unit(1, energy_unit, target_unit='eV'))
# divide all column by e_0
#df.loc[:, col] *= e_0
elif col_unit == 'm':
scaling_factor.append(uc.convert_unit(1, length_unit, target_unit='angstrom'))
# divide all column by e_0
#df.loc[:, col] *= d_0
else:
scaling_factor.append(1.0)
logger.debug("Feature units are not energy nor lengths. "
"No scale to characteristic length.")
except BaseException:
scaling_factor.append(1.0)
logger.debug("Feature units not included in metadata")
return scaling_factor
def _my_power_2(row):
return pow(row[0], 2)
def _my_power_3(row):
return pow(row[0], 3)
def _my_power_m1(row):
return pow(row[0], -1)
def _my_power_m2(row):
return pow(row[0], -2)
def _my_power_m3(row):
return pow(row[0], -3)
def _my_abs_sqrt(row):
return math.sqrtabs(abs(row[0]))
def _my_exp(row):
return exp(row[0])
def _my_exp_power_2(row):
return exp(pow(row[0], 2))
def _my_exp_power_3(row):
return exp(pow(row[0], 3))
def _my_sum(row):
return row[0] + row[1]
def _my_abs_sum(row):
return abs(row[0] + row[1])
def _my_abs_diff(row):
return abs(row[0] - row[1])
def _my_diff(row):
return row[0] - row[1]
def _my_div(row):
return row[0] / row[1]
def _my_sum_power_2(row):
return pow((row[0] + row[1]), 2)
def _my_sum_power_3(row):
return pow((row[0] + row[1]), 3)
def _my_sum_exp(row):
return exp(row[0] + row[1])
def _my_sum_exp_power_2(row):
return exp(pow(row[0] + row[1], 2))
def _my_sum_exp_power_3(row):
return exp(pow(row[0] + row[1], 3))
def _pairwise_features(df_group, allowed_operations, both_orientations=False):
    """Build pairwise feature columns (+, -, |+|, |-|; optionally reversed
    differences and both-orientation ratios) from the columns of *df_group*.

    Returns a list of single-column DataFrames, one per generated feature.
    """
    frames = []
    for col_a, col_b in itertools.combinations(df_group.columns.tolist(), 2):
        forward = df_group[[col_a, col_b]]
        backward = df_group[[col_b, col_a]]
        if '+' in allowed_operations:
            data = forward.apply(_my_sum, axis=1)
            frames.append(pd.DataFrame(data, columns=['(' + col_a + '+' + col_b + ')']))
        if '-' in allowed_operations:
            data = forward.apply(_my_diff, axis=1)
            frames.append(pd.DataFrame(data, columns=['(' + col_a + '-' + col_b + ')']))
            if both_orientations:
                # Fixed: the reversed difference used to be computed on the
                # *forward* column order, so the '(B-A)' column silently
                # duplicated the '(A-B)' data.
                data = backward.apply(_my_diff, axis=1)
                frames.append(pd.DataFrame(data, columns=['(' + col_b + '-' + col_a + ')']))
        if '|+|' in allowed_operations:
            data = forward.apply(_my_abs_sum, axis=1)
            frames.append(pd.DataFrame(data, columns=['|' + col_a + '+' + col_b + '|']))
        if '|-|' in allowed_operations:
            data = forward.apply(_my_abs_diff, axis=1)
            frames.append(pd.DataFrame(data, columns=['|' + col_a + '-' + col_b + '|']))
        if both_orientations and '/' in allowed_operations:
            data = forward.apply(_my_div, axis=1)
            frames.append(pd.DataFrame(data, columns=[col_a + '/' + col_b]))
            # Fixed: same orientation bug as the reversed difference above --
            # the 'B/A' column used to contain the 'A/B' data.
            data = backward.apply(_my_div, axis=1)
            frames.append(pd.DataFrame(data, columns=[col_b + '/' + col_a]))
    return frames


def _unary_features(df_group, allowed_operations, with_exp=True):
    """Build unary feature columns (squares, cubes, and -- when *with_exp* --
    plain unscaled exponentials) of each column of *df_group*."""
    frames = []
    for col in df_group.columns.tolist():
        single = df_group[[col]]
        if '^2' in allowed_operations:
            data = single.apply(_my_power_2, axis=1)
            frames.append(pd.DataFrame(data, columns=[col + '^2']))
        if '^3' in allowed_operations:
            data = single.apply(_my_power_3, axis=1)
            frames.append(pd.DataFrame(data, columns=[col + '^3']))
        if with_exp and 'exp' in allowed_operations:
            data = single.apply(_my_exp, axis=1)
            frames.append(pd.DataFrame(data, columns=['exp(' + col + ')']))
    return frames


def _log_subgroup(tag, df_group):
    """Debug-log the size and one random example column of a feature subgroup."""
    if df_group is not None and not df_group.empty:
        logger.debug('Number of features in subgroup {0}: {1}'.format(tag, df_group.shape[1]))
        example = df_group.columns.tolist()[random.randint(0, df_group.shape[1] - 1)]
        logger.debug('Example of feature in subgroup {0}: {1}'.format(tag, example))
    else:
        logger.debug('No features in subgroup {0}.'.format(tag))


def combine_features(df=None, energy_unit=None, length_unit=None,
                     metadata_info=None, allowed_operations=None, derived_features=None):
    """Generate combinations of features given a dataframe and a list of allowed operations.

    Feature subgroups follow Phys. Rev. Lett. 114, 105503 (2015), Supp. Info
    p.1: a0 (combined only among themselves, not part of the PRL), a1-a4
    primary groups, b* pairwise combinations, c3/d3/e3 powers and
    exponentials of the a3 group, and x ratios of {a_i, b_i} (i=1..3) with
    {c3, d3, e3}. For the exponentials a characteristic energy/length is
    introduced via :func:`_get_scaling_factors`.

    Fixes with respect to the previous revision: the reversed differences and
    ratios of subgroup b0 are now computed on the reversed column order; the
    debug dump to ``./df_b0.csv`` was removed; the a4 log line no longer
    reports an example column taken from a3.

    ..todo:: Fix under/overflow errors, and introduce handling of exceptions.
    """
    if allowed_operations:
        logger.info('Selected operations:\n {0}'.format(allowed_operations))
    else:
        logger.warning('No allowed operations selected.')

    # make derived features
    if derived_features is not None:
        # r_sigma and r_pi [Phys. Rev. Lett. 33, 1095(1974)]
        radii_s_p = ['atomic_rp_max(A)', 'atomic_rs_max(A)', 'atomic_rp_max(B)', 'atomic_rs_max(B)']
        if 'r_sigma' in derived_features:
            logger.info('Including rs and rp to allow r_sigma calculation')
            df['r_sigma'] = df[radii_s_p].apply(r_sigma, axis=1)
        if 'r_pi' in derived_features:
            logger.info('Including rs and rp to allow r_pi calculation')
            df['r_pi'] = df[radii_s_p].apply(r_pi, axis=1)

    columns_ = df.columns.tolist()

    # map each base feature name to its subgroup
    # (see Phys. Rev. Lett. 114, 105503(2015) Supp. info. pag.1)
    dict_features = {
        u'period': 'a0',
        u'atomic_number': 'a0',
        u'group': 'a0',

        u'atomic_ionization_potential': 'a1',
        u'atomic_electron_affinity': 'a1',
        u'atomic_homo_lumo_diff': 'a1',
        u'atomic_electronic_binding_energy': 'a1',

        u'atomic_homo': 'a2', u'atomic_lumo': 'a2',

        u'atomic_rs_max': 'a3',
        u'atomic_rp_max': 'a3',
        u'atomic_rd_max': 'a3',
        u'atomic_r_by_2_dimer': 'a3',

        u'r_sigma': 'a3', u'r_pi': 'a3'
    }

    # NOTE: the data is deliberately NOT standardized --
    # standardizing breaks the reproduction of the PRL results.
    df_a0 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a0']].astype('float32')
    df_a1 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a1']].astype('float32')
    df_a2 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a2']].astype('float32')
    df_a3 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a3']].astype('float32')
    df_a4 = df[[col for col in columns_ if dict_features.get(col.split('(', 1)[0]) == 'a4']].astype('float32')

    col_a3 = df_a3.columns.tolist()

    # b0: pairwise combinations (both orientations) plus unary
    # powers/exponentials of a0 -- this is not in the PRL
    df_b0_list = (_pairwise_features(df_a0, allowed_operations, both_orientations=True)
                  + _unary_features(df_a0, allowed_operations))
    # b1/b2/b3: single-orientation absolute differences and sums of a1/a2/a3
    df_b1_list = _pairwise_features(df_a1, allowed_operations)
    df_b2_list = _pairwise_features(df_a2, allowed_operations)
    df_b3_list = _pairwise_features(df_a3, allowed_operations)

    # c3: squares/cubes of a3 columns, then of their pairwise sums
    df_c3_list = _unary_features(df_a3, allowed_operations, with_exp=False)
    for sub_a, sub_b in itertools.combinations(col_a3, 2):
        pair = df_a3[[sub_a, sub_b]]
        if '^2' in allowed_operations:
            data = pair.apply(_my_sum_power_2, axis=1)
            df_c3_list.append(pd.DataFrame(data, columns=['(' + sub_a + '+' + sub_b + ')^2']))
        if '^3' in allowed_operations:
            data = pair.apply(_my_sum_power_3, axis=1)
            df_c3_list.append(pd.DataFrame(data, columns=['(' + sub_a + '+' + sub_b + ')^3']))

    # d3: exponentials of scaled a3 columns, then of their pairwise sums;
    # each column is first multiplied by its characteristic e_0/d_0 factor
    df_d3_list = []
    if 'exp' in allowed_operations:
        for col in col_a3:
            scaled = df_a3[[col]] * _get_scaling_factors([col], metadata_info, energy_unit, length_unit)
            data = scaled.apply(_my_exp, axis=1)
            df_d3_list.append(pd.DataFrame(data, columns=['exp(' + col + ')']))
        for sub_a, sub_b in itertools.combinations(col_a3, 2):
            scaled = df_a3[[sub_a, sub_b]] * _get_scaling_factors([sub_a, sub_b], metadata_info,
                                                                  energy_unit, length_unit)
            data = scaled.apply(_my_sum_exp, axis=1)
            df_d3_list.append(pd.DataFrame(data, columns=['exp(' + sub_a + '+' + sub_b + ')']))

    # e3: exponentials of squared/cubed scaled a3 columns, then of their
    # pairwise sums; the cubed variants can overflow and are then dropped
    df_e3_list = []
    for col in col_a3:
        if {'exp', '^2'} <= set(allowed_operations):
            scaled = df_a3[[col]] * _get_scaling_factors([col], metadata_info, energy_unit, length_unit)
            data = scaled.apply(_my_exp_power_2, axis=1)
            df_e3_list.append(pd.DataFrame(data, columns=['exp(' + col + '^2)']))
        if {'exp', '^3'} <= set(allowed_operations):
            try:
                scaled = df_a3[[col]] * _get_scaling_factors([col], metadata_info, energy_unit, length_unit)
                data = scaled.apply(_my_exp_power_3, axis=1)
                df_e3_list.append(pd.DataFrame(data, columns=['exp(' + col + '^3)']))
            except OverflowError:
                logger.warning('Dropping feature combination that caused under/overflow.\n')
    for sub_a, sub_b in itertools.combinations(col_a3, 2):
        if {'exp', '^2'} <= set(allowed_operations):
            scaled = df_a3[[sub_a, sub_b]] * _get_scaling_factors([sub_a, sub_b], metadata_info,
                                                                  energy_unit, length_unit)
            data = scaled.apply(_my_sum_exp_power_2, axis=1)
            df_e3_list.append(pd.DataFrame(data, columns=['exp((' + sub_a + '+' + sub_b + ')^2)']))
        if {'exp', '^3'} <= set(allowed_operations):
            try:
                scaled = df_a3[[sub_a, sub_b]] * _get_scaling_factors([sub_a, sub_b], metadata_info,
                                                                      energy_unit, length_unit)
                data = scaled.apply(_my_sum_exp_power_3, axis=1)
                df_e3_list.append(pd.DataFrame(data, columns=['exp((' + sub_a + '+' + sub_b + ')^3)']))
            except OverflowError:
                logger.warning('Dropping feature combination that caused under/overflow.\n')

    # assemble the subgroup dataframes; x1 = {a1..a3, b1..b3} and
    # x2 = {c3, d3, e3} feed the ratio subgroup x below
    df_list = []
    df_x1_list = []
    df_x2_list = []
    df_x_list = []

    if not df_a0.empty:
        df_list.append(df_a0)
    if not df_a1.empty:
        df_x1_list.append(df_a1)
        df_list.append(df_a1)
    if not df_a2.empty:
        df_x1_list.append(df_a2)
        df_list.append(df_a2)
    if not df_a3.empty:
        df_x1_list.append(df_a3)
        df_list.append(df_a3)
    if not df_a4.empty:
        df_list.append(df_a4)

    df_b0 = pd.concat(df_b0_list, axis=1) if df_b0_list else None
    if df_b0 is not None:
        df_list.append(df_b0)
    df_b1 = pd.concat(df_b1_list, axis=1) if df_b1_list else None
    if df_b1 is not None:
        df_x1_list.append(df_b1)
        df_list.append(df_b1)
    df_b2 = pd.concat(df_b2_list, axis=1) if df_b2_list else None
    if df_b2 is not None:
        df_x1_list.append(df_b2)
        df_list.append(df_b2)
    df_b3 = pd.concat(df_b3_list, axis=1) if df_b3_list else None
    if df_b3 is not None:
        df_x1_list.append(df_b3)
        df_list.append(df_b3)
    df_c3 = pd.concat(df_c3_list, axis=1) if df_c3_list else None
    if df_c3 is not None:
        df_x2_list.append(df_c3)
        df_list.append(df_c3)
    df_d3 = pd.concat(df_d3_list, axis=1) if df_d3_list else None
    if df_d3 is not None:
        df_x2_list.append(df_d3)
        df_list.append(df_d3)
    df_e3 = pd.concat(df_e3_list, axis=1) if df_e3_list else None
    if df_e3 is not None:
        df_x2_list.append(df_e3)
        df_list.append(df_e3)

    df_x1 = pd.concat(df_x1_list, axis=1) if df_x1_list else None
    df_x2 = pd.concat(df_x2_list, axis=1) if df_x2_list else None

    # x: ratios of any of {a_i, b_i} i=1,2,3 with any of {c3, d3, e3}
    # (typo in the PRL -- no a3)
    if df_x1 is not None and df_x2 is not None and '/' in allowed_operations:
        for el_x1 in df_x1.columns.tolist():
            for el_x2 in df_x2.columns.tolist():
                # operation between two dataframes, column by column
                data = df_x1[el_x1].divide(df_x2[el_x2])
                df_x_list.append(pd.DataFrame(data, columns=[el_x1 + '/' + el_x2]))

    # f1 (abs differences and sums of b1 without repetitions) was never
    # implemented upstream; kept as a placeholder for the log below
    df_f1 = None
    df_x = pd.concat(df_x_list, axis=1) if df_x_list else None
    if df_x is not None:
        df_list.append(df_x)

    logger.debug('\n l1-l0 feature creation')
    for tag, df_group in (('a0', df_a0), ('a1', df_a1), ('a2', df_a2), ('a3', df_a3),
                          ('a4', df_a4), ('b0', df_b0), ('b1', df_b1), ('b2', df_b2),
                          ('b3', df_b3), ('c3', df_c3), ('d3', df_d3), ('e3', df_e3),
                          ('f1', df_f1), ('x', df_x)):
        _log_subgroup(tag, df_group)
    logger.debug('Please see Phys. Rev. Lett. 114, 105503(2015) Supplementary Information \n for more details.\n')

    if not df_list:
        logger.error('No features selected. Please select at least two primary features.')
        sys.exit(1)
    df_combined_features = pd.concat(df_list, axis=1)
    logger.info('Number of total features generated: {0}'.format(df_combined_features.shape[1]))
    return df_combined_features
def l1_l0_minimization(y_true, D, features,
                       energy_unit=None,
                       print_lasso=False, lambda_grid=None, lassonumber=25,
                       max_dim=3, lambda_grid_points=100, lambda_max_factor=1.0, lambda_min_factor=0.001):
    r""" Select an optimal descriptor using a combined l1-l0 procedure.
    1. step (l 1): Solve the LASSO minimization problem
    .. math::
        argmin_c {||P-Dc||^2 + \lambda |c|_1}
    for different lambdas, starting from a 'high' lambda.
    Collect all indices(Features) i appearing with nonzero coefficients c_i,
    while decreasing lambda, until size of collection equals `lassonumber`.
    2. step (l 0): Check the least-squares errors for all single features/pairs/triples/... of
    collection from 1. step. Choose the single/pair/triple/... with the lowest
    mean squared error (MSE) to be the best 1D/2D/3D-descriptor.
    Parameters:
    y_true : array, [n_samples]
        Array with the target property (ground truth)
    D : array, [n_samples, n_features]
        Matrix with the data.
    features : list of strings
        List of feature names. Needs to be in the same order as the feature vectors in D
    energy_unit : string, optional
        Only used when logging the RMSE of each selected descriptor.
    max_dim : int, default 3
        Optimal descriptors are computed for every dimension 1 .. `max_dim`.
        The dimension is the number of feature vectors used in the linear combination.
    lassonumber : int, default 25
        The number of features, which will be collected in the l1-step
    lambda_grid_points : int, default 100
        Number of lambdas between lambda_max and lambda_min for which the l1-problem shall be solved.
        Sometimes a denser grid could be needed, if the lambda-steps are too high.
        This can be checked with 'print_lasso'. `lam_max` and `lam_min` are chosen as in
        Tibshirani's paper "Regularization Paths for Generalized Linear Models via Coordinate Descent".
        The values in between are generated on the log scale.
    lambda_min_factor : float, default 0.001
        Sets `lam_min` = `lambda_min_factor` * `lam_max`.
    lambda_max_factor : float, default 1.0
        Sets calculated `lam_max` = `lam_max` * `lambda_max_factor`.
    print_lasso : bool, default False
        Prints the indices of columns of `D` with nonzero coefficients for each lambda.
    lambda_grid : array, optional
        The list/array of lambda values for the l1-problem can be chosen by the user.
        The list/array should start from the highest number and lambda_i > lambda_i+1 should hold.
        If given, `lambda_grid_points` is not used.
    Returns:
    (out_df, y_pred, y_true) :
        `out_df` is a list (one entry per dimension) of pandas DataFrames holding
        the columns of the selected model; the last column ('Intercept') is a
        vector of ones. `y_pred` is the corresponding list of model predictions
        D'*c', and `y_true` is the flattened ground-truth array.
    References:
    .. [1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
        "Big Data of Materials Science: Critical Role of the Descriptor"
        Phys. Rev. Lett. 114, 105503 (2015)
    """
    # Descriptor dimensions searched exhaustively in the l0 step below.
    dimrange = range(1, max_dim + 1)
    compounds = len(y_true)
    # standardize D column-wise (zero mean, unit variance) for the LASSO step
    Dstan = np.array(scipy.stats.zscore(D))
    y_true = y_true.flatten()
    logger.info('Selecting optimal descriptors.')
    if lambda_grid is None:
        # find max lambda, and build lambda grid as in
        # Tibshirani's paper "Regularization Paths for Generalized Linear
        # Models via Coordinate Descent". Here lam_max can be set to a higher
        # value with the factor lambda_max_factor
        correlations = abs(np.dot(y_true, Dstan))
        correlations = np.asarray(correlations)
        lam_max = max(correlations) / (compounds)
        lam_min = lam_max * lambda_min_factor
        lam_max = lambda_max_factor * lam_max
        log_max, log_min = np.log10(lam_max), np.log10(lam_min)
        # log-spaced grid between lam_min and lam_max
        lambda_grid = [pow(10, i) for i in np.linspace(log_min, log_max, lambda_grid_points)]
    # highest lambda first (strongest regularization, fewest features);
    # a user-supplied grid is sorted here as well
    lambda_grid.sort(reverse=True)
    # LASSO begin: iterate over the lambda grid and collect all indices (features)
    # with nonzero coefficient until len(collection) == lassonumber
    collection = []
    if print_lasso:
        logger.debug('lambda #collected Indices')
    for l, lam in enumerate(lambda_grid):
        # NOTE(review): the `normalize` keyword was removed from sklearn's
        # Lasso in scikit-learn 1.2 - this call targets an older version.
        lasso = linear_model.Lasso(alpha=lam, copy_X=True, fit_intercept=True,
                                   max_iter=100000, normalize=False, positive=False, precompute=False,
                                   random_state=None, selection='cyclic', tol=0.0001, warm_start=False)
        lasso.fit(Dstan, y_true)
        coef = lasso.coef_
        for pos in np.nonzero(coef)[0]:
            if not pos in collection:
                collection.append(pos)
        if print_lasso:  # print the indices of nonzero coefficients for a given lambda.
            # (It is NOT the collection at that moment)
            logger.debug('%.10f %s %s' % (lam, len(collection), np.nonzero(coef)[0]))
        if len(collection) > lassonumber - 1:
            break
    collection = sorted(collection[:lassonumber])
    # LASSO end
    # collection is the list with the features that have been collected
    len_collection = len(collection)
    if len_collection < lassonumber:
        logger.debug("Only %s features are collected" % len_collection)
    # make small matrix with size of (compounds, lassonumber), only with selected
    # features from LASSO; the appended column of ones models the intercept
    D_collection = D[:, collection]
    D_collection = np.column_stack((D_collection, np.ones(compounds)))
    # get the different dimensional descriptor and save the
    # tuple (D_model, coefficients, selected_features) for each dimension in the list out
    out = []
    out_df = []
    y_pred = []
    for dimension in dimrange:
        # L0: save for each single feature/pair/triple/... the least-squares error
        # with its coefficients and indices in dictionary MSEdic (keyed by the MSE)
        MSEdic = {}
        for permu in itertools.combinations(range(len_collection), dimension):
            # permu + (-1,) always includes the intercept column
            D_ls = D_collection[:, permu + (-1,)]
            x = np.linalg.lstsq(D_ls, y_true, rcond=None)
            # if there are linear dependencies in D_ls np.linalg.lstsq gives no error and len(x[1])==0.
            if not len(x[1]) == 0:
                # (There could be other reasons, too...which should/could be checked)
                MSE = x[1][0] / compounds
                MSEdic.update({MSE: [x[0], permu]})
        # check if MSEdic is empty
        if not bool(MSEdic):
            logger.error('Could not find configuration with lowest MSE.\n Try to select ' +
                         'more features\n or reduce the number of the descriptor dimension. ')
            sys.exit(1)
        # select the model with the lowest MSE
        minimum = min(MSEdic)
        logger.info("Root Mean Squared Error (RMSE) for {0}D descriptor: {1:.6e} {2}"
                    .format(dimension, sqrt(minimum), energy_unit))
        model = MSEdic[minimum]
        coefficients, good_permu = model[0], model[1]
        # transform the D_collection-indices into D-indices, and get strings for selected features
        # from the list 'features', and get D_model
        selected_features = [features[collection[gp]] for gp in good_permu]
        D_model = D_collection[:, good_permu + (-1,)]
        # save the model for the actual dimension and strings of features
        out.append((D_model, coefficients, selected_features))
        # build a human-readable string of the fitted linear model for the log
        string = '{0}D case: \n'.format(dimension)
        for i in range(dimension + 1):
            if coefficients[i] > 0:
                sign = '+'
                c = coefficients[i]
            else:
                sign = '-'
                c = abs(coefficients[i])
            if i < dimension:
                string += '%s %.6e %s\n ' % (sign, c, selected_features[i])
            else:
                # last coefficient belongs to the intercept (no feature name)
                string += '%s %.6e\n' % (sign, c)
        logger.info(string)
        # model prediction D_model * coefficients for this dimension
        y_pred.append(np.dot(D_model, coefficients))
        # create pandas dataframe of the selected model columns
        selected_features.append('Intercept')
        data = D_model
        out_df.append(pd.DataFrame(data, columns=selected_features))
    return out_df, y_pred, y_true
| [
"logging.getLogger",
"itertools.chain",
"numpy.log10",
"pandas.read_csv",
"sklearn.linear_model.Lasso",
"math.sqrt",
"sys.exit",
"math.exp",
"numpy.asarray",
"numpy.dot",
"numpy.linspace",
"numpy.linalg.lstsq",
"pandas.DataFrame",
"random.randint",
"numpy.ones",
"pandas.merge",
"nump... | [((1100, 1133), 'logging.getLogger', 'logging.getLogger', (['"""ai4materials"""'], {}), "('ai4materials')\n", (1117, 1133), False, 'import logging\n'), ((1343, 1389), 'pandas.read_csv', 'pd.read_csv', (['atomic_data_file'], {'index_col': '(False)'}), '(atomic_data_file, index_col=False)\n', (1354, 1389), True, 'import pandas as pd\n'), ((1400, 1446), 'pandas.read_csv', 'pd.read_csv', (['binary_data_file'], {'index_col': '(False)'}), '(binary_data_file, index_col=False)\n', (1411, 1446), True, 'import pandas as pd\n'), ((1496, 1524), 'pandas.merge', 'pd.merge', (['df1', 'df2'], {'on': '"""Mat"""'}), "(df1, df2, on='Mat')\n", (1504, 1524), True, 'import pandas as pd\n'), ((5298, 5312), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5310, 5312), True, 'import pandas as pd\n'), ((13122, 13133), 'math.exp', 'exp', (['row[0]'], {}), '(row[0])\n', (13125, 13133), False, 'from math import exp, sqrt\n'), ((13672, 13692), 'math.exp', 'exp', (['(row[0] + row[1])'], {}), '(row[0] + row[1])\n', (13675, 13692), False, 'from math import exp, sqrt\n'), ((19325, 19358), 'itertools.combinations', 'itertools.combinations', (['col_a0', '(2)'], {}), '(col_a0, 2)\n', (19347, 19358), False, 'import itertools\n'), ((20969, 21002), 'itertools.combinations', 'itertools.combinations', (['col_a0', '(1)'], {}), '(col_a0, 1)\n', (20991, 21002), False, 'import itertools\n'), ((21704, 21737), 'itertools.combinations', 'itertools.combinations', (['col_a1', '(2)'], {}), '(col_a1, 2)\n', (21726, 21737), False, 'import itertools\n'), ((22728, 22761), 'itertools.combinations', 'itertools.combinations', (['col_a2', '(2)'], {}), '(col_a2, 2)\n', (22750, 22761), False, 'import itertools\n'), ((23752, 23785), 'itertools.combinations', 'itertools.combinations', (['col_a3', '(2)'], {}), '(col_a3, 2)\n', (23774, 23785), False, 'import itertools\n'), ((24887, 24920), 'itertools.combinations', 'itertools.combinations', (['col_a3', '(1)'], {}), '(col_a3, 1)\n', (24909, 24920), False, 'import 
itertools\n'), ((25412, 25445), 'itertools.combinations', 'itertools.combinations', (['col_a3', '(2)'], {}), '(col_a3, 2)\n', (25434, 25445), False, 'import itertools\n'), ((26108, 26141), 'itertools.combinations', 'itertools.combinations', (['col_a3', '(1)'], {}), '(col_a3, 1)\n', (26130, 26141), False, 'import itertools\n'), ((26723, 26756), 'itertools.combinations', 'itertools.combinations', (['col_a3', '(2)'], {}), '(col_a3, 2)\n', (26745, 26756), False, 'import itertools\n'), ((27468, 27501), 'itertools.combinations', 'itertools.combinations', (['col_a3', '(1)'], {}), '(col_a3, 1)\n', (27490, 27501), False, 'import itertools\n'), ((28863, 28896), 'itertools.combinations', 'itertools.combinations', (['col_a3', '(2)'], {}), '(col_a3, 2)\n', (28885, 28896), False, 'import itertools\n'), ((2753, 2764), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2761, 2764), False, 'import sys\n'), ((5153, 5178), 'itertools.chain', 'itertools.chain', (['*energy_'], {}), '(*energy_)\n', (5168, 5178), False, 'import itertools\n'), ((5208, 5243), 'itertools.chain', 'itertools.chain', (['*chemical_formula_'], {}), '(*chemical_formula_)\n', (5223, 5243), False, 'import itertools\n'), ((5262, 5286), 'itertools.chain', 'itertools.chain', (['*label_'], {}), '(*label_)\n', (5277, 5286), False, 'import itertools\n'), ((11504, 11521), 'math.sqrt', 'math.sqrt', (['row[1]'], {}), '(row[1])\n', (11513, 11521), False, 'import math\n'), ((30787, 30816), 'pandas.concat', 'pd.concat', (['df_b0_list'], {'axis': '(1)'}), '(df_b0_list, axis=1)\n', (30796, 30816), True, 'import pandas as pd\n'), ((30971, 31000), 'pandas.concat', 'pd.concat', (['df_b1_list'], {'axis': '(1)'}), '(df_b1_list, axis=1)\n', (30980, 31000), True, 'import pandas as pd\n'), ((31140, 31169), 'pandas.concat', 'pd.concat', (['df_b2_list'], {'axis': '(1)'}), '(df_b2_list, axis=1)\n', (31149, 31169), True, 'import pandas as pd\n'), ((31309, 31338), 'pandas.concat', 'pd.concat', (['df_b3_list'], {'axis': '(1)'}), '(df_b3_list, 
axis=1)\n', (31318, 31338), True, 'import pandas as pd\n'), ((31478, 31507), 'pandas.concat', 'pd.concat', (['df_c3_list'], {'axis': '(1)'}), '(df_c3_list, axis=1)\n', (31487, 31507), True, 'import pandas as pd\n'), ((31647, 31676), 'pandas.concat', 'pd.concat', (['df_d3_list'], {'axis': '(1)'}), '(df_d3_list, axis=1)\n', (31656, 31676), True, 'import pandas as pd\n'), ((31816, 31845), 'pandas.concat', 'pd.concat', (['df_e3_list'], {'axis': '(1)'}), '(df_e3_list, axis=1)\n', (31825, 31845), True, 'import pandas as pd\n'), ((31985, 32014), 'pandas.concat', 'pd.concat', (['df_x1_list'], {'axis': '(1)'}), '(df_x1_list, axis=1)\n', (31994, 32014), True, 'import pandas as pd\n'), ((32091, 32120), 'pandas.concat', 'pd.concat', (['df_x2_list'], {'axis': '(1)'}), '(df_x2_list, axis=1)\n', (32100, 32120), True, 'import pandas as pd\n'), ((33128, 33157), 'pandas.concat', 'pd.concat', (['df_f1_list'], {'axis': '(1)'}), '(df_f1_list, axis=1)\n', (33137, 33157), True, 'import pandas as pd\n'), ((33262, 33290), 'pandas.concat', 'pd.concat', (['df_x_list'], {'axis': '(1)'}), '(df_x_list, axis=1)\n', (33271, 33290), True, 'import pandas as pd\n'), ((38109, 38135), 'pandas.concat', 'pd.concat', (['df_list'], {'axis': '(1)'}), '(df_list, axis=1)\n', (38118, 38135), True, 'import pandas as pd\n'), ((38245, 38256), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (38253, 38256), False, 'import sys\n'), ((42183, 42207), 'numpy.asarray', 'np.asarray', (['correlations'], {}), '(correlations)\n', (42193, 42207), True, 'import numpy as np\n'), ((42845, 43054), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {'alpha': 'lam', 'copy_X': '(True)', 'fit_intercept': '(True)', 'max_iter': '(100000)', 'normalize': '(False)', 'positive': '(False)', 'precompute': '(False)', 'random_state': 'None', 'selection': '"""cyclic"""', 'tol': '(0.0001)', 'warm_start': '(False)'}), "(alpha=lam, copy_X=True, fit_intercept=True, max_iter=\n 100000, normalize=False, positive=False, precompute=False, 
random_state\n =None, selection='cyclic', tol=0.0001, warm_start=False)\n", (42863, 43054), False, 'from sklearn import linear_model\n'), ((6432, 6443), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6440, 6443), False, 'import sys\n'), ((42137, 42158), 'numpy.dot', 'np.dot', (['y_true', 'Dstan'], {}), '(y_true, Dstan)\n', (42143, 42158), True, 'import numpy as np\n'), ((42378, 42395), 'numpy.log10', 'np.log10', (['lam_max'], {}), '(lam_max)\n', (42386, 42395), True, 'import numpy as np\n'), ((42397, 42414), 'numpy.log10', 'np.log10', (['lam_min'], {}), '(lam_min)\n', (42405, 42414), True, 'import numpy as np\n'), ((43194, 43210), 'numpy.nonzero', 'np.nonzero', (['coef'], {}), '(coef)\n', (43204, 43210), True, 'import numpy as np\n'), ((44080, 44098), 'numpy.ones', 'np.ones', (['compounds'], {}), '(compounds)\n', (44087, 44098), True, 'import numpy as np\n'), ((44642, 44683), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['D_ls', 'y_true'], {'rcond': 'None'}), '(D_ls, y_true, rcond=None)\n', (44657, 44683), True, 'import numpy as np\n'), ((45286, 45297), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (45294, 45297), False, 'import sys\n'), ((46597, 46626), 'numpy.dot', 'np.dot', (['D_model', 'coefficients'], {}), '(D_model, coefficients)\n', (46603, 46626), True, 'import numpy as np\n'), ((46841, 46886), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'selected_features'}), '(data, columns=selected_features)\n', (46853, 46886), True, 'import pandas as pd\n'), ((9189, 9207), 'numpy.transpose', 'np.transpose', (['cell'], {}), '(cell)\n', (9201, 9207), True, 'import numpy as np\n'), ((10546, 10576), 'json.dump', 'json.dump', (['res', 'outF'], {'indent': '(2)'}), '(res, outF, indent=2)\n', (10555, 10576), False, 'import json\n'), ((19551, 19583), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (19563, 19583), True, 'import pandas as pd\n'), ((19778, 19810), 'pandas.DataFrame', 'pd.DataFrame', (['data'], 
{'columns': 'cols'}), '(data, columns=cols)\n', (19790, 19810), True, 'import pandas as pd\n'), ((19967, 19999), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (19979, 19999), True, 'import pandas as pd\n'), ((20199, 20231), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (20211, 20231), True, 'import pandas as pd\n'), ((20432, 20464), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (20444, 20464), True, 'import pandas as pd\n'), ((20646, 20678), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (20658, 20678), True, 'import pandas as pd\n'), ((20822, 20854), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (20834, 20854), True, 'import pandas as pd\n'), ((21177, 21209), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (21189, 21209), True, 'import pandas as pd\n'), ((21385, 21417), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (21397, 21417), True, 'import pandas as pd\n'), ((21598, 21630), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (21610, 21630), True, 'import pandas as pd\n'), ((21930, 21962), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (21942, 21962), True, 'import pandas as pd\n'), ((22157, 22189), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (22169, 22189), True, 'import pandas as pd\n'), ((22389, 22421), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (22401, 22421), True, 'import pandas as pd\n'), ((22622, 22654), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (22634, 22654), True, 'import 
pandas as pd\n'), ((22954, 22986), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (22966, 22986), True, 'import pandas as pd\n'), ((23181, 23213), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (23193, 23213), True, 'import pandas as pd\n'), ((23413, 23445), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (23425, 23445), True, 'import pandas as pd\n'), ((23646, 23678), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (23658, 23678), True, 'import pandas as pd\n'), ((23978, 24010), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (23990, 24010), True, 'import pandas as pd\n'), ((24205, 24237), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (24217, 24237), True, 'import pandas as pd\n'), ((24437, 24469), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (24449, 24469), True, 'import pandas as pd\n'), ((24670, 24702), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (24682, 24702), True, 'import pandas as pd\n'), ((25095, 25127), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (25107, 25127), True, 'import pandas as pd\n'), ((25302, 25334), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (25314, 25334), True, 'import pandas as pd\n'), ((25649, 25681), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (25661, 25681), True, 'import pandas as pd\n'), ((25886, 25918), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (25898, 25918), True, 'import pandas as pd\n'), ((26601, 26633), 'pandas.DataFrame', 'pd.DataFrame', 
(['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (26613, 26633), True, 'import pandas as pd\n'), ((27238, 27270), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (27250, 27270), True, 'import pandas as pd\n'), ((28016, 28048), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (28028, 28048), True, 'import pandas as pd\n'), ((29435, 29467), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (29447, 29467), True, 'import pandas as pd\n'), ((42459, 42508), 'numpy.linspace', 'np.linspace', (['log_min', 'log_max', 'lambda_grid_points'], {}), '(log_min, log_max, lambda_grid_points)\n', (42470, 42508), True, 'import numpy as np\n'), ((45502, 45515), 'math.sqrt', 'sqrt', (['minimum'], {}), '(minimum)\n', (45506, 45515), False, 'from math import exp, sqrt\n'), ((28609, 28641), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (28621, 28641), True, 'import pandas as pd\n'), ((30052, 30084), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (30064, 30084), True, 'import pandas as pd\n'), ((33630, 33667), 'random.randint', 'random.randint', (['(0)', '(df_a0.shape[1] - 1)'], {}), '(0, df_a0.shape[1] - 1)\n', (33644, 33667), False, 'import random\n'), ((33958, 33995), 'random.randint', 'random.randint', (['(0)', '(df_a1.shape[1] - 1)'], {}), '(0, df_a1.shape[1] - 1)\n', (33972, 33995), False, 'import random\n'), ((34286, 34323), 'random.randint', 'random.randint', (['(0)', '(df_a2.shape[1] - 1)'], {}), '(0, df_a2.shape[1] - 1)\n', (34300, 34323), False, 'import random\n'), ((34614, 34651), 'random.randint', 'random.randint', (['(0)', '(df_a3.shape[1] - 1)'], {}), '(0, df_a3.shape[1] - 1)\n', (34628, 34651), False, 'import random\n'), ((34942, 34979), 'random.randint', 'random.randint', (['(0)', '(df_a4.shape[1] - 1)'], {}), '(0, df_a4.shape[1] - 
1)\n', (34956, 34979), False, 'import random\n'), ((35265, 35302), 'random.randint', 'random.randint', (['(0)', '(df_b0.shape[1] - 1)'], {}), '(0, df_b0.shape[1] - 1)\n', (35279, 35302), False, 'import random\n'), ((35588, 35625), 'random.randint', 'random.randint', (['(0)', '(df_b1.shape[1] - 1)'], {}), '(0, df_b1.shape[1] - 1)\n', (35602, 35625), False, 'import random\n'), ((35911, 35948), 'random.randint', 'random.randint', (['(0)', '(df_b2.shape[1] - 1)'], {}), '(0, df_b2.shape[1] - 1)\n', (35925, 35948), False, 'import random\n'), ((36234, 36271), 'random.randint', 'random.randint', (['(0)', '(df_b3.shape[1] - 1)'], {}), '(0, df_b3.shape[1] - 1)\n', (36248, 36271), False, 'import random\n'), ((36557, 36594), 'random.randint', 'random.randint', (['(0)', '(df_c3.shape[1] - 1)'], {}), '(0, df_c3.shape[1] - 1)\n', (36571, 36594), False, 'import random\n'), ((36880, 36917), 'random.randint', 'random.randint', (['(0)', '(df_d3.shape[1] - 1)'], {}), '(0, df_d3.shape[1] - 1)\n', (36894, 36917), False, 'import random\n'), ((37203, 37240), 'random.randint', 'random.randint', (['(0)', '(df_e3.shape[1] - 1)'], {}), '(0, df_e3.shape[1] - 1)\n', (37217, 37240), False, 'import random\n'), ((37526, 37563), 'random.randint', 'random.randint', (['(0)', '(df_f1.shape[1] - 1)'], {}), '(0, df_f1.shape[1] - 1)\n', (37540, 37563), False, 'import random\n'), ((37844, 37880), 'random.randint', 'random.randint', (['(0)', '(df_x.shape[1] - 1)'], {}), '(0, df_x.shape[1] - 1)\n', (37858, 37880), False, 'import random\n'), ((12012, 12061), 'ai4materials.utils.unit_conversion.convert_unit', 'uc.convert_unit', (['(1)', 'energy_unit'], {'target_unit': '"""eV"""'}), "(1, energy_unit, target_unit='eV')\n", (12027, 12061), True, 'import ai4materials.utils.unit_conversion as uc\n'), ((33058, 33090), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (33070, 33090), True, 'import pandas as pd\n'), ((12233, 12288), 
'ai4materials.utils.unit_conversion.convert_unit', 'uc.convert_unit', (['(1)', 'length_unit'], {'target_unit': '"""angstrom"""'}), "(1, length_unit, target_unit='angstrom')\n", (12248, 12288), True, 'import ai4materials.utils.unit_conversion as uc\n'), ((43521, 43537), 'numpy.nonzero', 'np.nonzero', (['coef'], {}), '(coef)\n', (43531, 43537), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 3 20:18:00 2019
@author: wcoll
"""
import numpy as np
import matplotlib.pyplot as plt
from co2_forcing_AR6 import co2_forcing_AR6
from ch4_forcing_AR6 import ch4_forcing_AR6
from n2o_forcing_AR6 import n2o_forcing_AR6
# Assessed 1750-2019 effective radiative forcings used to rescale the
# model-derived attributions below.
# All from table 7.8 (presumably in W m-2 - TODO confirm against the table)
co2_erf_AR6 = 2.16
ch4_erf_AR6 = 0.54
n2o_erf_AR6 = 0.21
hc_erf_AR6 = 0.41
o3_erf_AR6 = 0.47
ari_erf_AR6 = -0.22
aci_erf_AR6 = -0.84
# Surface concentrations (presumably CO2 in ppm, CH4/N2O in ppb - TODO confirm)
co2_1850 = 286.7 # LLGHG_history_AR6_v8a
co2_2014 = 397.12
ch4_1750 = 729.2
ch4_1850 = 807.6 # LLGHG_history_AR6_v8a
ch4_2014 = 1822.88 #
ch4_2019 = 1866.3
n2o_1850 = 272.5 # LLGHG_history_AR6_v8a
n2o_2014 = 327.37
# Rapid adjustments to WMGHGs, applied multiplicatively as (1+ra) below
co2_ra = 0.05 # FGD
ch4_ra = -0.14 # FGD
n2o_ra = 0.07 # FGD
tot_em_co2 = 582. # Cumulative C since 1850 - from MAGICC input files
# Direct WMGHG forcings over 1850-2014, including rapid adjustments
ch4_erf = ch4_forcing_AR6(ch4_2014, ch4_1850, n2o_1850)*(1+ch4_ra)
n2o_erf = n2o_forcing_AR6(n2o_2014, n2o_1850, co2_1850, ch4_1850)*(1+n2o_ra)
hc_erf = 0.40 # 1850-2014 Interpolated between 2011 (0.39) and 2019 (0.41)
erf_bc = 0.15 # Thornhill et al.
irf_ari = -0.3 # AR6 FGD 1750-2014
erf_aci = -1.0 # AR6 FGD 1750-2014
# Layout of the attribution input csv files read below
ncols = 5 # columns in csv file
nspec = 9 # number of species
dtype = 'U12'+', f8'*ncols # one string column (experiment name) + ncols floats
# Read the per-experiment diagnostics (one row per perturbed species) and
# their standard deviations; missing entries are filled with 0.
data = np.genfromtxt('attribution_input.csv', delimiter=',', filling_values=0,
                    names=True, dtype=(dtype))
data_sd = np.genfromtxt('attribution_input_sd.csv', delimiter=',', filling_values=0,
                    names=True, dtype=(dtype))
# Per-experiment columns: ozone RF, CH4 lifetime change, aerosol-radiation
# interaction (ari), aerosol-cloud (ac) and total ERF - units not stated in
# this file, presumably W m-2 (lifech4 fractional) - TODO confirm
rfo3 = data['o3_rf']
rfo3_sd = data_sd['o3_rf_sd']
lifech4 = data['lifech4']
lifech4_sd = data_sd['lifech4_sd']
ari = data['ari']
ari_sd = data_sd['ari_sd']
ac = data['ac']
ac_sd = data_sd['ac_sd']
erf = data['erf']
erf_sd = data_sd['erf_sd']
# Row indices of the individual perturbation experiments
i_ch4 = np.where(data['Experiment']=='CH4')[0][0]
i_nox = np.where(data['Experiment']=='NOx')[0][0]
i_voc = np.where(data['Experiment']=='VOC')[0][0]
i_n2o = np.where(data['Experiment']=='N2O')[0][0]
i_hc = np.where(data['Experiment']=='HC')[0][0]
# All gas-phase experiments, and the same set excluding CH4
i_gas = np.array([i_ch4, i_n2o, i_hc, i_nox, i_voc])
i_non_ch4 = np.array([i_n2o, i_hc, i_nox, i_voc])
total_o3 = np.sum(rfo3)
alpha = 1.30 # From chapter 6
# CH4 concentration implied by each experiment's (fractional) lifetime change
ch4 = ch4_2014*(1+lifech4)**alpha
# Propagate the lifetime uncertainty; where lifech4 == 0 the division gives
# nan/inf (and may emit a RuntimeWarning) which is masked to 0 just below
ch4_sd = (ch4-ch4_2014)*lifech4_sd/lifech4
ch4_sd = np.where(lifech4 == 0, 0., ch4_sd)
# Ozone primary mode: O3 forcing per unit CH4 change, taken from the CH4 expt
rfo3perch4 = rfo3[i_ch4]/(ch4_2014-ch4_1850) # Use CH4 expt
rfo3perch4_sd = rfo3_sd[i_ch4]/(ch4_2014-ch4_1850) # Use CH4 expt
rfo3_prime = rfo3perch4*(ch4-ch4_2014)
rfo3_prime_sd = np.sqrt(
    (rfo3perch4_sd*(ch4-ch4_2014))**2+
    # add 15% uncertainty in radiative transfer - from Ragnhild
    (rfo3perch4*(ch4-ch4_2014)*0.15)**2)
# Subtract total o3 prime from direct o3
# - ensures total sum of direct and prime terms is unchanged
rfo3[i_ch4] -= np.sum(rfo3_prime)
# CH4 forcing induced by each experiment's CH4 change, via the AR6
# simplified expression, including the rapid adjustment factor (1+ch4_ra)
rfch4 = np.zeros(nspec)
rfch4_sd = np.zeros(nspec)
for ispec in np.arange(nspec):
    rfch4[ispec] = \
        ch4_forcing_AR6(ch4[ispec], ch4_2014, n2o_2014)* \
        (1+ch4_ra)
    # 1-sigma from re-evaluating the forcing at ch4 + ch4_sd
    rfch4_sd[ispec] = \
        ch4_forcing_AR6(ch4[ispec]+ch4_sd[ispec], ch4_2014, n2o_2014)* \
        (1+ch4_ra)-rfch4[ispec]
# Add in 14% spectral uncertainty
rfch4_sd=np.sqrt((rfch4*0.14)**2+(rfch4_sd)**2)
# CO2 produced by oxidation of emitted carbon species
# (same units as tot_em_co2, presumably Pg C - TODO confirm)
em_co2 = np.zeros(nspec)
em_co2[[i_ch4, i_hc, i_voc]] = [6.6, 0.02, 26.]
# From MAGICC input files
# CH4 HC VOC, CO CO2 scalings applied of 75%, 100%, 50%, 100%
# Assume 88% of CH4 emitted oxidised (12% remains as CH4)
# Assume can attributed present day CO2 change by scaling cumulative emissions
co2 = (em_co2/tot_em_co2)*(co2_2014-co2_1850)
rfco2=np.zeros(nspec)
for ispec in np.arange(nspec):
    # forcing of removing each species' CO2 contribution from the 2014 level
    rfco2[ispec] = \
        co2_forcing_AR6(co2_2014, co2_2014-co2[ispec], n2o_2014)* \
        (1+co2_ra)
# co2 contribution from direct co2 emissions
rfco2_co2 = co2_forcing_AR6(co2_2014, co2_1850, n2o_2014)*(1+co2_ra) \
    -np.sum(rfco2) # Subtract off non-co2 carbon contributions
# Set up WMGHG direct ERFs per experiment; only CH4, N2O and halocarbons
# have a direct greenhouse contribution of their own
rfghg = np.zeros(nspec)
rfghg[i_ch4] = ch4_erf
rfghg[i_n2o] = n2o_erf
rfghg[i_hc] = hc_erf
# subtract sum of lifetime terms from rfghg[i_ch4]
# - ensures total sum of methane rf is ch4_erf
rfghg[i_ch4] -= np.sum(rfch4)
# Aerosols
# Row indices of the aerosol (precursor) experiments
i_bc = np.where(data['Experiment']=='BC')[0][0]
i_oc = np.where(data['Experiment']=='OC')[0][0]
i_so2 = np.where(data['Experiment']=='SO2')[0][0]
i_nh3 = np.where(data['Experiment']=='NH3')[0][0]
i_aer = np.array([i_bc, i_oc, i_so2, i_nh3]) # all aerosols
i_scat = np.array([i_oc, i_so2, i_nh3]) # scattering aerosols
# Set aerosol ari to be erf-ac to ensure components add to erf
ari[i_aer] = erf[i_aer]-ac[i_aer]
ari_sd[i_aer] = np.sqrt(erf_sd[i_aer]**2 +ac_sd[i_aer]**2)
# scale the scattering aerosols (SO2+OC+NH3) so the non-BC ari total
# matches the assessed value
irf_ari_scat = irf_ari-ari[i_bc] # Set non-BC ari to 7.3.3 FGD
ari_scat = np.sum(ari[i_scat])
ari[i_scat] = ari[i_scat]*irf_ari_scat/ari_scat
ari_sd[i_scat] = ari_sd[i_scat]*irf_ari_scat/ari_scat
# scale aci to get total aci from 7.3.3
total_aci = np.sum(ac[i_aer])
ac[i_aer] = ac[i_aer]*erf_aci/total_aci
ac_sd[i_aer] = ac_sd[i_aer]*erf_aci/total_aci
# Scale every component so the assessed totals of table 7.8 are matched
scale_co2 = co2_erf_AR6/(rfco2_co2+np.sum(rfco2))
# NOTE(review): only rfco2_co2 is rescaled here, the indirect rfco2 terms
# keep their original values - confirm this is intentional
rfco2_co2 *= scale_co2
scale_ch4 = ch4_erf_AR6/(rfghg[i_ch4]+np.sum(rfch4))
rfghg[i_ch4] *= scale_ch4
rfch4 *= scale_ch4
rfch4_sd *= scale_ch4
scale_n2o = n2o_erf_AR6/rfghg[i_n2o]
rfghg[i_n2o] *= scale_n2o
scale_hc = hc_erf_AR6/rfghg[i_hc]
rfghg[i_hc] *= scale_hc
scale_o3 = o3_erf_AR6/(np.sum(rfo3)+np.sum(rfo3_prime))
rfo3 *= scale_o3
rfo3_sd *= scale_o3
rfo3_prime *= scale_o3
rfo3_prime_sd *= scale_o3
scale_ari = ari_erf_AR6/np.sum(ari)
ari_sd *= scale_ari
ari *= scale_ari
scale_aci = aci_erf_AR6/np.sum(ac[i_aer])
ac[i_aer] *= scale_aci
ac_sd[i_aer] *= scale_aci
rfghg_sd = rfghg*0.14 # assume 14% for all WMGHGs
# Assemble the structured output tables: row 0 is CO2, rows 1..nspec follow
# the order of the input csv; columns are the forcing components and a total.
table = np.zeros(nspec+1,
                 dtype={'names':
                        ['Species', 'CO2', 'GHG', 'CH4_lifetime', 'O3',
                         'O3_prime', 'Strat_H2O', 'Aerosol', 'Cloud', 'Total'],
                        'formats':
                        ['U20', 'f8', 'f8', 'f8', 'f8',
                         'f8', 'f8', 'f8', 'f8', 'f8']})
# Matching table of 1-sigma standard deviations
table_sd = np.zeros(nspec+1,
                    dtype={'names':
                           ['Species', 'CO2_sd', 'GHG_sd', 'CH4_lifetime_sd',
                            'O3_sd', 'O3_prime_sd', 'Strat_H2O_sd',
                            'Aerosol_sd', 'Cloud_sd', 'Total_sd'],
                           'formats':
                           ['U20', 'f8', 'f8', 'f8', 'f8',
                            'f8', 'f8', 'f8', 'f8', 'f8']})
table['Species'][0] = 'CO2'
table['CO2'][0] = rfco2_co2
table['Total'][0] = rfco2_co2
table_sd['Species'][0] = 'CO2'
table_sd['CO2_sd'][0] = rfco2_co2*0.12 # 12% uncertainty
table_sd['Total_sd'][0] = rfco2_co2*0.12
for ispec in np.arange(nspec):
    table['Species'][ispec+1] = data['Experiment'][ispec]
    table['CO2'][ispec+1] = rfco2[ispec]
    table['GHG'][ispec+1] = rfghg[ispec]
    table['CH4_lifetime'][ispec+1] = rfch4[ispec]
    table['O3'][ispec+1] = rfo3[ispec]
    table['O3_prime'][ispec+1] = rfo3_prime[ispec]
    table['Aerosol'][ispec+1] = ari[ispec]
    table['Cloud'][ispec+1] = ac[ispec]
    # total = sum of all components attributed to this species
    table['Total'][ispec+1] = np.sum([rfco2[ispec], rfghg[ispec], rfch4[ispec],
                                      rfo3[ispec], rfo3_prime[ispec], ari[ispec], ac[ispec]])
    table_sd['Species'][ispec+1] = data['Experiment'][ispec]
    table_sd['CO2_sd'][ispec+1] = rfco2[ispec]*0.12
    table_sd['GHG_sd'][ispec+1] = rfghg_sd[ispec]
    table_sd['CH4_lifetime_sd'][ispec+1] = rfch4_sd[ispec]
    table_sd['O3_sd'][ispec+1] = rfo3_sd[ispec]
    table_sd['O3_prime_sd'][ispec+1] = rfo3_prime_sd[ispec]
    table_sd['Aerosol_sd'][ispec+1] = ari_sd[ispec]
    table_sd['Cloud_sd'][ispec+1] = ac_sd[ispec]
    # component uncertainties combined in quadrature (O3 terms summed first)
    table_sd['Total_sd'][ispec+1] = np.sqrt(np.sum(np.square(
        [rfco2[ispec]*0.12, rfghg_sd[ispec], rfch4_sd[ispec],
         rfo3_sd[ispec]+rfo3_prime_sd[ispec], ari_sd[ispec], ac_sd[ispec]])))
# Stratospheric water vapour term: added to the CH4 row only
table['Strat_H2O'][i_ch4+1] = 0.05
table['Total'][i_ch4+1] += 0.05
table_sd['Strat_H2O_sd'][i_ch4+1] = 0.05
# Recompute the CH4 total uncertainty including the Strat_H2O term
# (here GHG and CH4-lifetime uncertainties are summed linearly first)
table_sd['Total_sd'][i_ch4+1] = np.sqrt(np.sum(np.square(
    [rfco2[i_ch4]*0.12, rfghg_sd[i_ch4]+rfch4_sd[i_ch4],
     rfo3_sd[i_ch4]+rfo3_prime_sd[i_ch4], 0.05,
     ari_sd[i_ch4], ac_sd[i_ch4]])))
# Write the per-species forcing table and its 1-sigma uncertainties to csv.
np.savetxt("attribution_output_1750_2019.csv", table, delimiter=',',
           fmt='%15s'+9*', %8.3f',
           header=','.join(table.dtype.names))
# Fixed filename: was "attribution_output_1750_2019.csv_sd.csv" (stray ".csv"
# before the "_sd" suffix).
np.savetxt("attribution_output_1750_2019_sd.csv", table_sd, delimiter=',',
           fmt='%15s'+9*', %8.3f',
           header=','.join(table_sd.dtype.names))
# Horizontal stacked bar chart of the attributed forcing components.
# pos_* arrays hold the cumulative right edge of the positive stack per row,
# neg_* the cumulative left edge of the negative stack; row 0 is CO2.
plt.figure()
width = 0.7
species =[r'CO$_2$', r'CH$_4$', r'N$_2$O', 'Halocarbon', r'NO$_X$', 'VOC', r'SO$_2$',
          'Organic Carbon', 'Black Carbon', 'Ammonia']
exp_list = \
    np.array([i_ch4, i_n2o, i_hc, i_nox, i_voc, i_so2, i_oc, i_bc, i_nh3])
ybar = np.arange(nspec+1, 0, -1)
labels = [r'CO$_2$', 'WMGHG', r'CH$_4$ lifetime', r'O$_3$', 'Aerosol (ari)', 'Cloud']
pos_ghg = np.zeros(nspec+1)
pos_ch4 = np.zeros(nspec+1)
pos_o3 = np.zeros(nspec+1)
pos_aer = np.zeros(nspec+1)
pos_cloud = np.zeros(nspec+1)
pos_h2o = np.zeros(nspec+1)
pos_co2 = np.zeros(nspec+1)
neg_ch4 = np.zeros(nspec+1)
neg_o3 = np.zeros(nspec+1)
neg_aer = np.zeros(nspec+1)
neg_cloud = np.zeros(nspec+1)
# CO2 row: only the direct CO2 component contributes
pos_co2[0] =rfco2_co2 ; pos_ghg[0] = pos_co2[0] ; pos_ch4[0] = pos_co2[0]
pos_o3[0]=pos_co2[0]; pos_h2o[0] = pos_co2[0]
pos_aer[0] = pos_co2[0]; pos_cloud[0] = pos_co2[0]
# Gases: stack positive contributions rightwards, negative leftwards
pos_co2[i_gas+1] = rfco2[i_gas]
pos_ghg[i_gas+1] = pos_co2[i_gas+1]+rfghg[i_gas]
pos_ch4[i_gas+1] = pos_ghg[i_gas+1]+ \
    np.maximum(rfch4[i_gas], 0.)
neg_ch4[i_gas+1] = np.minimum(rfch4[i_gas], 0.)
pos_o3[i_gas+1] = pos_ch4[i_gas+1]+ \
    np.maximum(rfo3[i_gas]+rfo3_prime[i_gas], 0.)
neg_o3[i_gas+1] = neg_ch4[i_gas+1]+ \
    np.minimum(rfo3[i_gas]+rfo3_prime[i_gas], 0.)
pos_h2o[i_gas+1] = pos_o3[i_gas+1]
# stratospheric water vapour on the CH4 row only
pos_h2o[i_ch4+1] += 0.05 # AR6 FGD
pos_aer[i_gas+1] = pos_h2o[i_gas+1]+ \
    np.maximum(ari[i_gas], 0.)
neg_aer[i_gas+1] = neg_o3[i_gas+1]+ \
    np.minimum(ari[i_gas], 0.)
pos_cloud[i_gas+1] = pos_aer[i_gas+1]+ \
    np.maximum(ac[i_gas], 0.)
neg_cloud[i_gas+1] = neg_aer[i_gas+1]+ \
    np.minimum(ac[i_gas], 0.)
# Aerosols: only ari and cloud components contribute
pos_aer[i_aer+1] = np.maximum(ari[i_aer], 0.)
neg_aer[i_aer+1] = np.minimum(ari[i_aer], 0.)
pos_cloud[i_aer+1] = pos_aer[i_aer+1]+ \
    np.maximum(ac[i_aer], 0.)
neg_cloud[i_aer+1] = neg_aer[i_aer+1]+ \
    np.minimum(ac[i_aer], 0.)
error = np.zeros(nspec+1)
error[0] = co2[0]*0.12 # 12% uncertainty
error[i_ch4+1] = np.sqrt((rfghg_sd[i_ch4]+rfch4_sd[i_ch4])**2+ # CH4
(rfo3_sd[i_ch4]+rfo3_prime_sd[i_ch4])**2+ # O3
0.05**2+ # Strat H2O
ari_sd[i_ch4]**2+
ac_sd[i_ch4]**2)
error[i_non_ch4+1] = np.sqrt(rfghg_sd[i_non_ch4]**2+
rfch4_sd[i_non_ch4]**2+
(rfo3_sd[i_non_ch4]+rfo3_prime_sd[i_non_ch4])**2+
ari_sd[i_non_ch4]**2+
ac_sd[i_non_ch4]**2)
error[i_aer+1] = np.sqrt(ari_sd[i_aer]**2+
ac_sd[i_aer]**2)
plt.barh(ybar, pos_co2, width, color='grey', label=labels[0])
plt.barh(ybar, pos_ghg-pos_co2, width, left=pos_co2, color='darkred', label=labels[1])
plt.barh(ybar, pos_ch4-pos_ghg, width, left=pos_ghg, color='red', label=labels[2])
plt.barh(ybar, pos_o3-pos_ch4, width, left=pos_ch4, color='green', label=labels[3])
plt.barh(ybar, pos_h2o-pos_o3, width, left=pos_o3, color='darkblue', label=r'H$_2$O(strat)')
plt.barh(ybar, pos_aer-pos_h2o, width, left=pos_h2o, color='blue', label=labels[4])
plt.barh(ybar, pos_cloud-pos_aer, width, left=pos_aer, color='lightblue', label=labels[5])
plt.barh(ybar, neg_ch4, width, color='red')
plt.barh(ybar, neg_o3-neg_ch4, width, left=neg_ch4, color='green')
plt.barh(ybar, neg_aer-neg_o3, width, left=neg_o3, color='blue')
plt.barh(ybar, neg_cloud-neg_aer, width, left=neg_aer, color='lightblue')
plt.errorbar(pos_cloud+neg_cloud,ybar, marker='x', linestyle='None', color='k', label='sum', xerr=error)
plt.yticks([])
for i in np.arange(nspec+1):
plt.text(-1.55, ybar[i], species[i])
plt.title('Components of 1750 to 2019 forcing')
plt.xlabel(r'W m$^{-2}$')
plt.xlim(-1.6, 2.0)
plt.legend(loc='lower right')
plt.axvline(x=0., color='k', linewidth=0.25)
plt.show()
| [
"numpy.sqrt",
"numpy.array",
"ch4_forcing_AR6.ch4_forcing_AR6",
"matplotlib.pyplot.errorbar",
"co2_forcing_AR6.co2_forcing_AR6",
"numpy.genfromtxt",
"numpy.arange",
"n2o_forcing_AR6.n2o_forcing_AR6",
"numpy.where",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks... | [((1223, 1323), 'numpy.genfromtxt', 'np.genfromtxt', (['"""attribution_input.csv"""'], {'delimiter': '""","""', 'filling_values': '(0)', 'names': '(True)', 'dtype': 'dtype'}), "('attribution_input.csv', delimiter=',', filling_values=0,\n names=True, dtype=dtype)\n", (1236, 1323), True, 'import numpy as np\n'), ((1353, 1456), 'numpy.genfromtxt', 'np.genfromtxt', (['"""attribution_input_sd.csv"""'], {'delimiter': '""","""', 'filling_values': '(0)', 'names': '(True)', 'dtype': 'dtype'}), "('attribution_input_sd.csv', delimiter=',', filling_values=0,\n names=True, dtype=dtype)\n", (1366, 1456), True, 'import numpy as np\n'), ((1979, 2023), 'numpy.array', 'np.array', (['[i_ch4, i_n2o, i_hc, i_nox, i_voc]'], {}), '([i_ch4, i_n2o, i_hc, i_nox, i_voc])\n', (1987, 2023), True, 'import numpy as np\n'), ((2036, 2073), 'numpy.array', 'np.array', (['[i_n2o, i_hc, i_nox, i_voc]'], {}), '([i_n2o, i_hc, i_nox, i_voc])\n', (2044, 2073), True, 'import numpy as np\n'), ((2086, 2098), 'numpy.sum', 'np.sum', (['rfo3'], {}), '(rfo3)\n', (2092, 2098), True, 'import numpy as np\n'), ((2231, 2266), 'numpy.where', 'np.where', (['(lifech4 == 0)', '(0.0)', 'ch4_sd'], {}), '(lifech4 == 0, 0.0, ch4_sd)\n', (2239, 2266), True, 'import numpy as np\n'), ((2469, 2567), 'numpy.sqrt', 'np.sqrt', (['((rfo3perch4_sd * (ch4 - ch4_2014)) ** 2 + (rfo3perch4 * (ch4 - ch4_2014) *\n 0.15) ** 2)'], {}), '((rfo3perch4_sd * (ch4 - ch4_2014)) ** 2 + (rfo3perch4 * (ch4 -\n ch4_2014) * 0.15) ** 2)\n', (2476, 2567), True, 'import numpy as np\n'), ((2739, 2757), 'numpy.sum', 'np.sum', (['rfo3_prime'], {}), '(rfo3_prime)\n', (2745, 2757), True, 'import numpy as np\n'), ((2781, 2796), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (2789, 2796), True, 'import numpy as np\n'), ((2808, 2823), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (2816, 2823), True, 'import numpy as np\n'), ((2837, 2853), 'numpy.arange', 'np.arange', (['nspec'], {}), '(nspec)\n', (2846, 2853), 
True, 'import numpy as np\n'), ((3126, 3170), 'numpy.sqrt', 'np.sqrt', (['((rfch4 * 0.14) ** 2 + rfch4_sd ** 2)'], {}), '((rfch4 * 0.14) ** 2 + rfch4_sd ** 2)\n', (3133, 3170), True, 'import numpy as np\n'), ((3176, 3191), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (3184, 3191), True, 'import numpy as np\n'), ((3519, 3534), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (3527, 3534), True, 'import numpy as np\n'), ((3548, 3564), 'numpy.arange', 'np.arange', (['nspec'], {}), '(nspec)\n', (3557, 3564), True, 'import numpy as np\n'), ((3896, 3911), 'numpy.zeros', 'np.zeros', (['nspec'], {}), '(nspec)\n', (3904, 3911), True, 'import numpy as np\n'), ((4094, 4107), 'numpy.sum', 'np.sum', (['rfch4'], {}), '(rfch4)\n', (4100, 4107), True, 'import numpy as np\n'), ((4337, 4373), 'numpy.array', 'np.array', (['[i_bc, i_oc, i_so2, i_nh3]'], {}), '([i_bc, i_oc, i_so2, i_nh3])\n', (4345, 4373), True, 'import numpy as np\n'), ((4398, 4428), 'numpy.array', 'np.array', (['[i_oc, i_so2, i_nh3]'], {}), '([i_oc, i_so2, i_nh3])\n', (4406, 4428), True, 'import numpy as np\n'), ((4564, 4611), 'numpy.sqrt', 'np.sqrt', (['(erf_sd[i_aer] ** 2 + ac_sd[i_aer] ** 2)'], {}), '(erf_sd[i_aer] ** 2 + ac_sd[i_aer] ** 2)\n', (4571, 4611), True, 'import numpy as np\n'), ((4715, 4734), 'numpy.sum', 'np.sum', (['ari[i_scat]'], {}), '(ari[i_scat])\n', (4721, 4734), True, 'import numpy as np\n'), ((4890, 4907), 'numpy.sum', 'np.sum', (['ac[i_aer]'], {}), '(ac[i_aer])\n', (4896, 4907), True, 'import numpy as np\n'), ((5732, 5957), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {'dtype': "{'names': ['Species', 'CO2', 'GHG', 'CH4_lifetime', 'O3', 'O3_prime',\n 'Strat_H2O', 'Aerosol', 'Cloud', 'Total'], 'formats': ['U20', 'f8',\n 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8']}"}), "(nspec + 1, dtype={'names': ['Species', 'CO2', 'GHG',\n 'CH4_lifetime', 'O3', 'O3_prime', 'Strat_H2O', 'Aerosol', 'Cloud',\n 'Total'], 'formats': ['U20', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8',\n 'f8', 
'f8']})\n", (5740, 5957), True, 'import numpy as np\n'), ((6110, 6362), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {'dtype': "{'names': ['Species', 'CO2_sd', 'GHG_sd', 'CH4_lifetime_sd', 'O3_sd',\n 'O3_prime_sd', 'Strat_H2O_sd', 'Aerosol_sd', 'Cloud_sd', 'Total_sd'],\n 'formats': ['U20', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8', 'f8']}"}), "(nspec + 1, dtype={'names': ['Species', 'CO2_sd', 'GHG_sd',\n 'CH4_lifetime_sd', 'O3_sd', 'O3_prime_sd', 'Strat_H2O_sd', 'Aerosol_sd',\n 'Cloud_sd', 'Total_sd'], 'formats': ['U20', 'f8', 'f8', 'f8', 'f8',\n 'f8', 'f8', 'f8', 'f8', 'f8']})\n", (6118, 6362), True, 'import numpy as np\n'), ((6783, 6799), 'numpy.arange', 'np.arange', (['nspec'], {}), '(nspec)\n', (6792, 6799), True, 'import numpy as np\n'), ((8599, 8611), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8609, 8611), True, 'import matplotlib.pyplot as plt\n'), ((8782, 8852), 'numpy.array', 'np.array', (['[i_ch4, i_n2o, i_hc, i_nox, i_voc, i_so2, i_oc, i_bc, i_nh3]'], {}), '([i_ch4, i_n2o, i_hc, i_nox, i_voc, i_so2, i_oc, i_bc, i_nh3])\n', (8790, 8852), True, 'import numpy as np\n'), ((8860, 8887), 'numpy.arange', 'np.arange', (['(nspec + 1)', '(0)', '(-1)'], {}), '(nspec + 1, 0, -1)\n', (8869, 8887), True, 'import numpy as np\n'), ((8984, 9003), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (8992, 9003), True, 'import numpy as np\n'), ((9012, 9031), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9020, 9031), True, 'import numpy as np\n'), ((9039, 9058), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9047, 9058), True, 'import numpy as np\n'), ((9067, 9086), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9075, 9086), True, 'import numpy as np\n'), ((9097, 9116), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9105, 9116), True, 'import numpy as np\n'), ((9125, 9144), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9133, 9144), 
True, 'import numpy as np\n'), ((9153, 9172), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9161, 9172), True, 'import numpy as np\n'), ((9181, 9200), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9189, 9200), True, 'import numpy as np\n'), ((9208, 9227), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9216, 9227), True, 'import numpy as np\n'), ((9236, 9255), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9244, 9255), True, 'import numpy as np\n'), ((9266, 9285), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (9274, 9285), True, 'import numpy as np\n'), ((9688, 9717), 'numpy.minimum', 'np.minimum', (['rfch4[i_gas]', '(0.0)'], {}), '(rfch4[i_gas], 0.0)\n', (9698, 9717), True, 'import numpy as np\n'), ((10366, 10393), 'numpy.maximum', 'np.maximum', (['ari[i_aer]', '(0.0)'], {}), '(ari[i_aer], 0.0)\n', (10376, 10393), True, 'import numpy as np\n'), ((10412, 10439), 'numpy.minimum', 'np.minimum', (['ari[i_aer]', '(0.0)'], {}), '(ari[i_aer], 0.0)\n', (10422, 10439), True, 'import numpy as np\n'), ((10625, 10644), 'numpy.zeros', 'np.zeros', (['(nspec + 1)'], {}), '(nspec + 1)\n', (10633, 10644), True, 'import numpy as np\n'), ((10701, 10859), 'numpy.sqrt', 'np.sqrt', (['((rfghg_sd[i_ch4] + rfch4_sd[i_ch4]) ** 2 + (rfo3_sd[i_ch4] + rfo3_prime_sd\n [i_ch4]) ** 2 + 0.05 ** 2 + ari_sd[i_ch4] ** 2 + ac_sd[i_ch4] ** 2)'], {}), '((rfghg_sd[i_ch4] + rfch4_sd[i_ch4]) ** 2 + (rfo3_sd[i_ch4] +\n rfo3_prime_sd[i_ch4]) ** 2 + 0.05 ** 2 + ari_sd[i_ch4] ** 2 + ac_sd[\n i_ch4] ** 2)\n', (10708, 10859), True, 'import numpy as np\n'), ((11012, 11186), 'numpy.sqrt', 'np.sqrt', (['(rfghg_sd[i_non_ch4] ** 2 + rfch4_sd[i_non_ch4] ** 2 + (rfo3_sd[i_non_ch4] +\n rfo3_prime_sd[i_non_ch4]) ** 2 + ari_sd[i_non_ch4] ** 2 + ac_sd[\n i_non_ch4] ** 2)'], {}), '(rfghg_sd[i_non_ch4] ** 2 + rfch4_sd[i_non_ch4] ** 2 + (rfo3_sd[\n i_non_ch4] + rfo3_prime_sd[i_non_ch4]) ** 2 + ari_sd[i_non_ch4] ** 2 + 
\n ac_sd[i_non_ch4] ** 2)\n', (11019, 11186), True, 'import numpy as np\n'), ((11294, 11341), 'numpy.sqrt', 'np.sqrt', (['(ari_sd[i_aer] ** 2 + ac_sd[i_aer] ** 2)'], {}), '(ari_sd[i_aer] ** 2 + ac_sd[i_aer] ** 2)\n', (11301, 11341), True, 'import numpy as np\n'), ((11363, 11424), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', 'pos_co2', 'width'], {'color': '"""grey"""', 'label': 'labels[0]'}), "(ybar, pos_co2, width, color='grey', label=labels[0])\n", (11371, 11424), True, 'import matplotlib.pyplot as plt\n'), ((11425, 11517), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(pos_ghg - pos_co2)', 'width'], {'left': 'pos_co2', 'color': '"""darkred"""', 'label': 'labels[1]'}), "(ybar, pos_ghg - pos_co2, width, left=pos_co2, color='darkred',\n label=labels[1])\n", (11433, 11517), True, 'import matplotlib.pyplot as plt\n'), ((11512, 11601), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(pos_ch4 - pos_ghg)', 'width'], {'left': 'pos_ghg', 'color': '"""red"""', 'label': 'labels[2]'}), "(ybar, pos_ch4 - pos_ghg, width, left=pos_ghg, color='red', label=\n labels[2])\n", (11520, 11601), True, 'import matplotlib.pyplot as plt\n'), ((11595, 11685), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(pos_o3 - pos_ch4)', 'width'], {'left': 'pos_ch4', 'color': '"""green"""', 'label': 'labels[3]'}), "(ybar, pos_o3 - pos_ch4, width, left=pos_ch4, color='green', label=\n labels[3])\n", (11603, 11685), True, 'import matplotlib.pyplot as plt\n'), ((11679, 11776), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(pos_h2o - pos_o3)', 'width'], {'left': 'pos_o3', 'color': '"""darkblue"""', 'label': '"""H$_2$O(strat)"""'}), "(ybar, pos_h2o - pos_o3, width, left=pos_o3, color='darkblue',\n label='H$_2$O(strat)')\n", (11687, 11776), True, 'import matplotlib.pyplot as plt\n'), ((11772, 11862), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(pos_aer - pos_h2o)', 'width'], {'left': 'pos_h2o', 'color': '"""blue"""', 'label': 'labels[4]'}), "(ybar, pos_aer - pos_h2o, width, left=pos_h2o, 
color='blue', label=\n labels[4])\n", (11780, 11862), True, 'import matplotlib.pyplot as plt\n'), ((11856, 11952), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(pos_cloud - pos_aer)', 'width'], {'left': 'pos_aer', 'color': '"""lightblue"""', 'label': 'labels[5]'}), "(ybar, pos_cloud - pos_aer, width, left=pos_aer, color='lightblue',\n label=labels[5])\n", (11864, 11952), True, 'import matplotlib.pyplot as plt\n'), ((11947, 11990), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', 'neg_ch4', 'width'], {'color': '"""red"""'}), "(ybar, neg_ch4, width, color='red')\n", (11955, 11990), True, 'import matplotlib.pyplot as plt\n'), ((11991, 12059), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(neg_o3 - neg_ch4)', 'width'], {'left': 'neg_ch4', 'color': '"""green"""'}), "(ybar, neg_o3 - neg_ch4, width, left=neg_ch4, color='green')\n", (11999, 12059), True, 'import matplotlib.pyplot as plt\n'), ((12058, 12124), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(neg_aer - neg_o3)', 'width'], {'left': 'neg_o3', 'color': '"""blue"""'}), "(ybar, neg_aer - neg_o3, width, left=neg_o3, color='blue')\n", (12066, 12124), True, 'import matplotlib.pyplot as plt\n'), ((12123, 12198), 'matplotlib.pyplot.barh', 'plt.barh', (['ybar', '(neg_cloud - neg_aer)', 'width'], {'left': 'neg_aer', 'color': '"""lightblue"""'}), "(ybar, neg_cloud - neg_aer, width, left=neg_aer, color='lightblue')\n", (12131, 12198), True, 'import matplotlib.pyplot as plt\n'), ((12197, 12308), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['(pos_cloud + neg_cloud)', 'ybar'], {'marker': '"""x"""', 'linestyle': '"""None"""', 'color': '"""k"""', 'label': '"""sum"""', 'xerr': 'error'}), "(pos_cloud + neg_cloud, ybar, marker='x', linestyle='None',\n color='k', label='sum', xerr=error)\n", (12209, 12308), True, 'import matplotlib.pyplot as plt\n'), ((12302, 12316), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (12312, 12316), True, 'import matplotlib.pyplot as plt\n'), ((12326, 12346), 
'numpy.arange', 'np.arange', (['(nspec + 1)'], {}), '(nspec + 1)\n', (12335, 12346), True, 'import numpy as np\n'), ((12387, 12434), 'matplotlib.pyplot.title', 'plt.title', (['"""Components of 1750 to 2019 forcing"""'], {}), "('Components of 1750 to 2019 forcing')\n", (12396, 12434), True, 'import matplotlib.pyplot as plt\n'), ((12435, 12459), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""W m$^{-2}$"""'], {}), "('W m$^{-2}$')\n", (12445, 12459), True, 'import matplotlib.pyplot as plt\n'), ((12461, 12480), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.6)', '(2.0)'], {}), '(-1.6, 2.0)\n', (12469, 12480), True, 'import matplotlib.pyplot as plt\n'), ((12481, 12510), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (12491, 12510), True, 'import matplotlib.pyplot as plt\n'), ((12511, 12556), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0.0)', 'color': '"""k"""', 'linewidth': '(0.25)'}), "(x=0.0, color='k', linewidth=0.25)\n", (12522, 12556), True, 'import matplotlib.pyplot as plt\n'), ((12557, 12567), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12565, 12567), True, 'import matplotlib.pyplot as plt\n'), ((813, 858), 'ch4_forcing_AR6.ch4_forcing_AR6', 'ch4_forcing_AR6', (['ch4_2014', 'ch4_1850', 'n2o_1850'], {}), '(ch4_2014, ch4_1850, n2o_1850)\n', (828, 858), False, 'from ch4_forcing_AR6 import ch4_forcing_AR6\n'), ((880, 935), 'n2o_forcing_AR6.n2o_forcing_AR6', 'n2o_forcing_AR6', (['n2o_2014', 'n2o_1850', 'co2_1850', 'ch4_1850'], {}), '(n2o_2014, n2o_1850, co2_1850, ch4_1850)\n', (895, 935), False, 'from n2o_forcing_AR6 import n2o_forcing_AR6\n'), ((3803, 3816), 'numpy.sum', 'np.sum', (['rfco2'], {}), '(rfco2)\n', (3809, 3816), True, 'import numpy as np\n'), ((5531, 5542), 'numpy.sum', 'np.sum', (['ari'], {}), '(ari)\n', (5537, 5542), True, 'import numpy as np\n'), ((5605, 5622), 'numpy.sum', 'np.sum', (['ac[i_aer]'], {}), '(ac[i_aer])\n', (5611, 5622), True, 'import numpy as np\n'), ((7194, 
7304), 'numpy.sum', 'np.sum', (['[rfco2[ispec], rfghg[ispec], rfch4[ispec], rfo3[ispec], rfo3_prime[ispec],\n ari[ispec], ac[ispec]]'], {}), '([rfco2[ispec], rfghg[ispec], rfch4[ispec], rfo3[ispec], rfo3_prime[\n ispec], ari[ispec], ac[ispec]])\n', (7200, 7304), True, 'import numpy as np\n'), ((9640, 9669), 'numpy.maximum', 'np.maximum', (['rfch4[i_gas]', '(0.0)'], {}), '(rfch4[i_gas], 0.0)\n', (9650, 9669), True, 'import numpy as np\n'), ((9773, 9821), 'numpy.maximum', 'np.maximum', (['(rfo3[i_gas] + rfo3_prime[i_gas])', '(0.0)'], {}), '(rfo3[i_gas] + rfo3_prime[i_gas], 0.0)\n', (9783, 9821), True, 'import numpy as np\n'), ((9875, 9923), 'numpy.minimum', 'np.minimum', (['(rfo3[i_gas] + rfo3_prime[i_gas])', '(0.0)'], {}), '(rfo3[i_gas] + rfo3_prime[i_gas], 0.0)\n', (9885, 9923), True, 'import numpy as np\n'), ((10049, 10076), 'numpy.maximum', 'np.maximum', (['ari[i_gas]', '(0.0)'], {}), '(ari[i_gas], 0.0)\n', (10059, 10076), True, 'import numpy as np\n'), ((10133, 10160), 'numpy.minimum', 'np.minimum', (['ari[i_gas]', '(0.0)'], {}), '(ari[i_gas], 0.0)\n', (10143, 10160), True, 'import numpy as np\n'), ((10222, 10248), 'numpy.maximum', 'np.maximum', (['ac[i_gas]', '(0.0)'], {}), '(ac[i_gas], 0.0)\n', (10232, 10248), True, 'import numpy as np\n'), ((10310, 10336), 'numpy.minimum', 'np.minimum', (['ac[i_gas]', '(0.0)'], {}), '(ac[i_gas], 0.0)\n', (10320, 10336), True, 'import numpy as np\n'), ((10501, 10527), 'numpy.maximum', 'np.maximum', (['ac[i_aer]', '(0.0)'], {}), '(ac[i_aer], 0.0)\n', (10511, 10527), True, 'import numpy as np\n'), ((10589, 10615), 'numpy.minimum', 'np.minimum', (['ac[i_aer]', '(0.0)'], {}), '(ac[i_aer], 0.0)\n', (10599, 10615), True, 'import numpy as np\n'), ((12350, 12386), 'matplotlib.pyplot.text', 'plt.text', (['(-1.55)', 'ybar[i]', 'species[i]'], {}), '(-1.55, ybar[i], species[i])\n', (12358, 12386), True, 'import matplotlib.pyplot as plt\n'), ((1731, 1768), 'numpy.where', 'np.where', (["(data['Experiment'] == 'CH4')"], {}), 
"(data['Experiment'] == 'CH4')\n", (1739, 1768), True, 'import numpy as np\n'), ((1781, 1818), 'numpy.where', 'np.where', (["(data['Experiment'] == 'NOx')"], {}), "(data['Experiment'] == 'NOx')\n", (1789, 1818), True, 'import numpy as np\n'), ((1831, 1868), 'numpy.where', 'np.where', (["(data['Experiment'] == 'VOC')"], {}), "(data['Experiment'] == 'VOC')\n", (1839, 1868), True, 'import numpy as np\n'), ((1881, 1918), 'numpy.where', 'np.where', (["(data['Experiment'] == 'N2O')"], {}), "(data['Experiment'] == 'N2O')\n", (1889, 1918), True, 'import numpy as np\n'), ((1930, 1966), 'numpy.where', 'np.where', (["(data['Experiment'] == 'HC')"], {}), "(data['Experiment'] == 'HC')\n", (1938, 1966), True, 'import numpy as np\n'), ((2884, 2931), 'ch4_forcing_AR6.ch4_forcing_AR6', 'ch4_forcing_AR6', (['ch4[ispec]', 'ch4_2014', 'n2o_2014'], {}), '(ch4[ispec], ch4_2014, n2o_2014)\n', (2899, 2931), False, 'from ch4_forcing_AR6 import ch4_forcing_AR6\n'), ((3595, 3653), 'co2_forcing_AR6.co2_forcing_AR6', 'co2_forcing_AR6', (['co2_2014', '(co2_2014 - co2[ispec])', 'n2o_2014'], {}), '(co2_2014, co2_2014 - co2[ispec], n2o_2014)\n', (3610, 3653), False, 'from co2_forcing_AR6 import co2_forcing_AR6\n'), ((3731, 3776), 'co2_forcing_AR6.co2_forcing_AR6', 'co2_forcing_AR6', (['co2_2014', 'co2_1850', 'n2o_2014'], {}), '(co2_2014, co2_1850, n2o_2014)\n', (3746, 3776), False, 'from co2_forcing_AR6 import co2_forcing_AR6\n'), ((4140, 4176), 'numpy.where', 'np.where', (["(data['Experiment'] == 'BC')"], {}), "(data['Experiment'] == 'BC')\n", (4148, 4176), True, 'import numpy as np\n'), ((4188, 4224), 'numpy.where', 'np.where', (["(data['Experiment'] == 'OC')"], {}), "(data['Experiment'] == 'OC')\n", (4196, 4224), True, 'import numpy as np\n'), ((4237, 4274), 'numpy.where', 'np.where', (["(data['Experiment'] == 'SO2')"], {}), "(data['Experiment'] == 'SO2')\n", (4245, 4274), True, 'import numpy as np\n'), ((4287, 4324), 'numpy.where', 'np.where', (["(data['Experiment'] == 'NH3')"], {}), 
"(data['Experiment'] == 'NH3')\n", (4295, 4324), True, 'import numpy as np\n'), ((5061, 5074), 'numpy.sum', 'np.sum', (['rfco2'], {}), '(rfco2)\n', (5067, 5074), True, 'import numpy as np\n'), ((5138, 5151), 'numpy.sum', 'np.sum', (['rfch4'], {}), '(rfch4)\n', (5144, 5151), True, 'import numpy as np\n'), ((5381, 5393), 'numpy.sum', 'np.sum', (['rfo3'], {}), '(rfo3)\n', (5387, 5393), True, 'import numpy as np\n'), ((5394, 5412), 'numpy.sum', 'np.sum', (['rfo3_prime'], {}), '(rfo3_prime)\n', (5400, 5412), True, 'import numpy as np\n'), ((8126, 8272), 'numpy.square', 'np.square', (['[rfco2[i_ch4] * 0.12, rfghg_sd[i_ch4] + rfch4_sd[i_ch4], rfo3_sd[i_ch4] +\n rfo3_prime_sd[i_ch4], 0.05, ari_sd[i_ch4], ac_sd[i_ch4]]'], {}), '([rfco2[i_ch4] * 0.12, rfghg_sd[i_ch4] + rfch4_sd[i_ch4], rfo3_sd[\n i_ch4] + rfo3_prime_sd[i_ch4], 0.05, ari_sd[i_ch4], ac_sd[i_ch4]])\n', (8135, 8272), True, 'import numpy as np\n'), ((2986, 3049), 'ch4_forcing_AR6.ch4_forcing_AR6', 'ch4_forcing_AR6', (['(ch4[ispec] + ch4_sd[ispec])', 'ch4_2014', 'n2o_2014'], {}), '(ch4[ispec] + ch4_sd[ispec], ch4_2014, n2o_2014)\n', (3001, 3049), False, 'from ch4_forcing_AR6 import ch4_forcing_AR6\n'), ((7820, 7959), 'numpy.square', 'np.square', (['[rfco2[ispec] * 0.12, rfghg_sd[ispec], rfch4_sd[ispec], rfo3_sd[ispec] +\n rfo3_prime_sd[ispec], ari_sd[ispec], ac_sd[ispec]]'], {}), '([rfco2[ispec] * 0.12, rfghg_sd[ispec], rfch4_sd[ispec], rfo3_sd[\n ispec] + rfo3_prime_sd[ispec], ari_sd[ispec], ac_sd[ispec]])\n', (7829, 7959), True, 'import numpy as np\n')] |
"""
ๆๅคฑๅฝๆฐ loss
้ขๆตๅผ(predict)(y)ไธๅทฒ็ฅ็ญๆก(target)(y_)็ๅทฎ่ท
ๅๆน่ฏฏๅทฎ MSE mean-square error
MSE(y, y_) = sigma ((y - y_)^2 / n)
loss = tf.reduce_mean(tf.square(y, y_))
ๅๅไผ ๆญ BP back propagation
ไธบ่ฎญ็ปๆจกๅๅๆฐ, ๅจๆๆๅๆฐไธ็จๆขฏๅบฆไธ้, ไฝฟNNๆจกๅๅจ่ฎญ็ปๆฐๆฎไธ็ๆๅคฑๆๅฐ.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
train_step = tf.train.MomentumOptimizer(learning_rate, momentum).minimize(loss)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
ๅญฆไน ็ LR learning rate
ๅญฆไน ็ ๅคง ๅญฆไน ็ ๅฐ
ๅญฆไน ้ๅบฆ ๅฟซ ๆ
ข
ไฝฟ็จๆถ้ด็น ๅๅผๅง่ฎญ็ปๆถ ไธๅฎ่ฝฎๆฐ่ฟๅ
ๅฏไฝ็จ 1.ๆๆๅคฑๅผ็็ธ; 2.ๆๆฏ่ก. 1.ๆ่ฟๆๅ; 2.ๆถๆ้ๅบฆๆ
ข.
ๅๅผๅง่ฎญ็ปๆถ: ๅญฆไน ็ไปฅ 0.01 ~ 0.001 ไธบๅฎ.
ไธๅฎ่ฝฎๆฐ่ฟๅ: ้ๆธๅ็ผ.
ๆฅ่ฟ่ฎญ็ป็ปๆ: ๅญฆไน ้็็่กฐๅๅบ่ฏฅๅจ100ๅไปฅไธ.
"""
import tensorflow as tf
import numpy as np
def back_propagation_test():
# ่ฎญ็ปๆฌกๆฐ
steps = 3000
# ๆฏๆฌกๅๅ
ฅๆฐๆฎๆฐ้
batch_size = 8
# ้ๆบ็งๅญ
seed = 8731
# ๅบไบseedไบง็้ๆบๆฐ
rng = np.random.RandomState(seed)
# ็ๆ32็ป้้ๅไฝ็งฏไฝไธบ่พๅ
ฅๆฐๆฎ้, 32่ก2ๅ็็ฉ้ต
mat_x = rng.rand(32, 2)
mat_y = []
# print(mat_x)
# ๅ่ฎพ"ไฝ็งฏ + ้้ < 1"็้ถไปถๅๆ ผ, ๆ้ mat_y. ไปXไธญๅๅบไธ่ก, ๅคๆญๅฆๆไธค่
็ๅๅฐไบ1, ็ปY่ตๅผ1, ๅฆๅ่ตๅผ0.
# ็ฅ็ป็ฝ็ปๅคๆญ็ไพๆฎๆฏ"ๆฐๆฎ"ๅ"ๆฆ็", ๅฎๅนถไธ็ฅ้ไบบไธบๆ ๆณจyๆฏ0ๆ1็ๆนๆณ.
# pythonic code: mat_y = [[int(x0 + x1 < 1)] for (x0, x1) in mat_x]
for x0, x1 in mat_x:
if x0 + x1 < 1:
mat_y.append([1])
else:
mat_y.append([0])
# print(mat_y)
# ๅๅไผ ๆญ
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
a = tf.matmul(x, w1)
y = tf.matmul(a, w2)
# ๅๅไผ ๆญ
loss = tf.reduce_mean(tf.square(y - y_))
lr = 0.001
train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)
# ่ฎญ็ป
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
# ่พๅบๆช่ฎญ็ป็ๅๆฐๅๅผ
print("่ฎญ็ปๅ็็ปๆ")
print(sess.run(w1))
print(sess.run(w2))
for i in range(steps):
# ๆฐๆฎ้ๅชๆ32ไธช(่ก), ๆไปฅๅฏน32ๅไฝ, ่ฎฉstartๅจๆฐๆฎ้่ๅดๅ
, i * batch_size่ฎฉๆฏๆฌก่ฎญ็ป่ทจๅบฆbatch_sizeไธชๆฐๆฎ
start = (i * batch_size) % 32
end = start + batch_size
feeds = {
x: mat_x[start:end],
y_: mat_y[start:end]
}
# ๆฏๆฌกๅพช็ฏไธญ, ไปฃๅ
ฅ่พๅ
ฅ็นๅพ(data)ๅๆ ๅ็ญๆก(target)
sess.run(train_step, feed_dict=feeds)
if i % 500 == 0:
total_loss = sess.run(loss, feed_dict={x: mat_x, y_: mat_y})
print("ๅจ%dๆฌก่ฎญ็ปๅ, ๆๅคฑไธบ%g" % (i, total_loss))
print("่ฎญ็ปๅ็็ปๆ")
print(sess.run(w1))
print(sess.run(w2))
return None
if __name__ == '__main__':
back_propagation_test()
| [
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.matmul",
"tensorflow.square",
"numpy.random.RandomState"
] | [((902, 929), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (923, 929), True, 'import numpy as np\n'), ((1379, 1422), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 2)'}), '(tf.float32, shape=(None, 2))\n', (1393, 1422), True, 'import tensorflow as tf\n'), ((1432, 1475), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)'}), '(tf.float32, shape=(None, 1))\n', (1446, 1475), True, 'import tensorflow as tf\n'), ((1616, 1632), 'tensorflow.matmul', 'tf.matmul', (['x', 'w1'], {}), '(x, w1)\n', (1625, 1632), True, 'import tensorflow as tf\n'), ((1641, 1657), 'tensorflow.matmul', 'tf.matmul', (['a', 'w2'], {}), '(a, w2)\n', (1650, 1657), True, 'import tensorflow as tf\n'), ((1498, 1540), 'tensorflow.random_normal', 'tf.random_normal', (['[2, 3]'], {'stddev': '(1)', 'seed': '(1)'}), '([2, 3], stddev=1, seed=1)\n', (1514, 1540), True, 'import tensorflow as tf\n'), ((1563, 1605), 'tensorflow.random_normal', 'tf.random_normal', (['[3, 1]'], {'stddev': '(1)', 'seed': '(1)'}), '([3, 1], stddev=1, seed=1)\n', (1579, 1605), True, 'import tensorflow as tf\n'), ((1696, 1713), 'tensorflow.square', 'tf.square', (['(y - y_)'], {}), '(y - y_)\n', (1705, 1713), True, 'import tensorflow as tf\n'), ((1819, 1831), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1829, 1831), True, 'import tensorflow as tf\n'), ((1859, 1892), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1890, 1892), True, 'import tensorflow as tf\n'), ((1747, 1784), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['lr'], {}), '(lr)\n', (1780, 1784), True, 'import tensorflow as tf\n')] |
'''
Infers pseudo log likelihood approximations from ESM models.
'''
import argparse
from collections import defaultdict
import os
import pathlib
import numpy as np
import pandas as pd
import torch
from esm import Alphabet, FastaBatchedDataset, ProteinBertModel, pretrained, BatchConverter
from utils import read_fasta, save
from utils.esm_utils import PLLFastaBatchedDataset, PLLBatchConverter
criterion = torch.nn.CrossEntropyLoss(reduction='none')
def create_parser():
parser = argparse.ArgumentParser(
description="Extract per-token representations and model outputs for sequences in a FASTA file" # noqa
)
parser.add_argument(
"fasta_file",
type=pathlib.Path,
help="FASTA file on which to extract representations",
)
parser.add_argument(
"wt_fasta_file",
type=pathlib.Path,
help="FASTA file for WT",
)
parser.add_argument(
"output_dir",
type=pathlib.Path,
help="output dir",
)
parser.add_argument(
"--model_location",
type=str,
help="model location",
default="/mnt/esm_weights/esm1b/esm1b_t33_650M_UR50S.pt"
)
parser.add_argument(
"--toks_per_batch", type=int, default=4096, help="maximum batch size"
)
parser.add_argument("--msa_transformer", action="store_true")
parser.add_argument("--save_hidden", action="store_true", help="save rep")
parser.add_argument("--nogpu", action="store_true", help="Do not use GPU even if available")
return parser
def main(args):
model, alphabet = pretrained.load_model_and_alphabet(args.model_location)
#batch_converter = alphabet.get_batch_converter()
batch_converter = PLLBatchConverter(alphabet)
mask_idx = torch.tensor(alphabet.mask_idx)
padding_idx = torch.tensor(alphabet.padding_idx)
model.eval()
if torch.cuda.is_available() and not args.nogpu:
model = model.cuda()
mask_idx = mask_idx.cuda()
print("Transferred model to GPU")
wt = read_fasta(args.wt_fasta_file)[0]
wt_data = [("WT", wt, -1)]
_, _, wt_toks, _ = batch_converter(wt_data)
dataset = PLLFastaBatchedDataset.from_file(args.fasta_file, wt)
batches = dataset.get_batch_indices(args.toks_per_batch, extra_toks_per_seq=1)
data_loader = torch.utils.data.DataLoader(
dataset, collate_fn=batch_converter, batch_sampler=batches
)
print(f"Read {args.fasta_file} with {len(dataset)} sequences")
repr_layers = [model.num_layers] # extract last layer
pll_diff = defaultdict(list)
with torch.no_grad():
for batch_idx, (labels, strs, toks, mask_pos) in enumerate(data_loader):
if batch_idx % 100 == 0:
print(
f"Processing {batch_idx + 1} of {len(batches)} batches ({toks.size(0)} sequences)"
)
if torch.cuda.is_available() and not args.nogpu:
toks = toks.to(device="cuda", non_blocking=True)
mask_pos = mask_pos.to(device="cuda", non_blocking=True)
wt_toks = wt_toks.to(device="cuda", non_blocking=True)
mask = torch.zeros(toks.shape, dtype=torch.bool, device=toks.device)
row_idx = torch.arange(mask.size(0)).long()
mask[row_idx, mask_pos] = True
masked_toks = torch.where(mask, mask_idx, toks)
if args.msa_transformer:
masked_toks = torch.unsqueeze(masked_toks, 1)
out = model(masked_toks, repr_layers=repr_layers,
return_contacts=False)
logits = out["logits"]
if args.msa_transformer:
logits = torch.squeeze(logits, 1)
logits_tr = logits.transpose(1, 2) # [B, E, T]
loss = criterion(logits_tr, toks)
npll = torch.sum(mask * loss, dim=1).to(device="cpu").numpy()
wt_toks_rep = wt_toks.repeat(toks.shape[0], 1)
masked_wt_toks = torch.where(mask, mask_idx, wt_toks_rep)
if args.msa_transformer:
masked_wt_toks = torch.unsqueeze(masked_wt_toks, 1)
out = model(masked_wt_toks, repr_layers=repr_layers,
return_contacts=False)
logits = out["logits"]
if args.msa_transformer:
logits = torch.squeeze(logits, 1)
logits_tr = logits.transpose(1, 2) # [B, E, T]
loss_wt = criterion(logits_tr, wt_toks_rep)
npll_wt = torch.sum(mask * loss_wt, dim=1).to(
device="cpu").numpy()
for i, label in enumerate(labels):
pll_diff[label].append(npll_wt[i] - npll[i])
args.output_dir.mkdir(parents=True, exist_ok=True)
pll_diff = {k: np.sum(v) for k, v in pll_diff.items()}
df = pd.DataFrame.from_dict(pll_diff, columns=['pll'], orient='index')
df.to_csv(os.path.join(args.output_dir, 'pll.csv'), index=True)
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
main(args)
| [
"utils.esm_utils.PLLFastaBatchedDataset.from_file",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.sum",
"torch.squeeze",
"argparse.ArgumentParser",
"torch.unsqueeze",
"pandas.DataFrame.from_dict",
"esm.pretrained.load_model_and_alphabet",
"utils.esm_utils.PLLBatchConverter",
"ut... | [((412, 455), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (437, 455), False, 'import torch\n'), ((492, 622), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract per-token representations and model outputs for sequences in a FASTA file"""'}), "(description=\n 'Extract per-token representations and model outputs for sequences in a FASTA file'\n )\n", (515, 622), False, 'import argparse\n'), ((1584, 1639), 'esm.pretrained.load_model_and_alphabet', 'pretrained.load_model_and_alphabet', (['args.model_location'], {}), '(args.model_location)\n', (1618, 1639), False, 'from esm import Alphabet, FastaBatchedDataset, ProteinBertModel, pretrained, BatchConverter\n'), ((1716, 1743), 'utils.esm_utils.PLLBatchConverter', 'PLLBatchConverter', (['alphabet'], {}), '(alphabet)\n', (1733, 1743), False, 'from utils.esm_utils import PLLFastaBatchedDataset, PLLBatchConverter\n'), ((1759, 1790), 'torch.tensor', 'torch.tensor', (['alphabet.mask_idx'], {}), '(alphabet.mask_idx)\n', (1771, 1790), False, 'import torch\n'), ((1809, 1843), 'torch.tensor', 'torch.tensor', (['alphabet.padding_idx'], {}), '(alphabet.padding_idx)\n', (1821, 1843), False, 'import torch\n'), ((2159, 2212), 'utils.esm_utils.PLLFastaBatchedDataset.from_file', 'PLLFastaBatchedDataset.from_file', (['args.fasta_file', 'wt'], {}), '(args.fasta_file, wt)\n', (2191, 2212), False, 'from utils.esm_utils import PLLFastaBatchedDataset, PLLBatchConverter\n'), ((2314, 2405), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'collate_fn': 'batch_converter', 'batch_sampler': 'batches'}), '(dataset, collate_fn=batch_converter,\n batch_sampler=batches)\n', (2341, 2405), False, 'import torch\n'), ((2560, 2577), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2571, 2577), False, 'from collections import defaultdict\n'), ((4799, 4864), 'pandas.DataFrame.from_dict', 
'pd.DataFrame.from_dict', (['pll_diff'], {'columns': "['pll']", 'orient': '"""index"""'}), "(pll_diff, columns=['pll'], orient='index')\n", (4821, 4864), True, 'import pandas as pd\n'), ((1869, 1894), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1892, 1894), False, 'import torch\n'), ((2031, 2061), 'utils.read_fasta', 'read_fasta', (['args.wt_fasta_file'], {}), '(args.wt_fasta_file)\n', (2041, 2061), False, 'from utils import read_fasta, save\n'), ((2588, 2603), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2601, 2603), False, 'import torch\n'), ((4750, 4759), 'numpy.sum', 'np.sum', (['v'], {}), '(v)\n', (4756, 4759), True, 'import numpy as np\n'), ((4879, 4919), 'os.path.join', 'os.path.join', (['args.output_dir', '"""pll.csv"""'], {}), "(args.output_dir, 'pll.csv')\n", (4891, 4919), False, 'import os\n'), ((3157, 3218), 'torch.zeros', 'torch.zeros', (['toks.shape'], {'dtype': 'torch.bool', 'device': 'toks.device'}), '(toks.shape, dtype=torch.bool, device=toks.device)\n', (3168, 3218), False, 'import torch\n'), ((3345, 3378), 'torch.where', 'torch.where', (['mask', 'mask_idx', 'toks'], {}), '(mask, mask_idx, toks)\n', (3356, 3378), False, 'import torch\n'), ((3973, 4013), 'torch.where', 'torch.where', (['mask', 'mask_idx', 'wt_toks_rep'], {}), '(mask, mask_idx, wt_toks_rep)\n', (3984, 4013), False, 'import torch\n'), ((2882, 2907), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2905, 2907), False, 'import torch\n'), ((3446, 3477), 'torch.unsqueeze', 'torch.unsqueeze', (['masked_toks', '(1)'], {}), '(masked_toks, 1)\n', (3461, 3477), False, 'import torch\n'), ((3679, 3703), 'torch.squeeze', 'torch.squeeze', (['logits', '(1)'], {}), '(logits, 1)\n', (3692, 3703), False, 'import torch\n'), ((4084, 4118), 'torch.unsqueeze', 'torch.unsqueeze', (['masked_wt_toks', '(1)'], {}), '(masked_wt_toks, 1)\n', (4099, 4118), False, 'import torch\n'), ((4323, 4347), 'torch.squeeze', 'torch.squeeze', (['logits', '(1)'], 
{}), '(logits, 1)\n', (4336, 4347), False, 'import torch\n'), ((3829, 3858), 'torch.sum', 'torch.sum', (['(mask * loss)'], {'dim': '(1)'}), '(mask * loss, dim=1)\n', (3838, 3858), False, 'import torch\n'), ((4486, 4518), 'torch.sum', 'torch.sum', (['(mask * loss_wt)'], {'dim': '(1)'}), '(mask * loss_wt, dim=1)\n', (4495, 4518), False, 'import torch\n')] |
#!/usr/bin/env python3
import re
from collections import namedtuple
import numpy as np
# Parse a `df`-style node listing and model the storage grid.
# NOTE(review): this looks like an Advent of Code 2016 day 22 part 2
# style solution -- confirm against the original puzzle statement.
Node = namedtuple('Node', ['name', 'x', 'y', 'size', 'used', 'avail'])
nodes = []
node_lookup = {}
max_x = -1
max_y = -1
empty = None
with open('input.txt', 'r') as f:
    f.readline() # Shell command
    f.readline() # Headers
    # e.g. "/dev/grid/node-x0-y0  90T  67T  23T  74%"
    line_re = re.compile(r'([a-z0-9/-]+)\s+(\d+)T\s+(\d+)T\s+(\d+)T\s+')
    name_re = re.compile(r'[^-]+-x(\d+)-y(\d+)')
    for line in f:
        m = line_re.match(line)
        name_m = name_re.match(m[1])
        x = int(name_m[1])
        y = int(name_m[2])
        max_x = max(x, max_x)
        max_y = max(y, max_y)
        node = Node(m[1], x, y, int(m[2]), int(m[3]), int(m[4]))
        node_lookup[(x, y)] = node
        if node.used == 0:
            # the single node holding no data: the "hole" we shuffle around
            empty = node
print("Grid: {}*{}={} ({})".format(max_x+1, max_y+1, (max_x+1)*(max_y+1), len(node_lookup)))
print("Target is", node_lookup[(0, max_y)])
print("Empty", empty)
needed = node_lookup[(0, max_y)].used
def swap(x1, y1, x2, y2):
    """Exchange the contents of two board cells and count the move.

    Mutates the module-level `board` in place and increments `moves`.
    """
    global board, moves
    tmp = board[x1, y1]
    board[x1, y1] = board[x2, y2]
    board[x2, y2] = tmp
    moves += 1
# board cell codes index into `chars` below:
# 0 '.', 1 'O', 2 'G', 3 '#' (too big to move), 4 '_' (empty)
board = np.zeros((max_x+1, max_y+1), dtype=int)
for x in range(max_x+1):
    for y in range(max_y+1):
        node = node_lookup[x, y]
        # NOTE(review): debug print -- fires once for every grid cell
        print(empty.avail, node.used, x, y)
        if x == 0 and y == 0:
            board[0, 0] = 1
        elif x == max_x and y == 0:
            board[x, y] = 2
        elif node.used == 0:
            board[x, y] = 4
        elif node.used > empty.avail:
            board[x, y] = 3
        else:
            pass
chars = ['.', 'O', 'G', '#', '_']
# for x in range(max_x + 1):
#     line = ''
#     for y in range(max_y + 1):
#         line += chars[board[x,y]]
#     print(line)
loc = [empty.x, empty.y]
moves = 0
# Not proud of this, but sometimes visual inspection is the way to go
# -- the hard-coded move sequence below was derived by eyeballing the
# printed board layout for this specific input.
while loc[0] != 1: # Up
    swap(loc[0], loc[1], loc[0] - 1, loc[1])
    loc = [loc[0]-1, loc[1]]
while loc[1] > 0: # Over
    swap(loc[0], loc[1], loc[0], loc[1]-1)
    loc = [loc[0], loc[1]-1]
while loc[0] < max_x - 1: # Down
    swap(loc[0], loc[1], loc[0] + 1, loc[1])
    loc = [loc[0]+1, loc[1]]
while loc[0] != 0: # Shimmy to the top
    # five swaps rotate the hole around the goal data, dragging the
    # goal one step toward the origin on each pass
    x, y = loc
    swap(x, y, x+1, y)
    swap (x+1, y, x+1, y+1)
    swap(x+1, y+1, x, y+1)
    swap(x, y+1, x-1, y+1)
    swap(x-1, y+1, x-1, y)
    loc = [x-1, y]
#
# print("After {} moves".format(moves))
# for x in range(max_x + 1):
#     line = ''
#     for y in range(max_y + 1):
#         line += chars[board[x,y]]
#     print(line)
print("Moves:", moves+1)
| [
"numpy.zeros",
"collections.namedtuple",
"re.compile"
] | [((96, 159), 'collections.namedtuple', 'namedtuple', (['"""Node"""', "['name', 'x', 'y', 'size', 'used', 'avail']"], {}), "('Node', ['name', 'x', 'y', 'size', 'used', 'avail'])\n", (106, 159), False, 'from collections import namedtuple\n'), ((1150, 1193), 'numpy.zeros', 'np.zeros', (['(max_x + 1, max_y + 1)'], {'dtype': 'int'}), '((max_x + 1, max_y + 1), dtype=int)\n', (1158, 1193), True, 'import numpy as np\n'), ((332, 396), 're.compile', 're.compile', (['"""([a-z0-9/-]+)\\\\s+(\\\\d+)T\\\\s+(\\\\d+)T\\\\s+(\\\\d+)T\\\\s+"""'], {}), "('([a-z0-9/-]+)\\\\s+(\\\\d+)T\\\\s+(\\\\d+)T\\\\s+(\\\\d+)T\\\\s+')\n", (342, 396), False, 'import re\n'), ((405, 440), 're.compile', 're.compile', (['"""[^-]+-x(\\\\d+)-y(\\\\d+)"""'], {}), "('[^-]+-x(\\\\d+)-y(\\\\d+)')\n", (415, 440), False, 'import re\n')] |
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import numpy.ma as ma
import glob
from collections import namedtuple, OrderedDict
import netCDF4 as nc
import os
import scipy
import scipy.io as sio
from scipy import interpolate, signal
from pyproj import Proj,transform
import sys
sys.path.append('/ocean/ssahu/CANYONS/wcvi/grid/')
from bathy_common import *
from matplotlib import path
from salishsea_tools import viz_tools
import xarray as xr
from salishsea_tools import nc_tools
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
import cmocean as cmo
import matplotlib.gridspec as gridspec
from scipy.io import loadmat
from scipy.interpolate import griddata
from dateutil.parser import parse
from salishsea_tools import geo_tools, viz_tools, tidetools, nc_tools
import gsw
import seaborn as sns
# Depth levels (first 32) from a NEMO grid_T output file.
# NOTE(review): `zlevels` appears unused in the rest of this script.
zlevels = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc').variables['deptht'][:32]
# index windows selecting the WCVI (west coast Vancouver Island) subdomain
y_wcvi_slice = np.array(np.arange(180,350))
x_wcvi_slice = np.array(np.arange(480,650))
# ARIANE particle-tracking output for the LB_08 release
nc_file = nc.Dataset('/data/ssahu/ARIANE/LB_08/lot_of_particles_time_corrected.nc')
init_x = nc_file.variables['init_x']
init_z = nc_file.variables['init_z']
init_age = nc_file.variables['init_age']
traj_depth = nc_file.variables['traj_depth'][:]
traj_lon = nc_file.variables['traj_lon'][:]
traj_lat = nc_file.variables['traj_lat'][:]
traj_rho = nc_file.variables['traj_dens'][:]
traj_tem = nc_file.variables['traj_temp'][:]
traj_sal = nc_file.variables['traj_salt'][:]
traj_time = nc_file.variables['traj_time'][:]
final_age = nc_file.variables['final_age']
# duplicate aliases of the arrays above, kept for the plotting code below
lon1=nc_file.variables['traj_lon'][:]
lat1=nc_file.variables['traj_lat'][:]
dep1=nc_file.variables['traj_depth'][:]
x1=nc_file.variables['init_x'][:]
y1=nc_file.variables['init_y'][:]
t1=nc_file.variables['traj_time'][:]
x2=nc_file.variables['final_x'][:]
y2=nc_file.variables['final_y'][:]
def plot_tracks():
    """Plot all particle trajectories over the WCVI bathymetry,
    colored by particle depth, and save the figure to disk.

    Reads the module-level trajectory arrays (`traj_lon`, `traj_lat`,
    `traj_depth`, `x1`, `y1`) and writes a PNG to a hard-coded path.
    """
    bathy = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc')
    Z = bathy.variables['Bathymetry']
    lon = bathy['nav_lon'][...]
    lat = bathy['nav_lat'][...]
    cmap=plt.cm.get_cmap('nipy_spectral')
#    cmap = plt.cm.get_cmap('gist_rainbow')
    cmap.set_bad('#8b7765')
    cmin = 0
    cmax = 300
    import matplotlib as mpl
#    norm = mpl.colors.Normalize(vmin=cmin,vmax=cmax)
    fig, ax = plt.subplots(1, 1, figsize=(16,12)); ax.grid()
    # bathymetry contours: fine 10 m steps from 100-200 m, plus a
    # coarser set of reference isobaths.
    # NOTE(review): `level=` is not a standard `Axes.contour` keyword
    # (should be `levels=`) -- confirm against the matplotlib version used.
    CS = ax.contour(x_wcvi_slice,y_wcvi_slice,Z[y_wcvi_slice,x_wcvi_slice], np.arange(100,200,10))
    CS1 = ax.contour(x_wcvi_slice,y_wcvi_slice,Z[y_wcvi_slice,x_wcvi_slice], level = [0,100,200,500,1000])
    CLS = plt.clabel(CS, inline=3,fmt='%0.0f m', fontsize=12)
    CLS1 = plt.clabel(CS1, inline=3,fmt='%0.0f m', fontsize=12)
    # one scatter point per particle per timestep; the nearest-model-point
    # lookup inside this double loop dominates the runtime
    for part in np.arange(x1.shape[0]):
        for k in np.arange(traj_depth.shape[0]):
            y, x = geo_tools.find_closest_model_point(traj_lon[k,part],traj_lat[k,part],\
                                        lon,lat,grid='NEMO',tols=\
                                        {'NEMO': {'tol_lon': 0.1, 'tol_lat': 0.1},\
                                        'GEM2.5': {'tol_lon': 0.1, 'tol_lat': 0.1}})
            # red star marks the particle's release location
            track1 = ax.scatter(x_wcvi_slice[np.int(np.rint(x1[part]))-1], y_wcvi_slice[np.int(np.rint(y1[part]))-1], s=500, marker = '*', c = 'red', alpha=0.9, edgecolor= 'orangered')
            # trajectory point colored by depth; sign flipped with *-1
            # (traj_depth is presumably stored negative-down -- confirm)
            q = ax.scatter(x, y, c = np.multiply(-1,np.ma.array(traj_depth[k,part]).filled()), cmap = cmap, vmin = cmin, vmax= cmax, s=10, alpha=0.5)#, edgecolor= 'cyan')
    ax.set_xlabel('x index', fontsize =16)
    ax.set_ylabel('y index', fontsize = 16)
    ax.tick_params(axis='both',labelsize =16)
#    ax.set_title('Particle track at depth \u2248 {d:.1f} m'.format(d = np.abs(traj_depth[0,traj_depth_level])), fontsize =16)
    ax.legend(loc = 'best')
    viz_tools.plot_land_mask(ax, bathy, yslice=y_wcvi_slice, xslice=x_wcvi_slice, color='burlywood')
    viz_tools.plot_coastline(ax, bathy, yslice=y_wcvi_slice, xslice=x_wcvi_slice, color='brown')
    ax.grid()
    cbar = fig.colorbar(q, ax=ax)
    cbar.set_label(label='Depth ($m$)', fontsize = 16)
    cbar.ax.tick_params(labelsize=16)
    fig.tight_layout()
    plt.savefig('/home/ssahu/saurav/3D_images_for_video_spice/LB_08_particles.png')
    plt.close()
plot_tracks()
| [
"matplotlib.pyplot.savefig",
"salishsea_tools.geo_tools.find_closest_model_point",
"numpy.ma.array",
"matplotlib.use",
"netCDF4.Dataset",
"salishsea_tools.viz_tools.plot_land_mask",
"matplotlib.pyplot.close",
"salishsea_tools.viz_tools.plot_coastline",
"numpy.rint",
"matplotlib.pyplot.clabel",
"... | [((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((398, 448), 'sys.path.append', 'sys.path.append', (['"""/ocean/ssahu/CANYONS/wcvi/grid/"""'], {}), "('/ocean/ssahu/CANYONS/wcvi/grid/')\n", (413, 448), False, 'import sys\n'), ((1175, 1248), 'netCDF4.Dataset', 'nc.Dataset', (['"""/data/ssahu/ARIANE/LB_08/lot_of_particles_time_corrected.nc"""'], {}), "('/data/ssahu/ARIANE/LB_08/lot_of_particles_time_corrected.nc')\n", (1185, 1248), True, 'import netCDF4 as nc\n'), ((1099, 1118), 'numpy.arange', 'np.arange', (['(180)', '(350)'], {}), '(180, 350)\n', (1108, 1118), True, 'import numpy as np\n'), ((1143, 1162), 'numpy.arange', 'np.arange', (['(480)', '(650)'], {}), '(480, 650)\n', (1152, 1162), True, 'import numpy as np\n'), ((2079, 2184), 'netCDF4.Dataset', 'nc.Dataset', (['"""/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc"""'], {}), "(\n '/data/mdunphy/NEP036-N30-OUT/INV/Bathymetry_EastCoast_NEMO_R036_GEBCO_corr_v14.nc'\n )\n", (2089, 2184), True, 'import netCDF4 as nc\n'), ((2295, 2327), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""nipy_spectral"""'], {}), "('nipy_spectral')\n", (2310, 2327), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2569), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(16, 12)'}), '(1, 1, figsize=(16, 12))\n', (2545, 2569), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2848), 'matplotlib.pyplot.clabel', 'plt.clabel', (['CS'], {'inline': '(3)', 'fmt': '"""%0.0f m"""', 'fontsize': '(12)'}), "(CS, inline=3, fmt='%0.0f m', fontsize=12)\n", (2806, 2848), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2912), 'matplotlib.pyplot.clabel', 'plt.clabel', (['CS1'], {'inline': '(3)', 'fmt': '"""%0.0f m"""', 'fontsize': '(12)'}), "(CS1, inline=3, fmt='%0.0f m', fontsize=12)\n", (2869, 2912), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2951), 'numpy.arange', 'np.arange', 
(['x1.shape[0]'], {}), '(x1.shape[0])\n', (2938, 2951), True, 'import numpy as np\n'), ((4006, 4107), 'salishsea_tools.viz_tools.plot_land_mask', 'viz_tools.plot_land_mask', (['ax', 'bathy'], {'yslice': 'y_wcvi_slice', 'xslice': 'x_wcvi_slice', 'color': '"""burlywood"""'}), "(ax, bathy, yslice=y_wcvi_slice, xslice=\n x_wcvi_slice, color='burlywood')\n", (4030, 4107), False, 'from salishsea_tools import geo_tools, viz_tools, tidetools, nc_tools\n'), ((4107, 4204), 'salishsea_tools.viz_tools.plot_coastline', 'viz_tools.plot_coastline', (['ax', 'bathy'], {'yslice': 'y_wcvi_slice', 'xslice': 'x_wcvi_slice', 'color': '"""brown"""'}), "(ax, bathy, yslice=y_wcvi_slice, xslice=\n x_wcvi_slice, color='brown')\n", (4131, 4204), False, 'from salishsea_tools import geo_tools, viz_tools, tidetools, nc_tools\n'), ((4379, 4458), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/home/ssahu/saurav/3D_images_for_video_spice/LB_08_particles.png"""'], {}), "('/home/ssahu/saurav/3D_images_for_video_spice/LB_08_particles.png')\n", (4390, 4458), True, 'import matplotlib.pyplot as plt\n'), ((4463, 4474), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4472, 4474), True, 'import matplotlib.pyplot as plt\n'), ((2656, 2679), 'numpy.arange', 'np.arange', (['(100)', '(200)', '(10)'], {}), '(100, 200, 10)\n', (2665, 2679), True, 'import numpy as np\n'), ((2970, 3000), 'numpy.arange', 'np.arange', (['traj_depth.shape[0]'], {}), '(traj_depth.shape[0])\n', (2979, 3000), True, 'import numpy as np\n'), ((945, 1059), 'netCDF4.Dataset', 'nc.Dataset', (['"""/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc"""'], {}), "(\n '/data/mdunphy/NEP036-N30-OUT/CDF_COMB_COMPRESSED/NEP036-N30_IN_20140915_00001440_grid_T.nc'\n )\n", (955, 1059), True, 'import netCDF4 as nc\n'), ((3021, 3217), 'salishsea_tools.geo_tools.find_closest_model_point', 'geo_tools.find_closest_model_point', (['traj_lon[k, part]', 'traj_lat[k, part]', 'lon', 'lat'], {'grid': 
'"""NEMO"""', 'tols': "{'NEMO': {'tol_lon': 0.1, 'tol_lat': 0.1}, 'GEM2.5': {'tol_lon': 0.1,\n 'tol_lat': 0.1}}"}), "(traj_lon[k, part], traj_lat[k, part],\n lon, lat, grid='NEMO', tols={'NEMO': {'tol_lon': 0.1, 'tol_lat': 0.1},\n 'GEM2.5': {'tol_lon': 0.1, 'tol_lat': 0.1}})\n", (3055, 3217), False, 'from salishsea_tools import geo_tools, viz_tools, tidetools, nc_tools\n'), ((3387, 3404), 'numpy.rint', 'np.rint', (['x1[part]'], {}), '(x1[part])\n', (3394, 3404), True, 'import numpy as np\n'), ((3430, 3447), 'numpy.rint', 'np.rint', (['y1[part]'], {}), '(y1[part])\n', (3437, 3447), True, 'import numpy as np\n'), ((3572, 3604), 'numpy.ma.array', 'np.ma.array', (['traj_depth[k, part]'], {}), '(traj_depth[k, part])\n', (3583, 3604), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import pyfits
from pylab import *
import Marsh
import numpy
import scipy
def getSpectrum(filename,b,Aperture,minimum_column,maximum_column):
    """Extract a 1D spectrum from a 2D spectral image.

    Parameters
    ----------
    filename : str
        path to the FITS image containing the spectrum.
    b : array-like
        polynomial coefficients of the spectral trace, evaluated with
        `scipy.polyval` at every image column.
    Aperture : int
        extraction aperture around the trace, in pixels.
    minimum_column, maximum_column : int
        column range to extract; the caller below passes (0, 0) --
        presumably meaning the full range, per `Marsh.SimpleExtraction`
        (confirm against the Marsh library documentation).

    Returns
    -------
    FinalMatrix : numpy array holding the extracted spectrum.
    """
    hdulist = pyfits.open(filename) # Here we obtain the image...
    data=hdulist[0].data # ... and we obtain the image matrix.
    # `double` and `asarray` come from the `pylab` star import above
    Result=Marsh.SimpleExtraction((data.flatten()).astype(double),scipy.polyval(b,numpy.arange(data.shape[1])).astype(double),data.shape[0],data.shape[1],data.shape[1],Aperture,minimum_column,maximum_column)
    FinalMatrix=asarray(Result) # After the function, we convert our list to a Numpy array.
    return FinalMatrix
# Script entry point: extract and plot the WASP-6 spectrum.
FileName='../../../transmission_spectroscopy/WASP6/data.fits' # Filename of the image of the spectrum.
b=pyfits.getdata('../../../transmission_spectroscopy/WASP6/trace_coeffs.fits') # We load our trace coefficients...
Aperture=15
# NOTE(review): the original comment here claimed x=50 to 1500, but the
# call passes (0, 0) for the column range -- the comment was stale.
Spectrum=getSpectrum(FileName,b,Aperture,0,0)
x=arange(len(Spectrum))
plot(x,Spectrum,'-') # Plot the results.
show()
| [
"pyfits.open",
"numpy.arange",
"pyfits.getdata"
] | [((837, 913), 'pyfits.getdata', 'pyfits.getdata', (['"""../../../transmission_spectroscopy/WASP6/trace_coeffs.fits"""'], {}), "('../../../transmission_spectroscopy/WASP6/trace_coeffs.fits')\n", (851, 913), False, 'import pyfits\n'), ((178, 199), 'pyfits.open', 'pyfits.open', (['filename'], {}), '(filename)\n', (189, 199), False, 'import pyfits\n'), ((414, 441), 'numpy.arange', 'numpy.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (426, 441), False, 'import numpy\n')] |
"""Generate confidence intervals for RNA velocity models by bootstrapping
across reads.
Our bootstrapping procedure is as follows:
1. Given a spliced count matrix ([Cells, Genes]) S and an unspliced matrix U,
create a total counts matrix X = S + U.
2.1 For each cell X_i \in X, fit a multinomial distribution. Sample D (depth) reads
from each multinomial to create a sampled count distribution across genes \hat X_i.
2.2 For each gene g in \hat X_i, fit a binomial distribution Binom(n=\hat X_ig, p=\frac{S_ig}{X_ig})
which represents the distribution of spliced vs. unspliced counts.
2.3 Sample an estimate of the spliced counts for X_ig, \hat S_ig ~ Binom(n=\hat X_ig, p=S_ig/X_ig).
Compute the conjugate unspliced read count \hat U_ig = \hat X_ig - \hat S_ig.
3. Given the complete bootstrapped samples \hat S, \hat U, estimate a bootstrapped
velocity vector for consideration.
Bootstrap samples of cell counts therefore have the same number of counts as the original
cell, preventing any issues due to differing library depths:
\sum_i \sum_j X_{ij} \equiv \sum_i \sum_j \hat X_{ij}
"""
import numpy as np
import anndata
import scvelo as scv
import time
import os.path as osp
import argparse
import multiprocessing
class VelocityCI(object):
"""Compute confidence intervals for RNA velocity vectors
Attributes
----------
adata : anndata.AnnData
[Cells, Genes] experiment with spliced and unspliced read
matrices in `.layers` as "spliced", "unspliced", "ambiguous".
`.X` should contain raw count values, rather than transformed
counts.
S : np.ndarray
[Cells, Genes] spliced read counts.
U : np.ndarray
[Cells, Genes] unspliced read counts.
A : np.ndarray
[Cells, Genes] ambiguous read counts.
Methods
-------
_sample_abundance_profile(x)
sample a total read count vector from a multinomial fit
to the observed count vector `x`.
_sample_spliced_unspliced(s, u, a, x_hat)
sample spliced, unspliced, and ambiguous read counts from
a multinomial given a sample of total read counts `x_hat`
and observed `s`pliced, `u`nspliced, `a`mbigious counts.
_sample_matrices()
samples a matrix of spliced, unspliced and ambiguous read
counts for all cells and genes in `.adata`.
_fit_velocity(SUA_hat,)
fits a velocity model to sampled spliced, unspliced counts
in an output from `_sample_matrices()`
bootstrap_velocity(n_iter, embed)
generate bootstrap samples of RNA velocity estimates using
`_sample_matrices` and `_fit_velocity` sequentially.
Notes
-----
Parallelization requires use of shared ctypes to avoid copying our
large data arrays for each child process. See `_sample_matrices` for
a discussion of the relevant considerations and solutions.
Due to this issue, we have modified `__getstate__` such that pickling
this object will not preserve all of the relevant data.
"""
def __init__(
self,
adata: anndata.AnnData,
) -> None:
"""Compute confidence intervals for RNA velocity vectors
Parameters
----------
adata : anndata.AnnData
[Cells, Genes] experiment with spliced and unspliced read
matrices in `.layers` as "spliced", "unspliced", "ambiguous".
`.X` should contain raw count values, rather than transformed
counts.
Returns
-------
None.
"""
# check that all necessary layers are present
if 'spliced' not in adata.layers.keys():
msg = 'spliced matrix must be available in `adata.layers`.'
raise ValueError(msg)
if 'unspliced' not in adata.layers.keys():
msg = 'unspliced matrix must be available in `adata.layers`.'
raise ValueError(msg)
if 'ambiguous' not in adata.layers.keys():
msg = 'ambiguous matrix must be available in `adata.layers`.'
raise ValueError(msg)
# copy relevant layers in memory to avoid altering the original
# input
self.adata = adata
self.S = adata.layers['spliced'].copy()
self.U = adata.layers['unspliced'].copy()
self.A = adata.layers['ambiguous'].copy()
# convert arrays to dense format if they are sparse
if type(self.S) != np.ndarray:
try:
self.S = self.S.toarray()
except ValueError:
msg = 'self.S was not np.ndarray, failed .toarray()'
print(msg)
if type(self.U) != np.ndarray:
try:
self.U = self.U.toarray()
except ValueError:
msg = 'self.U was not np.ndarray, failed .toarray()'
print(msg)
if type(self.A) != np.ndarray:
try:
self.A = self.A.toarray()
except ValueError:
msg = 'self.A was not np.ndarray, failed .toarray()'
print(msg)
# here, `X` is the total number of counts per feature regardless
# of the region where the reads map
self.X = self.S + self.U + self.A
self.data_shape = self.X.shape
assert type(self.X) == np.ndarray
# set normalization scale for velocity fitting
self.counts_per_cell_after = 1e4
return
def __getstate__(self,) -> dict:
"""
Override the default `__getstate__` behavior
so we do not pickle huge arrays.
Returns
-------
d : dict
object state dictionary, with large arrays removed
to allow pickling and passage to child processes.
Notes
-----
When we perform multiprocessing, we pickly the `VelocityCI`
class to pass to workers. Here, we remove all large memory
objects from the `__getstate__` method which is used during
the pickle process to gather all the relevant components of
an object in memory. We provide access to a shared buffer
with these objects to each worker to avoid copying them.
"""
d = dict(self.__dict__)
for attr in ['X', 'S', 'U', 'A']:
del d[attr]
del d[attr+'_batch']
large_arr = ['adata', 'SUA_hat', 'embed', 'velocity_estimates']
for k in large_arr:
if k in d.keys():
del d[k]
return d
def _sample_abundance_profile(
self,
x: np.ndarray,
) -> np.ndarray:
"""Given an observed mRNA abundance profile, fit a multinomial
distribution and randomly sample a corresponding profile.
Parameters
----------
x : np.ndarray
[Genes,] observed mRNA counts vector.
Returns
-------
x_hat : np.ndarray
[Genes,] a randomly sampled abundance profile,
given the multinomial distribution specified by `x`.
"""
# we need to instantiate a local random state to ensure
# each multiprocess thread generates true random numbers
local_rnd = np.random.RandomState()
# cast everything to `np.float64` before operations due to a
# `numpy` bug
# https://github.com/numpy/numpy/issues/8317
x = x.astype(np.float64)
# compute relative abundance profile as feature proportions
pvals = x / np.sum(x)
# sample a count distribution from the multinomial
x_hat = local_rnd.multinomial(
n=int(np.sum(x)),
pvals=pvals,
)
return x_hat
def _sample_spliced_unspliced(
self,
s: np.ndarray,
u: np.ndarray,
a: np.ndarray,
x_hat: np.ndarray,
) -> np.ndarray:
"""Sample the proportion of spliced/unspliced reads for a
randomly sampled mRNA profile given observed spliced and
unspliced read counts.
Parameters
----------
s : np.ndarray
[Genes,] observed spliced read counts for each gene.
u : np.ndarray
[Genes,] observed unspliced read counts for each gene.
a : np.ndarray
[Genes,] ambiguous read counts for each gene.
x_hat : np.ndarray
[Genes,] sampled total gene counts profile.
Returns
-------
sua_hat : np.ndarray
[Genes, (Spliced, Unspliced, Ambiguous)] read counts
randomly sampled from a multinomial.
"""
# we need to instantiate a local random state to ensure
# each multiprocess thread generates true random numbers
local_rnd = np.random.RandomState()
# Genes, (Spliced, Unspliced, Ambiguous)
sua_hat = np.zeros((len(x_hat), 3))
# compute total reads per feature
x = s + u + a
x = x.astype(np.float64)
# for each gene, sample the proportion of counts that originate
# from spliced, unspliced, or ambiguous regions using a multinomial
# distribution parameterized with the observed proportions
for g in range(len(x_hat)):
if x[g] == 0:
sua_hat[g, :] = 0
continue
pvals = np.array([s[g], u[g], a[g]], dtype=np.float64) / x[g]
sua_hat[g, :] = local_rnd.multinomial(
n=x_hat[g],
pvals=pvals,
)
return sua_hat
def _sample_cell(self,
i: int,
) -> np.ndarray:
"""Draw samples for a single cell.
Parameters
----------
i : int
cell index in `.X, .S, .U, .A` matrices.
Returns
-------
sua_hat : np.ndarray
[Genes, (Spliced, Unspliced, Ambig.)] for a single
cell at index `i` in `.X`, ...
Notes
-----
This implementation allows for simple parallelization with
a map across the cell indices.
"""
# gather the count arrays from a shared `RawArray`
# buffer and reshape them from flat [N*M,] to array
# [N, M] format
X = np.frombuffer(
var_args['X_batch'],
dtype=np.float64,
).reshape(var_args['data_shape_batch'])
S = np.frombuffer(
var_args['S_batch'],
).reshape(var_args['data_shape_batch'])
U = np.frombuffer(
var_args['U_batch'],
dtype=np.float64,
).reshape(var_args['data_shape_batch'])
A = np.frombuffer(
var_args['A_batch'],
dtype=np.float64,
).reshape(var_args['data_shape_batch'])
# get the read counts of each type for
# a single cell
x = X[i, :] # total read counts
s = S[i, :] # spliced read counts
u = U[i, :] # unspliced read counts
a = A[i, :] # ambiguous read counts
# sample the relative abudance across genes
x_hat = self._sample_abundance_profile(
x=x,
)
# for each gene, sample the proportion of reads
# originating from each type of region
sua_hat = self._sample_spliced_unspliced(
s=s,
u=u,
a=a,
x_hat=x_hat,
)
return sua_hat
    def _sample_matrices(
        self,
        batch_size: int = 256,
    ) -> np.ndarray:
        """Sample a spliced and unspliced counts matrix
        for a bootstrapped velocity vector estimation.

        Parameters
        ----------
        batch_size : int
            number of cells to sample in parallel.
            smaller batches use less RAM.

        Returns
        -------
        SUA_hat : np.ndarray
            [Cells, Genes, (Spliced, Unspliced, Ambiguous)]
            randomly sampled array of read counts assigned
            to a splicing status.

        Notes
        -----
        `_sample_matrices` uses `multiprocessing` to parallelize
        bootstrap simulations. We run into a somewhat tricky issue
        due to the size of our source data arrays (`.X, .S, .U, .A`).
        The usual approach to launching multiple processes is to use
        a `multiprocessing.Pool` to launch child processes, then copy
        the relevant data to each process by passing it as arguments
        or through pickling of object attributes.
        Here, the size of our arrays means that copying the large matrices
        to memory for each child process is (1) memory prohibitive and
        (2) really, really slow, defeating the whole purpose of parallelization.
        Here, we've implemented a batch processing solution to preserve RAM.
        We also use shared ctype arrays to avoid copying memory across workers.
        Use of ctype arrays increases the performance by ~5-fold. From this, we
        infer that copying even just the minibatch count arrays across all the
        child processes is prohibitively expensive.
        We can create shared ctype arrays using `multiprocessing.sharedctypes`
        that allow child processes to reference a single copy of each
        relevant array in memory.
        Because these data are read-only, we can get away with using
        `multiprocessing.RawArray` since we don't need process synchronization
        locks or any other sophisticated synchronization.
        Using `RawArray` with child processes in a pool is a little strange.
        We can't pass the `RawArray` pointer through a pickle, so we have to
        declare the pointers as global variables that get inherited by each
        child process through use of an `initializer` function in the pool.
        We also have to ensure that our parent object `__getstate__` function
        doesn't contain any of these large arrays, so that they aren't
        accidentally pickled in with the class methods. To fix that, we modify
        `__getstate__` above to remove large attributes from the object dict.
        """
        # preallocate the output so each batch can be written in place
        # [Cells, Genes, (Spliced, Unspliced, Ambiguous)]
        SUA_hat = np.zeros(
            self.X.shape + (3,)
        )
        # compute the total number of batches to use
        n_batches = int(np.ceil(self.X.shape[0]/batch_size))
        batch_idx = 0
        for batch in range(n_batches):
            # the final batch may hold fewer than `batch_size` cells
            end_idx = min(batch_idx+batch_size, self.X.shape[0])
            # set batch specific count arrays as attributes;
            # `_sample_cell` workers read these via the shared buffers
            # built just below
            for attr in ['X', 'S', 'U', 'A']:
                attr_all = getattr(self, attr)
                attr_batch = attr_all[batch_idx:end_idx, :]
                setattr(self, attr+'_batch', attr_batch)
            # generate shared arrays for child processes
            shared_arrays = {'data_shape_batch': self.X_batch.shape}
            for attr in ['X_batch', 'S_batch', 'U_batch', 'A_batch']:
                data = getattr(self, attr)
                # create the shared array
                # RawArray will only take a flat, 1D array
                # so we create it with as many elements as
                # our desired data
                shared = multiprocessing.RawArray(
                    'd', # doubles
                    int(np.prod(data.shape)),
                )
                # load our new shared array into a numpy frame
                # and copy data into it after reshaping
                shared_np = np.frombuffer(
                    shared,
                    dtype=np.float64,
                )
                shared_np = shared_np.reshape(data.shape)
                # copy data into the new shared buffer
                # this is reflected in `shared`, even though we're
                # copying to the numpy frame here
                np.copyto(shared_np, data)
                shared_arrays[attr] = shared
            # create a global dictionary to hold arguments
            # we pass to each worker using an initializer.
            # this is necessary because we can't pass `RawArray`
            # in a pickled object (e.g. as an attribute of `self`)
            global var_args
            var_args = {}
            # this function runs once in each worker after it is
            # initialized and installs all of the shared arrays into
            # the worker-global `var_args`
            def init_worker(shared_arrays):
                for k in shared_arrays:
                    var_args[k] = shared_arrays[k]
            start = time.time()
            print(f'Drawing bootstrapped samples, batch {batch:04}...')
            with multiprocessing.Pool(
                initializer=init_worker,
                initargs=(shared_arrays,)) as P:
                results = P.map(
                    self._sample_cell,
                    range(self.X_batch.shape[0]),
                )
            # stack the per-cell samples and write them into this
            # batch's rows of the preallocated output
            # [Cells, Genes, (Spliced, Unspliced, Ambiguous)]
            batch_SUA_hat = np.stack(results, 0)
            SUA_hat[batch_idx:end_idx, :, :] = batch_SUA_hat
            batch_idx += batch_size
            end = time.time()
            print('Duration: ', end-start)
        return SUA_hat
def _fit_velocity(
self,
SUA_hat: np.ndarray,
velocity_mode: str='deterministic',
) -> np.ndarray:
"""Fit a deterministic RNA velocity model to the
bootstrapped count matrices.
Parameters
----------
SUA_hat : np.ndarray
[Cells, Genes, (Spliced, Unspliced, Ambiguous)]
randomly sampled array of read counts assigned
to a splicing status.
velocity_mode : str
mode argument for `scvelo.tl.velocity`.
one of ("deterministic", "stochastic", "dynamical").
Returns
-------
velocity : np.ndarray
[Cells, Genes] RNA velocity estimates.
"""
dtype = np.float64
# create an AnnData object from a bootstrap sample
# of counts
boot = anndata.AnnData(
X=SUA_hat[:, :, 0].astype(dtype).copy(),
obs=self.adata.obs.copy(),
var=self.adata.var.copy(),
)
for i, k in enumerate(['spliced', 'unspliced', 'ambiguous']):
boot.layers[k] = SUA_hat[:, :, i].astype(dtype)
if self.velocity_prefilter_genes is not None:
# filter genes to match a pre-existing velocity computation
# this is useful for e.g. embedding in a common PC space
# with the observed velocity
boot = boot[:, self.velocity_prefilter_genes].copy()
# normalize
scv.pp.normalize_per_cell(
boot,
counts_per_cell_after=self.counts_per_cell_after,
)
# filter genes as in the embedding
if hasattr(self, 'embed'):
# if an embedded AnnData is provided
# subset to genes used for the original embedding
cell_bidx = np.array([
x in self.embed.obs_names for x in boot.obs_names
])
boot = boot[:, self.embed.var_names].copy()
boot = boot[cell_bidx, :].copy()
print(
'Subset bootstrap samples to embedding dims: ',
boot.shape,
)
else:
msg = 'must providing an embedding object containing\n'
msg += 'cells and genes to use for velocity estimation.'
raise ValueError(msg)
# log1p only the `.X` layer, leaving `.layers` untouched.
scv.pp.log1p(boot)
# fit the velocity model deterministically, following the original
# RNA velocity publication
scv.pp.pca(boot, use_highly_variable=False)
scv.pp.moments(boot, n_pcs=30, n_neighbors=100)
scv.tl.velocity(boot, mode=velocity_mode)
return boot.layers['velocity']
def bootstrap_velocity(
self,
n_iter: int = 100,
embed: anndata.AnnData = None,
velocity_prefilter_genes: list = None,
verbose: bool = False,
save_counts: str = None,
**kwargs,
) -> np.ndarray:
"""
Generated bootstrap estimates of the RNA velocity for
each cell and gene.
Parameters
----------
n_iter : int
number of bootstrap iterations to perform.
embed : anndata.AnnData, optional
[Cells, Genes] experiment describing the genes of interest
and containing a relevant embedding for projection of
velocity vectors.
velocity_prefilter_genes : list
genes selected by `scv.pp.filter_genes` in the embedding object
before normalization. often selected with `min_shared_counts`.
it is important to carry over this prefiltering step to ensure
that normalization is comparable to the original embedding.
verbose : bool
use verbose stdout printing.
save_counts : str, optional
save sampled count matrices to the specified path as
`sampled_counts_{_iter:04}.npy` with shape
[Sample, Cells, Genes, (Spliced, Unspliced, Ambig.)].
**kwargs passed to `_sample_matrices()`.
Returns
-------
velocity : np.ndarray
[Sample, Cells, Genes] bootstrap estimates of RNA
velocity for each cell and gene.
"""
# use genes in an embedding object if provided, otherwise
# get the n_top_genes most variable genes
if embed is not None:
self.embed = embed
embed_genes = self.embed.shape[1]
else:
embed_genes = self.n_top_genes
if velocity_prefilter_genes is not None:
self.velocity_prefilter_genes = velocity_prefilter_genes
else:
self.velocity_prefilter_genes = None
# store velocity estimates for each gene
# [Iterations, Cells, Genes]
velocity = np.zeros((n_iter, self.embed.shape[0], embed_genes))
for _iter in range(n_iter):
if verbose:
print('Beginning sampling for iteration %03d' % _iter)
# sample a counts matrix
SUA_hat = self._sample_matrices(**kwargs)
if save_counts is not None:
# save the raw counts sample to disk
np.save(
osp.join(
save_counts,
f'sampled_counts_{_iter:04}.npy',
),
SUA_hat,
)
if verbose:
print('Sampling complete.')
print('Fitting velocity model...')
# fit a velocity model to the sampled counts matrix
# yielding an estimate of velocity for each gene
iter_velo = self._fit_velocity(
SUA_hat=SUA_hat,
)
velocity[_iter, :, :] = iter_velo
if verbose:
print('Velocity fit, iteration %03d complete.' % _iter)
self.velocity_estimates = velocity
return velocity
def bootstrap_vectors(
self,
embed: anndata.AnnData = None,
) -> np.ndarray:
"""
Generate embedded velocity vectors for each bootstrapped sample
of spliced/unspliced counts.
Returns
-------
velocity_embeddings : np.ndarray
[n_iter, Cells, EmbeddingDims] RNA velocity vectors
for each bootstrap sampled set of counts in the
provided PCA embedding space.
"""
if embed is not None:
self.embed = embed
if not hasattr(self, 'embed'):
msg = 'must provide an `embed` argument.'
raise AttributeError(msg)
# copy the embedding object to use for low-rank embedding
project = self.embed.copy()
# remove any extant `velocity_settings` to use defaults.
# in the current `scvelo`, using non-default settings will throw a silly
# error in `scv.tl.velocity_embedding`.
if 'velocity_settings' in project.uns.keys():
project.uns.pop('velocity_settings')
# for each velocity profile estimate, compute the corresponding
# PCA embedding of those vectors using "direct_projection",
# aka as standard matrix multiplication.
#
# the `scvelo` nearest neighbor projection method introduces
# several assumptions that we do not wish to inherit here.
velocity_embeddings = []
for _iter in range(self.velocity_estimates.shape[0]):
V = self.velocity_estimates[_iter, :, :]
project.layers['velocity'] = V
scv.tl.velocity_embedding(
project,
basis='pca',
direct_pca_projection=True,
autoscale=False, # do not adjust vectors for aesthetics
)
velocity_embeddings.append(
project.obsm['velocity_pca'],
)
velocity_embeddings = np.stack(
velocity_embeddings,
axis=0,
)
self.velocity_embeddings = velocity_embeddings
return velocity_embeddings
def compute_ci(self,) -> np.ndarray:
"""
Compute confidence intervals for the velocity vector
on each cell from bootstrap samples of embedded velocity vectors.
Returns
-------
velocity_intervals : np.ndarray
[Cells, EmbeddingDims, (Mean, Std, LowerCI, UpperCI)]
estimates of the mean and confidence interval around the
RNA velocity vector computed for each cell.
"""
if not hasattr(self, 'velocity_embeddings'):
msg = 'must run `bootstrap_vectors` first to generate vector samples.'
raise AttributeError(msg)
# [Cells, Dims, (Mean, SD, Lower, Upper)]
self.velocity_intervals = np.zeros(
self.velocity_embeddings.shape[1:] + (4,)
)
# for each cell, compute the mean, std, and CI for
# each dimension in the embedding
# this provides a hypersphere of confidence for cell state transitions
# in the embedding space
for j in range(self.velocity_embeddings.shape[1]):
cell = self.velocity_embeddings[:, j, :] # Iter, Dims
mean = np.mean(cell, axis=0) # Dims
std = np.std(cell, axis=0) # Dims
# compute the 95% CI assuming normality
l_ci = mean - 1.96*std
u_ci = mean + 1.96*std
self.velocity_intervals[j, :, 0] = mean
self.velocity_intervals[j, :, 1] = std
self.velocity_intervals[j, :, 2] = l_ci
self.velocity_intervals[j, :, 3] = u_ci
return self.velocity_intervals
##################################################
# main
##################################################
def add_parser_arguments(parser):
    """Register the bootstrap CLI arguments on *parser* and return it.

    Arguments added: `--data` (input AnnData path), `--out_path`
    (output directory), `--n_iter` (bootstrap iterations, default 100).
    """
    # table-driven registration keeps flag/option pairs easy to scan
    argument_specs = [
        ('--data', dict(
            type=str,
            help='path to AnnData object with "spliced", "unspliced", "ambiguous" in `.layers`',
        )),
        ('--out_path', dict(
            type=str,
            help='output path for velocity bootstrap samples.',
        )),
        ('--n_iter', dict(
            type=int,
            default=100,
            help='number of bootstrap iterations to perform.',
        )),
    ]
    for flag, options in argument_specs:
        parser.add_argument(flag, **options)
    return parser
def make_parser():
    """Build the `argparse.ArgumentParser` for the bootstrap CLI."""
    description = (
        'Compute confidence intervals for RNA velocity '
        'by molecular bootstrapping'
    )
    parser = argparse.ArgumentParser(description=description)
    return add_parser_arguments(parser)
def main():
    """CLI entry point: load an AnnData file, bootstrap-sample RNA
    velocity, and persist the samples under ``--out_path``."""
    args = make_parser().parse_args()
    # load anndata
    print('Loading data...')
    adata = anndata.read_h5ad(args.data)
    print(f'{adata.shape[0]} cells and {adata.shape[1]} genes loaded.')
    # all three count layers are required downstream
    for k in ('spliced', 'unspliced', 'ambiguous'):
        if k not in adata.layers.keys():
            raise ValueError(f'{k} not found in `adata.layers`')
    # initialize velocity bootstrap object
    print('\nBootstrap sampling velocity...\n')
    vci = VelocityCI(adata=adata)
    # sample velocity vectors
    velocity_bootstraps = vci.bootstrap_velocity(
        n_iter=args.n_iter,
        save_counts=args.out_path,
    )
    # save bootstrap samples to disk
    np.save(
        osp.join(args.out_path, 'velocity_bootstrap_samples.npy'),
        velocity_bootstraps,
    )
    print('Done.')
# run the CLI entry point only when executed as a script (not on import)
if __name__ == '__main__':
    main()
| [
"scvelo.pp.normalize_per_cell",
"numpy.copyto",
"numpy.prod",
"numpy.array",
"scvelo.pp.log1p",
"anndata.read_h5ad",
"numpy.random.RandomState",
"numpy.mean",
"argparse.ArgumentParser",
"numpy.stack",
"scvelo.pp.pca",
"numpy.frombuffer",
"numpy.ceil",
"scvelo.pp.moments",
"scvelo.tl.velo... | [((27557, 27673), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute confidence intervals for RNA velocity by molecular bootstrapping"""'}), "(description=\n 'Compute confidence intervals for RNA velocity by molecular bootstrapping')\n", (27580, 27673), False, 'import argparse\n'), ((27876, 27904), 'anndata.read_h5ad', 'anndata.read_h5ad', (['args.data'], {}), '(args.data)\n', (27893, 27904), False, 'import anndata\n'), ((7170, 7193), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (7191, 7193), True, 'import numpy as np\n'), ((8701, 8724), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (8722, 8724), True, 'import numpy as np\n'), ((14119, 14148), 'numpy.zeros', 'np.zeros', (['(self.X.shape + (3,))'], {}), '(self.X.shape + (3,))\n', (14127, 14148), True, 'import numpy as np\n'), ((18595, 18681), 'scvelo.pp.normalize_per_cell', 'scv.pp.normalize_per_cell', (['boot'], {'counts_per_cell_after': 'self.counts_per_cell_after'}), '(boot, counts_per_cell_after=self.\n counts_per_cell_after)\n', (18620, 18681), True, 'import scvelo as scv\n'), ((19505, 19523), 'scvelo.pp.log1p', 'scv.pp.log1p', (['boot'], {}), '(boot)\n', (19517, 19523), True, 'import scvelo as scv\n'), ((19643, 19686), 'scvelo.pp.pca', 'scv.pp.pca', (['boot'], {'use_highly_variable': '(False)'}), '(boot, use_highly_variable=False)\n', (19653, 19686), True, 'import scvelo as scv\n'), ((19695, 19742), 'scvelo.pp.moments', 'scv.pp.moments', (['boot'], {'n_pcs': '(30)', 'n_neighbors': '(100)'}), '(boot, n_pcs=30, n_neighbors=100)\n', (19709, 19742), True, 'import scvelo as scv\n'), ((19751, 19792), 'scvelo.tl.velocity', 'scv.tl.velocity', (['boot'], {'mode': 'velocity_mode'}), '(boot, mode=velocity_mode)\n', (19766, 19792), True, 'import scvelo as scv\n'), ((21945, 21997), 'numpy.zeros', 'np.zeros', (['(n_iter, self.embed.shape[0], embed_genes)'], {}), '((n_iter, self.embed.shape[0], embed_genes))\n', 
(21953, 21997), True, 'import numpy as np\n'), ((25049, 25086), 'numpy.stack', 'np.stack', (['velocity_embeddings'], {'axis': '(0)'}), '(velocity_embeddings, axis=0)\n', (25057, 25086), True, 'import numpy as np\n'), ((25936, 25987), 'numpy.zeros', 'np.zeros', (['(self.velocity_embeddings.shape[1:] + (4,))'], {}), '(self.velocity_embeddings.shape[1:] + (4,))\n', (25944, 25987), True, 'import numpy as np\n'), ((28530, 28587), 'os.path.join', 'osp.join', (['args.out_path', '"""velocity_bootstrap_samples.npy"""'], {}), "(args.out_path, 'velocity_bootstrap_samples.npy')\n", (28538, 28587), True, 'import os.path as osp\n'), ((7459, 7468), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (7465, 7468), True, 'import numpy as np\n'), ((14248, 14285), 'numpy.ceil', 'np.ceil', (['(self.X.shape[0] / batch_size)'], {}), '(self.X.shape[0] / batch_size)\n', (14255, 14285), True, 'import numpy as np\n'), ((16467, 16478), 'time.time', 'time.time', ([], {}), '()\n', (16476, 16478), False, 'import time\n'), ((16919, 16939), 'numpy.stack', 'np.stack', (['results', '(0)'], {}), '(results, 0)\n', (16927, 16939), True, 'import numpy as np\n'), ((17056, 17067), 'time.time', 'time.time', ([], {}), '()\n', (17065, 17067), False, 'import time\n'), ((18926, 18989), 'numpy.array', 'np.array', (['[(x in self.embed.obs_names) for x in boot.obs_names]'], {}), '([(x in self.embed.obs_names) for x in boot.obs_names])\n', (18934, 18989), True, 'import numpy as np\n'), ((24707, 24803), 'scvelo.tl.velocity_embedding', 'scv.tl.velocity_embedding', (['project'], {'basis': '"""pca"""', 'direct_pca_projection': '(True)', 'autoscale': '(False)'}), "(project, basis='pca', direct_pca_projection=True,\n autoscale=False)\n", (24732, 24803), True, 'import scvelo as scv\n'), ((26368, 26389), 'numpy.mean', 'np.mean', (['cell'], {'axis': '(0)'}), '(cell, axis=0)\n', (26375, 26389), True, 'import numpy as np\n'), ((26416, 26436), 'numpy.std', 'np.std', (['cell'], {'axis': '(0)'}), '(cell, axis=0)\n', (26422, 26436), 
True, 'import numpy as np\n'), ((9282, 9328), 'numpy.array', 'np.array', (['[s[g], u[g], a[g]]'], {'dtype': 'np.float64'}), '([s[g], u[g], a[g]], dtype=np.float64)\n', (9290, 9328), True, 'import numpy as np\n'), ((10198, 10250), 'numpy.frombuffer', 'np.frombuffer', (["var_args['X_batch']"], {'dtype': 'np.float64'}), "(var_args['X_batch'], dtype=np.float64)\n", (10211, 10250), True, 'import numpy as np\n'), ((10336, 10370), 'numpy.frombuffer', 'np.frombuffer', (["var_args['S_batch']"], {}), "(var_args['S_batch'])\n", (10349, 10370), True, 'import numpy as np\n'), ((10444, 10496), 'numpy.frombuffer', 'np.frombuffer', (["var_args['U_batch']"], {'dtype': 'np.float64'}), "(var_args['U_batch'], dtype=np.float64)\n", (10457, 10496), True, 'import numpy as np\n'), ((10582, 10634), 'numpy.frombuffer', 'np.frombuffer', (["var_args['A_batch']"], {'dtype': 'np.float64'}), "(var_args['A_batch'], dtype=np.float64)\n", (10595, 10634), True, 'import numpy as np\n'), ((15416, 15455), 'numpy.frombuffer', 'np.frombuffer', (['shared'], {'dtype': 'np.float64'}), '(shared, dtype=np.float64)\n', (15429, 15455), True, 'import numpy as np\n'), ((15761, 15787), 'numpy.copyto', 'np.copyto', (['shared_np', 'data'], {}), '(shared_np, data)\n', (15770, 15787), True, 'import numpy as np\n'), ((16568, 16640), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'initializer': 'init_worker', 'initargs': '(shared_arrays,)'}), '(initializer=init_worker, initargs=(shared_arrays,))\n', (16588, 16640), False, 'import multiprocessing\n'), ((7585, 7594), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (7591, 7594), True, 'import numpy as np\n'), ((22377, 22432), 'os.path.join', 'osp.join', (['save_counts', 'f"""sampled_counts_{_iter:04}.npy"""'], {}), "(save_counts, f'sampled_counts_{_iter:04}.npy')\n", (22385, 22432), True, 'import os.path as osp\n'), ((15229, 15248), 'numpy.prod', 'np.prod', (['data.shape'], {}), '(data.shape)\n', (15236, 15248), True, 'import numpy as np\n')] |
import getpass
import json
import pathlib
import random
import time
from typing import List
import torch
import numpy as np
from src.utils.log import create_base_logger, create_logdir
from src.utils import measure_runtime, get_git_version
from .experiment import Experiment
class ExperimentSet:
    """Runs one experiment configuration across a list of random seeds.

    For each seed this seeds all RNGs (``random``, ``numpy``, ``torch``),
    creates a per-seed log directory with the config/seed/git metadata,
    and runs a fresh :class:`Experiment`.
    """
    def __init__(self, **kwargs):
        # raw configuration dictionary; individual fields unpacked below
        self.config = kwargs
        self.name = self.config["name"]
        self.experiment_config = self.config["experiment"]
        self.seeds: List[int] = self.config["seeds"]
        self.remote: bool = self.config.get("remote", False)
        self.logdir = create_logdir(f"learning_{self.name}")
        self.create_set_info()
        self.datadir = pathlib.Path("data")
        self.datadir.mkdir(parents=True, exist_ok=True)
        self.logger = create_base_logger(self.logdir, name="experiment_set")
        # fall back to CPU when CUDA is unavailable or disabled via config
        self.device = torch.device("cuda" if (self.config.get("cuda", True) and torch.cuda.is_available()) else "cpu")
        self.logger.info(f"Using device {self.device}")

    def run(self):
        """Run one Experiment per configured seed, timing the whole set."""
        with measure_runtime(self.logdir):
            for seed in self.seeds:
                torch.cuda.empty_cache()
                self.logger.info(f"Seed {seed} used to run experiment")
                self.set_random_seeds(seed)
                exp_logdir: pathlib.Path = self.create_experiment_logdir(seed)
                experiment: Experiment = Experiment(seed=seed, logdir=exp_logdir, datadir=self.datadir,
                                                     set_name=self.name, device=self.device,
                                                     remote=self.remote, **self.experiment_config)
                experiment.run()
                experiment.plot()

    @staticmethod
    def set_random_seeds(seed: int) -> None:
        """Seed all RNGs and force deterministic cuDNN kernels."""
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    def create_experiment_logdir(self, seed: int) -> pathlib.Path:
        """
        creates a logdir for an Experiment. These logdirs are contained within the ExperimentSet logdir.
        this function also fills the created dir with some basic information about the experiment such as the
        config used and the random seed that was set.
        :param seed:
        :return: the created logdir that should be passed to the Experiment
        """
        exp_logdir: pathlib.Path = self.logdir / f"seed_{seed}"
        # Fail loudly if this seed was already used.  An `assert` would be
        # silently stripped under `python -O`, so raise explicitly instead.
        if exp_logdir.exists():
            raise FileExistsError(f"logdir for seed {seed} already exists: {exp_logdir}")
        exp_logdir.mkdir(exist_ok=False, parents=False)
        with open(str(exp_logdir / "config.json"), "w") as fp:
            json.dump(self.experiment_config, fp, indent=4)
        with open(str(exp_logdir / "seed.json"), "w") as fp:
            json.dump({"seed": seed}, fp, indent=4)
        with open(str(exp_logdir / "git_version.json"), "w") as fp:
            json.dump({"version": get_git_version()}, fp, indent=4)
        return exp_logdir

    def create_set_info(self):
        """Write set-level config and run metadata into the set logdir."""
        with open(str(self.logdir / "config.json"), "w") as fp:
            json.dump(self.config, fp, indent=4)
        with open(self.logdir / "set.json", "w") as fp:
            json.dump({"name": self.name,
                       "user": getpass.getuser(),
                       "experiment_config": self.experiment_config,
                       "seeds": self.seeds,
                       "start_time": time.time()
                       }, fp)
"torch.manual_seed",
"src.utils.get_git_version",
"pathlib.Path",
"random.seed",
"src.utils.log.create_base_logger",
"torch.cuda.is_available",
"numpy.random.seed",
"src.utils.measure_runtime",
"getpass.getuser",
"src.utils.log.create_logdir",
"time.time",
"torch.cuda.empty_cache",
"json.dum... | [((599, 637), 'src.utils.log.create_logdir', 'create_logdir', (['f"""learning_{self.name}"""'], {}), "(f'learning_{self.name}')\n", (612, 637), False, 'from src.utils.log import create_base_logger, create_logdir\n'), ((693, 713), 'pathlib.Path', 'pathlib.Path', (['"""data"""'], {}), "('data')\n", (705, 713), False, 'import pathlib\n'), ((793, 847), 'src.utils.log.create_base_logger', 'create_base_logger', (['self.logdir'], {'name': '"""experiment_set"""'}), "(self.logdir, name='experiment_set')\n", (811, 847), False, 'from src.utils.log import create_base_logger, create_logdir\n'), ((1793, 1810), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1804, 1810), False, 'import random\n'), ((1819, 1839), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1833, 1839), True, 'import numpy as np\n'), ((1848, 1871), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1865, 1871), False, 'import torch\n'), ((1058, 1086), 'src.utils.measure_runtime', 'measure_runtime', (['self.logdir'], {}), '(self.logdir)\n', (1073, 1086), False, 'from src.utils import measure_runtime, get_git_version\n'), ((2721, 2768), 'json.dump', 'json.dump', (['self.experiment_config', 'fp'], {'indent': '(4)'}), '(self.experiment_config, fp, indent=4)\n', (2730, 2768), False, 'import json\n'), ((2842, 2881), 'json.dump', 'json.dump', (["{'seed': seed}", 'fp'], {'indent': '(4)'}), "({'seed': seed}, fp, indent=4)\n", (2851, 2881), False, 'import json\n'), ((3154, 3190), 'json.dump', 'json.dump', (['self.config', 'fp'], {'indent': '(4)'}), '(self.config, fp, indent=4)\n', (3163, 3190), False, 'import json\n'), ((1140, 1164), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1162, 1164), False, 'import torch\n'), ((929, 954), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (952, 954), False, 'import torch\n'), ((2985, 3002), 'src.utils.get_git_version', 'get_git_version', ([], {}), '()\n', (3000, 3002), 
False, 'from src.utils import measure_runtime, get_git_version\n'), ((3320, 3337), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (3335, 3337), False, 'import getpass\n'), ((3488, 3499), 'time.time', 'time.time', ([], {}), '()\n', (3497, 3499), False, 'import time\n')] |
import numpy as np
with open('data/21.txt') as file:
    mappings = dict()
    for line in file.readlines():
        # each rule line looks like "../.# => ##./#../..."; parse both
        # sides into boolean numpy grids ('#' -> True)
        parts = tuple(map(lambda p: np.array(list(map(lambda r: [c == '#' for c in r], p.split('/')))),
                          line.strip().split(' => ')))
        # Register every symmetry of the input pattern:
        # - 0, 90, 180 and 270 degree rotations
        # - vertical (up<>down) flip followed by the 4 rotations
        # - horizontal (left<>right) flip followed by the 4 rotations
        # ...Some of these are equivalent (the dict just overwrites)...
        # BUG FIX: `range(3)` previously omitted the 270-degree rotation,
        # so a grid segment matching only that orientation raised KeyError
        # during `expand`.
        keys = [
            *(np.rot90(parts[0], k) for k in range(4)),
            *(np.rot90(np.flipud(parts[0]), k) for k in range(4)),
            *(np.rot90(np.fliplr(parts[0]), k) for k in range(4)),
        ]
        for key in keys:
            mappings[key.tobytes()] = parts[1]
def expand(old_grid, length, oss, nss):
    """
    Expand a grid by replacing every segment with its mapped version.
    :param old_grid: The grid to expand
    :param length: The current size of the grid (length in any direction)
    :param oss: The size of the segment to be replaced (old segment size)
    :param nss: The size of the segment that will replace it (new segment size)
    :return: A new grid, with each segment expanded
    """
    segments = length // oss
    new_grid = np.empty((segments * nss, segments * nss), dtype=bool)
    for sx in range(segments):
        for sy in range(segments):
            # look up the replacement for this oss x oss segment by its bytes
            old_block = old_grid[sx * oss:(sx + 1) * oss, sy * oss:(sy + 1) * oss]
            new_grid[sx * nss:(sx + 1) * nss, sy * nss:(sy + 1) * nss] = mappings[old_block.tobytes()]
    return new_grid
# starting pattern: .#./..#/### as booleans
grid = np.array([[False, True, False], [False, False, True], [True, True, True]])
for i in range(18):
    size = len(grid)
    # even-sized grids split into 2x2 blocks -> 3x3; otherwise 3x3 -> 4x4
    grid = expand(grid, size, 2, 3) if size % 2 == 0 else expand(grid, size, 3, 4)
    if i == 4:
        # part one: number of on-pixels after 5 iterations
        print(f'Part one: {sum(sum(grid))}')
    elif i == 17:
        # part two: number of on-pixels after 18 iterations
        print(f'Part two: {sum(sum(grid))}')
| [
"numpy.flipud",
"numpy.fliplr",
"numpy.array",
"numpy.empty",
"numpy.rot90"
] | [((1733, 1807), 'numpy.array', 'np.array', (['[[False, True, False], [False, False, True], [True, True, True]]'], {}), '([[False, True, False], [False, False, True], [True, True, True]])\n', (1741, 1807), True, 'import numpy as np\n'), ((1357, 1421), 'numpy.empty', 'np.empty', (['(length // oss * nss, length // oss * nss)'], {'dtype': 'bool'}), '((length // oss * nss, length // oss * nss), dtype=bool)\n', (1365, 1421), True, 'import numpy as np\n'), ((591, 612), 'numpy.rot90', 'np.rot90', (['parts[0]', 'k'], {}), '(parts[0], k)\n', (599, 612), True, 'import numpy as np\n'), ((656, 675), 'numpy.flipud', 'np.flipud', (['parts[0]'], {}), '(parts[0])\n', (665, 675), True, 'import numpy as np\n'), ((723, 742), 'numpy.fliplr', 'np.fliplr', (['parts[0]'], {}), '(parts[0])\n', (732, 742), True, 'import numpy as np\n')] |
import numpy as np
from numpy import where
from pandas import DataFrame
from src.support import get_samples, display_cross_tab
from src.model import fit_predict, preprocessing_pipeline
from src.plots import create_model_plots, plot_smd
from src.propensity import create_matched_df, calc_smd
class PropensityScorer:
    """Pipeline wrapper around the project's propensity-matching helpers.

    Fits a dispute-outcome model and a card-company propensity model,
    builds a propensity-matched cohort, and compares standardized mean
    differences (SMD) before and after matching.
    """
    def __init__(self):
        self.model_dispute = None
        self.model_propensity = None
        self.df = DataFrame()
        self.model_input = DataFrame()
        self.df_balanced = DataFrame()
        self.smd_scores = DataFrame()

    @staticmethod
    def prep_data(cat_pct_min):
        """Load the raw samples and run them through preprocessing."""
        raw = get_samples()
        features = preprocessing_pipeline(raw, cat_pct_min=cat_pct_min)
        return raw, features

    def create_model_dispute(self):
        """Fit the dispute-outcome model ('won' disputes labeled 1)."""
        self.df["dispute_status"] = where(self.df["dispute_status"] == "won", 1, 0)
        self.model_dispute, self.df["dispute_pred"] = fit_predict(
            self.model_input, self.df["dispute_status"]
        )

    def create_model_propensity(self):
        """Fit the propensity model (MasterCard vs. other companies),
        dropping the credit-card features themselves from the inputs."""
        is_mastercard = self.df["formatted_credit_card_company"] == "MasterCard"
        card_columns = self.model_input.filter(regex="credit_card", axis=1).columns
        propensity_inputs = self.model_input.drop(card_columns, axis=1)
        self.model_propensity, self.df["propensity_score"] = fit_predict(
            propensity_inputs, is_mastercard
        )

    def compare_orders_matching(self):
        """Report dispute-win rates and SMD scores before/after matching."""
        smd = DataFrame()
        labeled = self.model_input.join(self.df["dispute_status"])
        print("Visa Mastercard diff - unmatched\n",
              labeled.groupby(
                  "formatted_credit_card_company_Visa").agg(mean_dispute_won=("dispute_status", "mean"))
              )
        smd["unmatched"] = calc_smd(labeled)
        # restrict to the propensity-matched rows and repeat
        labeled_matched = labeled.filter(self.df_balanced.index, axis=0)
        print("Visa Mastercard diff - matched\n",
              labeled_matched.groupby(
                  "formatted_credit_card_company_Visa").agg(mean_dispute_won=("dispute_status", "mean"))
              )
        smd["matched"] = calc_smd(labeled_matched)
        smd.sort_values("unmatched", inplace=True, ascending=False)
        return smd

    def run(self):
        """Execute the full scoring pipeline end to end."""
        self.df, self.model_input = self.prep_data(cat_pct_min=0.2)
        display_cross_tab(self.df)
        self.create_model_dispute()
        self.create_model_propensity()
        create_model_plots(
            self.df,
            feature_plots=["dispute_pred", "propensity_score"],
            desc="full"
        )
        self.df_balanced = create_matched_df(self.df)
        display_cross_tab(self.df_balanced)
        create_model_plots(self.df_balanced, ["propensity_score"], "balanced")
        self.smd_scores = self.compare_orders_matching()
        plot_smd(self.smd_scores)
if __name__ == "__main__":
main_score = PropensityScorer()
main_score.run()
| [
"src.plots.create_model_plots",
"src.plots.plot_smd",
"src.propensity.create_matched_df",
"src.model.fit_predict",
"src.support.display_cross_tab",
"numpy.where",
"src.support.get_samples",
"pandas.DataFrame",
"src.model.preprocessing_pipeline",
"src.propensity.calc_smd"
] | [((429, 440), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (438, 440), False, 'from pandas import DataFrame\n'), ((468, 479), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (477, 479), False, 'from pandas import DataFrame\n'), ((507, 518), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (516, 518), False, 'from pandas import DataFrame\n'), ((545, 556), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (554, 556), False, 'from pandas import DataFrame\n'), ((621, 634), 'src.support.get_samples', 'get_samples', ([], {}), '()\n', (632, 634), False, 'from src.support import get_samples, display_cross_tab\n'), ((657, 708), 'src.model.preprocessing_pipeline', 'preprocessing_pipeline', (['df'], {'cat_pct_min': 'cat_pct_min'}), '(df, cat_pct_min=cat_pct_min)\n', (679, 708), False, 'from src.model import fit_predict, preprocessing_pipeline\n'), ((835, 882), 'numpy.where', 'where', (["(self.df['dispute_status'] == 'won')", '(1)', '(0)'], {}), "(self.df['dispute_status'] == 'won', 1, 0)\n", (840, 882), False, 'from numpy import where\n'), ((937, 993), 'src.model.fit_predict', 'fit_predict', (['self.model_input', "self.df['dispute_status']"], {}), "(self.model_input, self.df['dispute_status'])\n", (948, 993), False, 'from src.model import fit_predict, preprocessing_pipeline\n'), ((1412, 1467), 'src.model.fit_predict', 'fit_predict', (['propensity_model_input', 'label_card_company'], {}), '(propensity_model_input, label_card_company)\n', (1423, 1467), False, 'from src.model import fit_predict, preprocessing_pipeline\n'), ((1529, 1540), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (1538, 1540), False, 'from pandas import DataFrame\n'), ((1868, 1897), 'src.propensity.calc_smd', 'calc_smd', (['model_input_labeled'], {}), '(model_input_labeled)\n', (1876, 1897), False, 'from src.propensity import create_matched_df, calc_smd\n'), ((2250, 2288), 'src.propensity.calc_smd', 'calc_smd', (['model_input_labeled_balanced'], {}), '(model_input_labeled_balanced)\n', 
(2258, 2288), False, 'from src.propensity import create_matched_df, calc_smd\n'), ((2508, 2534), 'src.support.display_cross_tab', 'display_cross_tab', (['self.df'], {}), '(self.df)\n', (2525, 2534), False, 'from src.support import get_samples, display_cross_tab\n'), ((2618, 2714), 'src.plots.create_model_plots', 'create_model_plots', (['self.df'], {'feature_plots': "['dispute_pred', 'propensity_score']", 'desc': '"""full"""'}), "(self.df, feature_plots=['dispute_pred',\n 'propensity_score'], desc='full')\n", (2636, 2714), False, 'from src.plots import create_model_plots, plot_smd\n'), ((2784, 2810), 'src.propensity.create_matched_df', 'create_matched_df', (['self.df'], {}), '(self.df)\n', (2801, 2810), False, 'from src.propensity import create_matched_df, calc_smd\n'), ((2819, 2854), 'src.support.display_cross_tab', 'display_cross_tab', (['self.df_balanced'], {}), '(self.df_balanced)\n', (2836, 2854), False, 'from src.support import get_samples, display_cross_tab\n'), ((2863, 2933), 'src.plots.create_model_plots', 'create_model_plots', (['self.df_balanced', "['propensity_score']", '"""balanced"""'], {}), "(self.df_balanced, ['propensity_score'], 'balanced')\n", (2881, 2933), False, 'from src.plots import create_model_plots, plot_smd\n'), ((2999, 3024), 'src.plots.plot_smd', 'plot_smd', (['self.smd_scores'], {}), '(self.smd_scores)\n', (3007, 3024), False, 'from src.plots import create_model_plots, plot_smd\n')] |
# -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : pandas test
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# pandas provides the Timestamp, DatetimeIndex, Period, and PeriodIndex classes.

# Timestamp formats
print(pd.Timestamp('2/15/2019 07:20PM'))
print(pd.Timestamp('2019-02-15 07:20PM'))
# one month later
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(months=1))
# set the month to January
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(month=1))
# ten days later
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(days=10))
# set the day of the month to 10
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(day=10))

# convert to a plain datetime object
dt = pd.Timestamp('2019-02-15 07:20PM').to_pydatetime()
print(dt.year)
print(dt.month)
print(dt.day)
print(dt.hour)
print(dt.minute)
print(dt.second)
print()

# a Period represents a span of time, e.g. a specific day or month.
# 'M' denotes a month
print(pd.Period('02/2019'))
# 'D' denotes a day
print(pd.Period('02/15/2019'))
print()

# create a Series indexed by Timestamps
t1 = pd.Series(list('abc'), [pd.Timestamp(
    '2016-09-01'), pd.Timestamp('2016-09-02'), pd.Timestamp('2016-09-03')])
print(t1)
# a Timestamp index is a DatetimeIndex
print(type(t1.index))
print()

# a Period index is a PeriodIndex
t2 = pd.Series(list('def'), [pd.Period('2016-09'),
                            pd.Period('2016-10'), pd.Period('2016-11')])
print(t2)
print(type(t2.index))
print()

d1 = ['2 June 2013', 'Aug 29, 2014', '2015-06-26', '7/12/16']
ts3 = pd.DataFrame(np.random.randint(10, 100, (4, 2)),
                   index=d1, columns=list('ab'))
print(ts3)
print()

# the index can be converted to datetime format
ts3.index = pd.to_datetime(ts3.index)
print(ts3)
print()

# month-first vs. day-first parsing
# 2012-04-07 00:00:00
print(pd.to_datetime('4.7.12'))
# 2012-07-04 00:00:00
print(pd.to_datetime('4.7.12', dayfirst=True))
print()

# difference between two Timestamps
print(pd.Timestamp('9/3/2016') - pd.Timestamp('9/1/2016'))
# add 12 days and 3 hours via a Timedelta
print(pd.Timestamp('9/2/2016 8:10AM') + pd.Timedelta('12D 3H'))
print()

# convert string dates in a DataFrame to datetime
df = pd.DataFrame(data=['2019-01-02', '2109-01-02',
                        '2019-01-03'], columns=['sample date'])
print(df)
# FIX: bare 'datetime64' (without a unit) is rejected by pandas >= 2.0;
# the unit must be spelled out.
print(df['sample date'].astype('datetime64[ns]'))
print()

# 9 dates starting 2016-10-01, two weeks apart
dates = pd.date_range('2016-10-01', periods=9, freq='2W-SUN')
print(dates)
print()

# Count 1 column: 100 + cumulative sum of 9 random ints in [-5, 10)
#   (previous cumulative value + current draw => current value)
# Count 2 column: 120 + 9 random ints in [-5, 10)
# build a DataFrame with these 2 columns
df = pd.DataFrame({'Count 1': 100 + np.random.randint(-5, 10, 9).cumsum(),
                   'Count 2': 120 + np.random.randint(-5, 10, 9)}, index=dates)
print(df)
# every index entry is a datetime and falls on a Sunday.
# FIX: `DatetimeIndex.weekday_name` was removed in pandas 1.0 -- use day_name().
print(df.index)
print(df.index.day_name())
# difference between each value and the previous one in each column
print(df.diff())
print()

# monthly mean of each column
print(df.resample('M').mean())
# a DatetimeIndex supports partial-string indexing.
# FIX: row selection with df['2016'] via [] was removed in pandas 2.0 -- use .loc.
# everything in 2016
print(df.loc['2016'])
# November 2016 only
print(df.loc['2016-11'])
# everything from January 2017 onwards (string slicing still works with [])
print(df['2017-01':])
# resample the 2-week index to weekly, forward-filling the new rows (ffill)
print(df.asfreq('W', method='ffill'))

# draw the chart
# (in a Jupyter notebook use:)
# %matplotlib inline
df.plot()
plt.show()
| [
"pandas.Timedelta",
"numpy.random.randint",
"pandas.DateOffset",
"pandas.date_range",
"pandas.DataFrame",
"pandas.Timestamp",
"pandas.Period",
"pandas.to_datetime",
"matplotlib.pyplot.show"
] | [((1610, 1635), 'pandas.to_datetime', 'pd.to_datetime', (['ts3.index'], {}), '(ts3.index)\n', (1624, 1635), True, 'import pandas as pd\n'), ((2026, 2117), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "['2019-01-02', '2109-01-02', '2019-01-03']", 'columns': "['sample date']"}), "(data=['2019-01-02', '2109-01-02', '2019-01-03'], columns=[\n 'sample date'])\n", (2038, 2117), True, 'import pandas as pd\n'), ((2241, 2294), 'pandas.date_range', 'pd.date_range', (['"""2016-10-01"""'], {'periods': '(9)', 'freq': '"""2W-SUN"""'}), "('2016-10-01', periods=9, freq='2W-SUN')\n", (2254, 2294), True, 'import pandas as pd\n'), ((3110, 3120), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3118, 3120), True, 'import matplotlib.pyplot as plt\n'), ((245, 278), 'pandas.Timestamp', 'pd.Timestamp', (['"""2/15/2019 07:20PM"""'], {}), "('2/15/2019 07:20PM')\n", (257, 278), True, 'import pandas as pd\n'), ((286, 320), 'pandas.Timestamp', 'pd.Timestamp', (['"""2019-02-15 07:20PM"""'], {}), "('2019-02-15 07:20PM')\n", (298, 320), True, 'import pandas as pd\n'), ((872, 892), 'pandas.Period', 'pd.Period', (['"""02/2019"""'], {}), "('02/2019')\n", (881, 892), True, 'import pandas as pd\n'), ((915, 938), 'pandas.Period', 'pd.Period', (['"""02/15/2019"""'], {}), "('02/15/2019')\n", (924, 938), True, 'import pandas as pd\n'), ((1462, 1496), 'numpy.random.randint', 'np.random.randint', (['(10)', '(100)', '(4, 2)'], {}), '(10, 100, (4, 2))\n', (1479, 1496), True, 'import numpy as np\n'), ((1704, 1728), 'pandas.to_datetime', 'pd.to_datetime', (['"""4.7.12"""'], {}), "('4.7.12')\n", (1718, 1728), True, 'import pandas as pd\n'), ((1758, 1797), 'pandas.to_datetime', 'pd.to_datetime', (['"""4.7.12"""'], {'dayfirst': '(True)'}), "('4.7.12', dayfirst=True)\n", (1772, 1797), True, 'import pandas as pd\n'), ((338, 371), 'pandas.Timestamp', 'pd.Timestamp', (['"""2/15/2019 07:20PM"""'], {}), "('2/15/2019 07:20PM')\n", (350, 371), True, 'import pandas as pd\n'), ((374, 397), 
'pandas.DateOffset', 'pd.DateOffset', ([], {'months': '(1)'}), '(months=1)\n', (387, 397), True, 'import pandas as pd\n'), ((415, 448), 'pandas.Timestamp', 'pd.Timestamp', (['"""2/15/2019 07:20PM"""'], {}), "('2/15/2019 07:20PM')\n", (427, 448), True, 'import pandas as pd\n'), ((451, 473), 'pandas.DateOffset', 'pd.DateOffset', ([], {'month': '(1)'}), '(month=1)\n', (464, 473), True, 'import pandas as pd\n'), ((492, 525), 'pandas.Timestamp', 'pd.Timestamp', (['"""2/15/2019 07:20PM"""'], {}), "('2/15/2019 07:20PM')\n", (504, 525), True, 'import pandas as pd\n'), ((528, 550), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(10)'}), '(days=10)\n', (541, 550), True, 'import pandas as pd\n'), ((568, 601), 'pandas.Timestamp', 'pd.Timestamp', (['"""2/15/2019 07:20PM"""'], {}), "('2/15/2019 07:20PM')\n", (580, 601), True, 'import pandas as pd\n'), ((604, 625), 'pandas.DateOffset', 'pd.DateOffset', ([], {'day': '(10)'}), '(day=10)\n', (617, 625), True, 'import pandas as pd\n'), ((651, 685), 'pandas.Timestamp', 'pd.Timestamp', (['"""2019-02-15 07:20PM"""'], {}), "('2019-02-15 07:20PM')\n", (663, 685), True, 'import pandas as pd\n'), ((1004, 1030), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016-09-01"""'], {}), "('2016-09-01')\n", (1016, 1030), True, 'import pandas as pd\n'), ((1037, 1063), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016-09-02"""'], {}), "('2016-09-02')\n", (1049, 1063), True, 'import pandas as pd\n'), ((1065, 1091), 'pandas.Timestamp', 'pd.Timestamp', (['"""2016-09-03"""'], {}), "('2016-09-03')\n", (1077, 1091), True, 'import pandas as pd\n'), ((1243, 1263), 'pandas.Period', 'pd.Period', (['"""2016-09"""'], {}), "('2016-09')\n", (1252, 1263), True, 'import pandas as pd\n'), ((1294, 1314), 'pandas.Period', 'pd.Period', (['"""2016-10"""'], {}), "('2016-10')\n", (1303, 1314), True, 'import pandas as pd\n'), ((1316, 1336), 'pandas.Period', 'pd.Period', (['"""2016-11"""'], {}), "('2016-11')\n", (1325, 1336), True, 'import pandas as pd\n'), ((1825, 1849), 
'pandas.Timestamp', 'pd.Timestamp', (['"""9/3/2016"""'], {}), "('9/3/2016')\n", (1837, 1849), True, 'import pandas as pd\n'), ((1852, 1876), 'pandas.Timestamp', 'pd.Timestamp', (['"""9/1/2016"""'], {}), "('9/1/2016')\n", (1864, 1876), True, 'import pandas as pd\n'), ((1911, 1942), 'pandas.Timestamp', 'pd.Timestamp', (['"""9/2/2016 8:10AM"""'], {}), "('9/2/2016 8:10AM')\n", (1923, 1942), True, 'import pandas as pd\n'), ((1945, 1967), 'pandas.Timedelta', 'pd.Timedelta', (['"""12D 3H"""'], {}), "('12D 3H')\n", (1957, 1967), True, 'import pandas as pd\n'), ((2557, 2585), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(10)', '(9)'], {}), '(-5, 10, 9)\n', (2574, 2585), True, 'import numpy as np\n'), ((2482, 2510), 'numpy.random.randint', 'np.random.randint', (['(-5)', '(10)', '(9)'], {}), '(-5, 10, 9)\n', (2499, 2510), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from PyQt5 import QtGui
from PyQt5.QtCore import Qt
from GUI.config import Config
class Utils:
    """Static helpers for binary-mask post-processing and OpenCV->Qt image conversion."""

    @staticmethod
    def enhanceMask(mask):
        """Fill the interior of every contour found in a binary mask.

        Parameters
        ----------
        mask : np.ndarray
            Single-channel binary image; drawn on in place and returned.
        """
        des = mask
        contour, _ = cv2.findContours(des, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contour:
            # thickness=cv2.FILLED paints the whole contour interior white
            cv2.drawContours(des, [cnt], -1, (255, 255, 255), thickness=cv2.FILLED)
        return des

    @staticmethod
    def extractLargerSegment(maskROAD):
        """Keep only the largest (by contour area) connected component of a binary mask.

        Returns the mask unchanged when no contour with positive area exists
        (the previous implementation raised NameError in that case because
        ``cntMax`` was never assigned).
        """
        contours, _ = cv2.findContours(maskROAD.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        maxA = 0
        cntMax = None
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > maxA:
                cntMax = cnt
                maxA = area
        if cntMax is not None:
            # Paint only the winning contour, then AND it with the input so
            # everything outside the largest segment is zeroed.
            maskTemp = np.zeros_like(maskROAD)
            cv2.drawContours(maskTemp, [cntMax], 0, 255, -1)
            maskROAD = cv2.bitwise_and(maskROAD, maskTemp)
        return maskROAD

    @staticmethod
    def preProcess(img):
        """Convert a BGR image to HSV and smooth it with an adaptive Gaussian blur.

        The kernel size is derived from the mean per-channel standard
        deviation (times two), forced to be odd and positive as
        ``cv2.GaussianBlur`` requires.
        """
        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        _, stds = cv2.meanStdDev(hsv_img)
        # Rule-of-thumb heuristic: noisier images get a larger blur kernel.
        kernel_size = Utils.int_round_up_to_odd(stds.mean() * 2)
        gb_img = cv2.GaussianBlur(hsv_img, (kernel_size, kernel_size), 0)
        return gb_img

    @staticmethod
    def post_process(img):
        """Clean a binary mask: erode speckle noise, dilate to close gaps, keep the largest segment."""
        img_out = cv2.erode(img, np.ones((5, 5), np.uint8), iterations=3)
        img_out = cv2.dilate(img_out, np.ones((20, 20), np.uint8), iterations=5)
        return Utils.extractLargerSegment(img_out)

    @staticmethod
    def maskImage(ori, mask):
        """Apply a binary mask to an image, keeping pixels where ``mask`` is non-zero."""
        return cv2.bitwise_and(ori, ori, mask=mask)

    @staticmethod
    def convert_cv_qt(cv_img, multiplier=1):
        """Convert an OpenCV BGR image to a QPixmap scaled to the configured display size.

        Parameters
        ----------
        cv_img : np.ndarray
            3-channel BGR image.
        multiplier : int, optional
            Scale factor applied to ``Config.display_width``/``display_height``.
        """
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        qt_image = QtGui.QImage(rgb_image.data, w, h, bytes_per_line,
                                QtGui.QImage.Format_RGB888)
        scaled = qt_image.scaled(Config.display_width * multiplier,
                                 Config.display_height * multiplier,
                                 Qt.KeepAspectRatio)
        return QtGui.QPixmap.fromImage(scaled)

    @staticmethod
    def int_round_up_to_odd(f):
        """Return floor(f / 2) * 2 + 1: an odd positive int, suitable as a blur kernel size."""
        return int(f // 2 * 2 + 1)
| [
"cv2.meanStdDev",
"cv2.drawContours",
"numpy.ones",
"cv2.erode",
"PyQt5.QtGui.QPixmap.fromImage",
"cv2.bitwise_and",
"PyQt5.QtGui.QImage",
"cv2.contourArea",
"numpy.zeros",
"cv2.cvtColor",
"cv2.findContours",
"cv2.dilate",
"cv2.GaussianBlur",
"numpy.zeros_like"
] | [((211, 273), 'cv2.findContours', 'cv2.findContours', (['des', 'cv2.RETR_CCOMP', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(des, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n', (227, 273), False, 'import cv2\n'), ((596, 619), 'numpy.zeros_like', 'np.zeros_like', (['maskROAD'], {}), '(maskROAD)\n', (609, 619), True, 'import numpy as np\n'), ((1080, 1116), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (1092, 1116), False, 'import cv2\n'), ((1201, 1224), 'cv2.meanStdDev', 'cv2.meanStdDev', (['hsv_img'], {}), '(hsv_img)\n', (1215, 1224), False, 'import cv2\n'), ((1448, 1504), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['hsv_img', '(kernel_size, kernel_size)', '(0)'], {}), '(hsv_img, (kernel_size, kernel_size), 0)\n', (1464, 1504), False, 'import cv2\n'), ((1636, 1661), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1643, 1661), True, 'import numpy as np\n'), ((1680, 1716), 'cv2.erode', 'cv2.erode', (['img', 'kernel'], {'iterations': '(3)'}), '(img, kernel, iterations=3)\n', (1689, 1716), False, 'import cv2\n'), ((1733, 1760), 'numpy.ones', 'np.ones', (['(20, 20)', 'np.uint8'], {}), '((20, 20), np.uint8)\n', (1740, 1760), True, 'import numpy as np\n'), ((1779, 1820), 'cv2.dilate', 'cv2.dilate', (['img_out', 'kernel'], {'iterations': '(5)'}), '(img_out, kernel, iterations=5)\n', (1789, 1820), False, 'import cv2\n'), ((1963, 1999), 'cv2.bitwise_and', 'cv2.bitwise_and', (['ori', 'ori'], {'mask': 'mask'}), '(ori, ori, mask=mask)\n', (1978, 1999), False, 'import cv2\n'), ((2138, 2177), 'cv2.cvtColor', 'cv2.cvtColor', (['cv_img', 'cv2.COLOR_BGR2RGB'], {}), '(cv_img, cv2.COLOR_BGR2RGB)\n', (2150, 2177), False, 'import cv2\n'), ((2276, 2354), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['rgb_image.data', 'w', 'h', 'bytes_per_line', 'QtGui.QImage.Format_RGB888'], {}), '(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n', (2288, 2354), False, 'from PyQt5 import QtGui\n'), ((2503, 2529), 
'PyQt5.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['p'], {}), '(p)\n', (2526, 2529), False, 'from PyQt5 import QtGui\n'), ((313, 384), 'cv2.drawContours', 'cv2.drawContours', (['des', '[cnt]', '(-1)', '(255, 255, 255)'], {'thickness': 'cv2.FILLED'}), '(des, [cnt], -1, (255, 255, 255), thickness=cv2.FILLED)\n', (329, 384), False, 'import cv2\n'), ((845, 879), 'numpy.zeros', 'np.zeros', (['maskROAD.shape', 'np.uint8'], {}), '(maskROAD.shape, np.uint8)\n', (853, 879), True, 'import numpy as np\n'), ((891, 939), 'cv2.drawContours', 'cv2.drawContours', (['maskTemp', '[cntMax]', '(0)', '(255)', '(-1)'], {}), '(maskTemp, [cntMax], 0, 255, -1)\n', (907, 939), False, 'import cv2\n'), ((959, 994), 'cv2.bitwise_and', 'cv2.bitwise_and', (['maskROAD', 'maskTemp'], {}), '(maskROAD, maskTemp)\n', (974, 994), False, 'import cv2\n'), ((717, 737), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (732, 737), False, 'import cv2\n'), ((805, 825), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (820, 825), False, 'import cv2\n')] |
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Core(Benchmark):
    """Microbenchmarks for basic NumPy array-creation routines.

    Each ``time_*`` method times a single constructor call; ``setup``
    builds the reusable inputs so their allocation is not part of the
    measured time.
    """
    def setup(self):
        # Inputs of assorted sizes/types reused across the timed methods.
        self.l100 = range(100)
        self.l50 = range(50)
        self.l = [np.arange(1000), np.arange(1000)]
        self.l10x10 = np.ones((10, 10))
    # --- np.array from scalars and sequences of various sizes ---
    def time_array_1(self):
        np.array(1)
    def time_array_empty(self):
        np.array([])
    def time_array_l1(self):
        np.array([1])
    def time_array_l100(self):
        np.array(self.l100)
    def time_array_l(self):
        np.array(self.l)
    # --- stacking a list of two 1000-element arrays ---
    def time_vstack_l(self):
        np.vstack(self.l)
    def time_hstack_l(self):
        np.hstack(self.l)
    def time_dstack_l(self):
        np.dstack(self.l)
    # --- basic constructors at fixed sizes ---
    def time_arange_100(self):
        np.arange(100)
    def time_zeros_100(self):
        np.zeros(100)
    def time_ones_100(self):
        np.ones(100)
    def time_empty_100(self):
        np.empty(100)
    def time_eye_100(self):
        np.eye(100)
    def time_identity_100(self):
        np.identity(100)
    def time_eye_3000(self):
        np.eye(3000)
    def time_identity_3000(self):
        np.identity(3000)
    # --- diagonal and triangular helpers ---
    def time_diag_l100(self):
        np.diag(self.l100)
    def time_diagflat_l100(self):
        np.diagflat(self.l100)
    def time_diagflat_l50_l50(self):
        np.diagflat([self.l50, self.l50])
    def time_triu_l10x10(self):
        np.triu(self.l10x10)
    def time_tril_l10x10(self):
        np.tril(self.l10x10)
class MA(Benchmark):
    """Microbenchmarks for ``numpy.ma`` masked-array construction."""
    def setup(self):
        # 100 data values and a matching all-True mask.
        self.l100 = range(100)
        self.t100 = ([True] * 100)
    def time_masked_array(self):
        # Construction with no data argument at all.
        np.ma.masked_array()
    def time_masked_array_l100(self):
        # Data only, no explicit mask.
        np.ma.masked_array(self.l100)
    def time_masked_array_l100_t100(self):
        # Data plus an explicit (all-True) mask.
        np.ma.masked_array(self.l100, self.t100)
class CorrConv(Benchmark):
    """Benchmarks for ``np.correlate`` and ``np.convolve`` over a grid of
    input sizes and boundary modes.
    """
    # Parameter grid: (len(x1), len(x2), convolution mode).  Some sizes are
    # written as floats (1e5, 1e4) for readability and must be cast to int
    # before use.
    params = [[50, 1000, 1e5],
              [10, 100, 1000, 1e4],
              ['valid', 'same', 'full']]
    param_names = ['size1', 'size2', 'mode']

    def setup(self, size1, size2, mode):
        # np.linspace requires an integer ``num``; passing the float params
        # (e.g. 1e5) directly raises TypeError on modern NumPy.
        self.x1 = np.linspace(0, 1, num=int(size1))
        self.x2 = np.cos(np.linspace(0, 2 * np.pi, num=int(size2)))

    def time_correlate(self, size1, size2, mode):
        np.correlate(self.x1, self.x2, mode=mode)

    def time_convolve(self, size1, size2, mode):
        np.convolve(self.x1, self.x2, mode=mode)
| [
"numpy.identity",
"numpy.dstack",
"numpy.eye",
"numpy.convolve",
"numpy.ones",
"numpy.hstack",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.empty",
"numpy.vstack",
"numpy.tril",
"numpy.correlate",
"numpy.diagflat",
"numpy.triu",
"numpy.arange",
"numpy.ma.mas... | [((296, 313), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (303, 313), True, 'import numpy as np\n'), ((351, 362), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (359, 362), True, 'import numpy as np\n'), ((404, 416), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (412, 416), True, 'import numpy as np\n'), ((455, 468), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (463, 468), True, 'import numpy as np\n'), ((509, 528), 'numpy.array', 'np.array', (['self.l100'], {}), '(self.l100)\n', (517, 528), True, 'import numpy as np\n'), ((566, 582), 'numpy.array', 'np.array', (['self.l'], {}), '(self.l)\n', (574, 582), True, 'import numpy as np\n'), ((621, 638), 'numpy.vstack', 'np.vstack', (['self.l'], {}), '(self.l)\n', (630, 638), True, 'import numpy as np\n'), ((677, 694), 'numpy.hstack', 'np.hstack', (['self.l'], {}), '(self.l)\n', (686, 694), True, 'import numpy as np\n'), ((733, 750), 'numpy.dstack', 'np.dstack', (['self.l'], {}), '(self.l)\n', (742, 750), True, 'import numpy as np\n'), ((791, 805), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (800, 805), True, 'import numpy as np\n'), ((845, 858), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (853, 858), True, 'import numpy as np\n'), ((897, 909), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (904, 909), True, 'import numpy as np\n'), ((949, 962), 'numpy.empty', 'np.empty', (['(100)'], {}), '(100)\n', (957, 962), True, 'import numpy as np\n'), ((1000, 1011), 'numpy.eye', 'np.eye', (['(100)'], {}), '(100)\n', (1006, 1011), True, 'import numpy as np\n'), ((1054, 1070), 'numpy.identity', 'np.identity', (['(100)'], {}), '(100)\n', (1065, 1070), True, 'import numpy as np\n'), ((1109, 1121), 'numpy.eye', 'np.eye', (['(3000)'], {}), '(3000)\n', (1115, 1121), True, 'import numpy as np\n'), ((1165, 1182), 'numpy.identity', 'np.identity', (['(3000)'], {}), '(3000)\n', (1176, 1182), True, 'import numpy as np\n'), ((1222, 1240), 'numpy.diag', 'np.diag', 
(['self.l100'], {}), '(self.l100)\n', (1229, 1240), True, 'import numpy as np\n'), ((1284, 1306), 'numpy.diagflat', 'np.diagflat', (['self.l100'], {}), '(self.l100)\n', (1295, 1306), True, 'import numpy as np\n'), ((1353, 1386), 'numpy.diagflat', 'np.diagflat', (['[self.l50, self.l50]'], {}), '([self.l50, self.l50])\n', (1364, 1386), True, 'import numpy as np\n'), ((1428, 1448), 'numpy.triu', 'np.triu', (['self.l10x10'], {}), '(self.l10x10)\n', (1435, 1448), True, 'import numpy as np\n'), ((1490, 1510), 'numpy.tril', 'np.tril', (['self.l10x10'], {}), '(self.l10x10)\n', (1497, 1510), True, 'import numpy as np\n'), ((1663, 1683), 'numpy.ma.masked_array', 'np.ma.masked_array', ([], {}), '()\n', (1681, 1683), True, 'import numpy as np\n'), ((1731, 1760), 'numpy.ma.masked_array', 'np.ma.masked_array', (['self.l100'], {}), '(self.l100)\n', (1749, 1760), True, 'import numpy as np\n'), ((1813, 1853), 'numpy.ma.masked_array', 'np.ma.masked_array', (['self.l100', 'self.t100'], {}), '(self.l100, self.t100)\n', (1831, 1853), True, 'import numpy as np\n'), ((2096, 2124), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'size1'}), '(0, 1, num=size1)\n', (2107, 2124), True, 'import numpy as np\n'), ((2245, 2286), 'numpy.correlate', 'np.correlate', (['self.x1', 'self.x2'], {'mode': 'mode'}), '(self.x1, self.x2, mode=mode)\n', (2257, 2286), True, 'import numpy as np\n'), ((2345, 2385), 'numpy.convolve', 'np.convolve', (['self.x1', 'self.x2'], {'mode': 'mode'}), '(self.x1, self.x2, mode=mode)\n', (2356, 2385), True, 'import numpy as np\n'), ((240, 255), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (249, 255), True, 'import numpy as np\n'), ((257, 272), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (266, 272), True, 'import numpy as np\n'), ((2150, 2186), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {'num': 'size2'}), '(0, 2 * np.pi, num=size2)\n', (2161, 2186), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.